repo_name | path | copies | size | content | license
---|---|---|---|---|---|
kotaura/btcconverter | app.py | 2 | 20429 | #coding:utf-8
from flask import Flask, render_template, request, redirect, url_for, Blueprint
from blockchain import exchangerates, statistics
from chart import chart
import urllib2, json, requests, datetime
import pandas as pd
import numpy as np
import scipy.stats
#Global variables
app = Flask(__name__)
app.register_blueprint(chart)
ticker = exchangerates.get_ticker()
stats = statistics.get()
actualpricelist = []
actualtimelist = []
actualpricelist_rev = []
actualtimelist_rev = []
actual = ['Actual Price']
date = ['Date']
#Chart section
jsonfilein = 'https://blockchain.info/charts/market-price?showDataPoints=false&timespan=&show_header=true&daysAverageString=1&scale=0&format=json&address='
r = requests.get(jsonfilein)
j = r.json()
entries = j['values']
#Def
def ccylists():
return ticker
def ccyprice():
for k in ticker:
yield ticker[k].p15min
def ccysymbol():
for s in ticker:
yield ticker[s].symbol
def ccyset():
for k in ticker:
return k, ticker[k].symbol, ticker[k].p15min
def actualprice():
for e in entries:
yield e['y']
def actualtime():
for e in entries:
yield datetime.datetime.fromtimestamp(int(e['x'])).strftime('%Y-%m-%d')
def predictionprice():
for e in entries:
yield e['y']
def predictiontime():
for e in entries:
yield e['x']
#Flask view
@app.route('/')
def index():
    title = 'BTC Simple Converter' #title was undefined here (NameError); value assumed for illustration
    return render_template('index.html', title=title)
@app.route('/chart')
def chart():
actualpricelist = []
actualtimelist = []
actualpricelist_rev = []
actualtimelist_rev = []
predictiontimelist = []
predictiontimelist_rev = []
predictiontimelist_rev_decoded = ['PDate']
for ap in actualprice():
actualpricelist_rev.append(ap)
actualpricelist_rev.reverse()
aplrev = actualpricelist_rev
apl = actual + aplrev
ppl = pd.Series(aplrev)
for t in actualtime():
actualtimelist_rev.append(t)
actualtimelist_rev.reverse()
atrev = actualtimelist_rev
atl = date + atrev
for pt in predictiontime():
predictiontimelist_rev.append(pt)
predictiontimelist_rev.reverse()
ptrev = predictiontimelist_rev
traintime = [] #Actual unix time for train data
traintimelist = [] #Actual unix time for train data
for tt in predictiontime():
traintime.append(tt)
traintime.reverse()
traintimelist = traintime
ptl = pd.Series(traintimelist)
for timedecode in ptrev:
predictiontimelist_rev_decoded.append(datetime.datetime.fromtimestamp(int(timedecode)).strftime('%Y-%m-%d'))
#Pandas linear regression prediction model
model = pd.ols(y=ppl, x=ptl)
modelx = model.beta['x']
modelintercept = model.beta['intercept']
#Price from linear reg
predictionpricelist = [utime * modelx + modelintercept for utime in ptrev]
predictionpricelist.insert(0,'Linear Regression')
#RSI Chart
rsipricelist = []
for rp in actualprice():
rsipricelist.append(rp)
for t in actualtime():
actualtimelist.append(t)
rsifirstdate = actualtimelist[14] #For reference, not to use
#RSI Calculation
rsilist = ['RSI 14'] #Array For JS
rsicount = 1 #Initialize
listhead = 0 #Get 15 values JIC
listtail = 14
for calcrsi in rsipricelist:
if rsicount < 354:
calclist = rsipricelist[listhead:listtail] #Pricelist array for RSI
pluslist = []
minuslist = []
rsix = 0
rsiy = 1
for i in xrange(14): #RSI calc start
if rsiy < 14:
rsia = calclist[rsix]
rsib = calclist[rsiy]
rsiz = rsib - rsia
if rsiz > 0.0:
pluslist.append(rsiz)
else:
minuslist.append(-rsiz)
rsix += 1
rsiy += 1
avggain = sum(pluslist) / 14.0
avgloss = sum(minuslist) / 14.0
rsia = 0
rsib = 0
rsiz = 0
rs = avggain / avgloss
rsi = 100 - 100 / (1 + rs)
rsilist.append(rsi)
rsicount += 1 #Increment count for next for-paragraph
listhead += 1
listtail += 1
del pluslist[:] #Initialize all lists that works for only python 2.7
del minuslist[:]
return render_template('chart.js', rsilist=rsilist, predictionpricelist=predictionpricelist, predictiontime=predictiontimelist_rev_decoded, modelx=modelx, modelintercept=modelintercept, actualtime=atl, actualprice=apl)
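#Illustrative sketch added for clarity (not called by the app): the loop above
#computes a 14-period RSI from average gains and losses over a sliding window.
#The helper below reproduces that calculation; its name and argument are
#assumptions, not part of the original code.
def rsi14_sketch(prices):
    """Return RSI-14 values for a list of prices ordered oldest-to-newest."""
    rsis = []
    for head in range(len(prices) - 14):
        window = prices[head:head + 14]                    #14 prices -> 13 differences
        diffs = [b - a for a, b in zip(window, window[1:])]
        avg_gain = sum(d for d in diffs if d > 0) / 14.0   #divide by 14.0, as the original loop does
        avg_loss = sum(-d for d in diffs if d <= 0) / 14.0
        rsis.append(100 - 100 / (1 + avg_gain / avg_loss)) #undefined when avg_loss == 0, as in the original
    return rsis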
@app.route('/chart3')
def chart3():
predictiontimelist = []
predictiontimelist_rev = []
predictiontimelist_rev_decoded = ['PDate']
for ap in actualprice():
actualpricelist_rev.append(ap)
actualpricelist_rev.reverse()
aplrev = actualpricelist_rev[:3]
apl = actual + aplrev
ppl = pd.Series(aplrev)
for t in actualtime():
actualtimelist_rev.append(t)
actualtimelist_rev.reverse()
atrev = actualtimelist_rev[:3]
atl = date + atrev
for pt in predictiontime():
predictiontimelist_rev.append(pt) # + 172800
predictiontimelist_rev.reverse()
ptrev = predictiontimelist_rev[:3]
traintime = [] #Actual unix time for train data
traintimelist = [] #Actual unix time for train data
for tt in predictiontime():
traintime.append(tt)
traintime.reverse()
traintimelist = traintime[:3]
ptl = pd.Series(traintimelist)
for timedecode in ptrev:
predictiontimelist_rev_decoded.append(datetime.datetime.fromtimestamp(int(timedecode)).strftime('%Y-%m-%d'))
#Pandas linear regression prediction model
model = pd.ols(y=ppl, x=ptl)
modelx = model.beta['x']
modelintercept = model.beta['intercept']
#Price from linear reg
predictionpricelist = [utime * modelx + modelintercept for utime in ptrev]
predictionpricelist.insert(0,'Linear Regression')
#RSI Chart
rsipricelist = []
for rp in actualprice():
rsipricelist.append(rp)
for t in actualtime():
actualtimelist.append(t)
rsifirstdate = actualtimelist[14] #For reference, not to use
#RSI Calculation
rsilist = ['RSI 14'] #Array For JS
rsicount = 1 #Initialize
listhead = 0 #Get 15 values JIC
listtail = 14
for calcrsi in rsipricelist:
if rsicount < 354:
calclist = rsipricelist[listhead:listtail] #Pricelist array for RSI
pluslist = []
minuslist = []
rsix = 0
rsiy = 1
for i in xrange(14): #RSI calc start
if rsiy < 14:
rsia = calclist[rsix]
rsib = calclist[rsiy]
rsiz = rsib - rsia
if rsiz > 0.0:
pluslist.append(rsiz)
else:
minuslist.append(-rsiz)
rsix += 1
rsiy += 1
avggain = sum(pluslist) / 14.0
avgloss = sum(minuslist) / 14.0
rsia = 0
rsib = 0
rsiz = 0
rs = avggain / avgloss
rsi = 100 - 100 / (1 + rs)
rsilist.append(rsi)
rsicount += 1 #Increment count for next for-paragraph
listhead += 1
listtail += 1
del pluslist[:] #Initialize all lists that works for only python 2.7
del minuslist[:]
return render_template('chart.js', rsilist=rsilist, predictionpricelist=predictionpricelist, predictiontime=predictiontimelist_rev_decoded, modelx=modelx, modelintercept=modelintercept, actualtime=atl, actualprice=apl)
@app.route('/chart7')
def chart7():
predictiontimelist = []
predictiontimelist_rev = []
predictiontimelist_rev_decoded = ['PDate']
for ap in actualprice():
actualpricelist_rev.append(ap)
actualpricelist_rev.reverse()
aplrev = actualpricelist_rev[:7]
apl = actual + aplrev
ppl = pd.Series(aplrev)
for t in actualtime():
actualtimelist_rev.append(t)
actualtimelist_rev.reverse()
atrev = actualtimelist_rev[:7]
atl = date + atrev
for pt in predictiontime():
predictiontimelist_rev.append(pt) # + 518400
predictiontimelist_rev.reverse()
ptrev = predictiontimelist_rev[:7]
traintime = [] #Actual unix time for train data
traintimelist = [] #Actual unix time for train data
for tt in predictiontime():
traintime.append(tt)
traintime.reverse()
traintimelist = traintime[:7]
ptl = pd.Series(traintimelist)
for timedecode in ptrev:
predictiontimelist_rev_decoded.append(datetime.datetime.fromtimestamp(int(timedecode)).strftime('%Y-%m-%d'))
#Pandas linear regression prediction model
model = pd.ols(y=ppl, x=ptl)
modelx = model.beta['x']
modelintercept = model.beta['intercept']
#Price from linear reg
predictionpricelist = [utime * modelx + modelintercept for utime in ptrev]
predictionpricelist.insert(0,'Linear Regression')
#RSI Chart
rsipricelist = []
for rp in actualprice():
rsipricelist.append(rp)
for t in actualtime():
actualtimelist.append(t)
rsifirstdate = actualtimelist[14] #For reference, not to use
#RSI Calculation
rsilist = ['RSI 14'] #Array For JS
rsicount = 1 #Initialize
listhead = 0 #Get 15 values JIC
listtail = 14
for calcrsi in rsipricelist:
if rsicount < 354:
calclist = rsipricelist[listhead:listtail] #Pricelist array for RSI
pluslist = []
minuslist = []
rsix = 0
rsiy = 1
for i in xrange(14): #RSI calc start
if rsiy < 14:
rsia = calclist[rsix]
rsib = calclist[rsiy]
rsiz = rsib - rsia
if rsiz > 0.0:
pluslist.append(rsiz)
else:
minuslist.append(-rsiz)
rsix += 1
rsiy += 1
avggain = sum(pluslist) / 14.0
avgloss = sum(minuslist) / 14.0
rsia = 0
rsib = 0
rsiz = 0
rs = avggain / avgloss
rsi = 100 - 100 / (1 + rs)
rsilist.append(rsi)
rsicount += 1 #Increment count for next for-paragraph
listhead += 1
listtail += 1
del pluslist[:] #Initialize all lists that works for only python 2.7
del minuslist[:]
return render_template('chart.js', rsilist=rsilist, predictionpricelist=predictionpricelist, predictiontime=predictiontimelist_rev_decoded, modelx=modelx, modelintercept=modelintercept, actualtime=atl, actualprice=apl)
@app.route('/chart15')
def chart15():
predictiontimelist = []
predictiontimelist_rev = []
predictiontimelist_rev_decoded = ['PDate']
for ap in actualprice():
actualpricelist_rev.append(ap)
actualpricelist_rev.reverse()
aplrev = actualpricelist_rev[:15]
apl = actual + aplrev
ppl = pd.Series(aplrev)
for t in actualtime():
actualtimelist_rev.append(t)
actualtimelist_rev.reverse()
atrev = actualtimelist_rev[:15]
atl = date + atrev
for pt in predictiontime():
predictiontimelist_rev.append(pt) # + 1209600
predictiontimelist_rev.reverse()
ptrev = predictiontimelist_rev[:15]
traintime = [] #Actual unix time for train data
traintimelist = [] #Actual unix time for train data
for tt in predictiontime():
traintime.append(tt)
traintime.reverse()
traintimelist = traintime[:15]
ptl = pd.Series(traintimelist)
for timedecode in ptrev:
predictiontimelist_rev_decoded.append(datetime.datetime.fromtimestamp(int(timedecode)).strftime('%Y-%m-%d'))
#Pandas linear regression prediction model
model = pd.ols(y=ppl, x=ptl)
modelx = model.beta['x']
modelintercept = model.beta['intercept']
#Price from linear reg
predictionpricelist = [utime * modelx + modelintercept for utime in ptrev]
predictionpricelist.insert(0,'Linear Regression')
#RSI Chart
rsipricelist = []
for rp in actualprice():
rsipricelist.append(rp)
for t in actualtime():
actualtimelist.append(t)
rsifirstdate = actualtimelist[14] #For reference, not to use
#RSI Calculation
rsilist = ['RSI 14'] #Array For JS
rsicount = 1 #Initialize
listhead = 0 #Get 15 values JIC
listtail = 14
for calcrsi in rsipricelist:
if rsicount < 354:
calclist = rsipricelist[listhead:listtail] #Pricelist array for RSI
pluslist = []
minuslist = []
rsix = 0
rsiy = 1
for i in xrange(14): #RSI calc start
if rsiy < 14:
rsia = calclist[rsix]
rsib = calclist[rsiy]
rsiz = rsib - rsia
if rsiz > 0.0:
pluslist.append(rsiz)
else:
minuslist.append(-rsiz)
rsix += 1
rsiy += 1
avggain = sum(pluslist) / 14.0
avgloss = sum(minuslist) / 14.0
rsia = 0
rsib = 0
rsiz = 0
rs = avggain / avgloss
rsi = 100 - 100 / (1 + rs)
rsilist.append(rsi)
rsicount += 1 #Increment count for next for-paragraph
listhead += 1
listtail += 1
del pluslist[:] #Initialize all lists that works for only python 2.7
del minuslist[:]
return render_template('chart.js', rsilist=rsilist, predictionpricelist=predictionpricelist, predictiontime=predictiontimelist_rev_decoded, modelx=modelx, modelintercept=modelintercept, actualtime=atl, actualprice=apl)
@app.route('/chart30')
def chart30():
predictiontimelist = []
predictiontimelist_rev = []
predictiontimelist_rev_decoded = ['PDate']
for ap in actualprice():
actualpricelist_rev.append(ap)
actualpricelist_rev.reverse()
aplrev = actualpricelist_rev[:30]
apl = actual + aplrev
ppl = pd.Series(aplrev)
for t in actualtime():
actualtimelist_rev.append(t)
actualtimelist_rev.reverse()
atrev = actualtimelist_rev[:30]
atl = date + atrev
for pt in predictiontime():
predictiontimelist_rev.append(pt) # + 2505600
predictiontimelist_rev.reverse()
ptrev = predictiontimelist_rev[:30]
traintime = [] #Actual unix time for train data
traintimelist = [] #Actual unix time for train data
for tt in predictiontime():
traintime.append(tt)
traintime.reverse()
traintimelist = traintime[:30]
ptl = pd.Series(traintimelist)
for timedecode in ptrev:
predictiontimelist_rev_decoded.append(datetime.datetime.fromtimestamp(int(timedecode)).strftime('%Y-%m-%d'))
#Pandas linear regression prediction model
model = pd.ols(y=ppl, x=ptl)
modelx = model.beta['x']
modelintercept = model.beta['intercept']
#Price from linear reg
predictionpricelist = [utime * modelx + modelintercept for utime in ptrev]
predictionpricelist.insert(0,'Linear Regression')
#RSI Chart
rsipricelist = []
for rp in actualprice():
rsipricelist.append(rp)
for t in actualtime():
actualtimelist.append(t)
rsifirstdate = actualtimelist[14] #For reference, not to use
#RSI Calculation
rsilist = ['RSI 14'] #Array For JS
rsicount = 1 #Initialize
listhead = 0 #Get 15 values JIC
listtail = 14
for calcrsi in rsipricelist:
if rsicount < 354:
calclist = rsipricelist[listhead:listtail] #Pricelist array for RSI
pluslist = []
minuslist = []
rsix = 0
rsiy = 1
for i in xrange(14): #RSI calc start
if rsiy < 14:
rsia = calclist[rsix]
rsib = calclist[rsiy]
rsiz = rsib - rsia
if rsiz > 0.0:
pluslist.append(rsiz)
else:
minuslist.append(-rsiz)
rsix += 1
rsiy += 1
avggain = sum(pluslist) / 14.0
avgloss = sum(minuslist) / 14.0
rsia = 0
rsib = 0
rsiz = 0
rs = avggain / avgloss
rsi = 100 - 100 / (1 + rs)
rsilist.append(rsi)
rsicount += 1 #Increment count for next for-paragraph
listhead += 1
listtail += 1
del pluslist[:] #Initialize all lists that works for only python 2.7
del minuslist[:]
return render_template('chart.js', rsilist=rsilist, predictionpricelist=predictionpricelist, predictiontime=predictiontimelist_rev_decoded, modelx=modelx, modelintercept=modelintercept, actualtime=atl, actualprice=apl)
@app.route('/chart60')
def chart60():
predictiontimelist = []
predictiontimelist_rev = []
predictiontimelist_rev_decoded = ['PDate']
for ap in actualprice():
actualpricelist_rev.append(ap)
actualpricelist_rev.reverse()
aplrev = actualpricelist_rev[:60]
apl = actual + aplrev
ppl = pd.Series(aplrev)
for t in actualtime():
actualtimelist_rev.append(t)
actualtimelist_rev.reverse()
atrev = actualtimelist_rev[:60]
atl = date + atrev
for pt in predictiontime():
predictiontimelist_rev.append(pt) # + 5097600
predictiontimelist_rev.reverse()
ptrev = predictiontimelist_rev[:60]
traintime = [] #Actual unix time for train data
traintimelist = [] #Actual unix time for train data
for tt in predictiontime():
traintime.append(tt)
traintime.reverse()
traintimelist = traintime[:60]
ptl = pd.Series(traintimelist)
for timedecode in ptrev:
predictiontimelist_rev_decoded.append(datetime.datetime.fromtimestamp(int(timedecode)).strftime('%Y-%m-%d'))
#Pandas linear regression prediction model
model = pd.ols(y=ppl, x=ptl)
modelx = model.beta['x']
modelintercept = model.beta['intercept']
#Price from linear reg
predictionpricelist = [utime * modelx + modelintercept for utime in ptrev]
predictionpricelist.insert(0,'Linear Regression')
#RSI Chart
rsipricelist = []
for rp in actualprice():
rsipricelist.append(rp)
for t in actualtime():
actualtimelist.append(t)
rsifirstdate = actualtimelist[14] #For reference, not to use
#RSI Calculation
rsilist = ['RSI 14'] #Array For JS
rsicount = 1 #Initialize
listhead = 0 #Get 15 values JIC
listtail = 14
for calcrsi in rsipricelist:
if rsicount < 354:
calclist = rsipricelist[listhead:listtail] #Pricelist array for RSI
pluslist = []
minuslist = []
rsix = 0
rsiy = 1
for i in xrange(14): #RSI calc start
if rsiy < 14:
rsia = calclist[rsix]
rsib = calclist[rsiy]
rsiz = rsib - rsia
if rsiz > 0.0:
pluslist.append(rsiz)
else:
minuslist.append(-rsiz)
rsix += 1
rsiy += 1
avggain = sum(pluslist) / 14.0
avgloss = sum(minuslist) / 14.0
rsia = 0
rsib = 0
rsiz = 0
rs = avggain / avgloss
rsi = 100 - 100 / (1 + rs)
rsilist.append(rsi)
rsicount += 1 #Increment count for next for-paragraph
listhead += 1
listtail += 1
del pluslist[:] #Initialize all lists that works for only python 2.7
del minuslist[:]
return render_template('chart.js', rsilist=rsilist, predictionpricelist=predictionpricelist, predictiontime=predictiontimelist_rev_decoded, modelx=modelx, modelintercept=modelintercept, actualtime=atl, actualprice=apl)
@app.route('/chart90')
def chart90():
predictiontimelist = []
predictiontimelist_rev = []
predictiontimelist_rev_decoded = ['PDate']
for ap in actualprice():
actualpricelist_rev.append(ap)
actualpricelist_rev.reverse()
aplrev = actualpricelist_rev[:90]
apl = actual + aplrev
ppl = pd.Series(aplrev)
for t in actualtime():
actualtimelist_rev.append(t)
actualtimelist_rev.reverse()
atrev = actualtimelist_rev[:90]
atl = date + atrev
for pt in predictiontime():
predictiontimelist_rev.append(pt) # + 7689600
predictiontimelist_rev.reverse()
ptrev = predictiontimelist_rev[:90]
traintime = [] #Actual unix time for train data
traintimelist = [] #Actual unix time for train data
for tt in predictiontime():
traintime.append(tt)
traintime.reverse()
traintimelist = traintime[:90]
ptl = pd.Series(traintimelist)
for timedecode in ptrev:
predictiontimelist_rev_decoded.append(datetime.datetime.fromtimestamp(int(timedecode)).strftime('%Y-%m-%d'))
#Pandas linear regression prediction model
model = pd.ols(y=ppl, x=ptl)
modelx = model.beta['x']
modelintercept = model.beta['intercept']
#Price from linear reg
predictionpricelist = [utime * modelx + modelintercept for utime in ptrev]
predictionpricelist.insert(0,'Linear Regression')
#RSI Chart
rsipricelist = []
for rp in actualprice():
rsipricelist.append(rp)
for t in actualtime():
actualtimelist.append(t)
rsifirstdate = actualtimelist[14] #For reference, not to use
#RSI Calculation
rsilist = ['RSI 14'] #Array For JS
rsicount = 1 #Initialize
listhead = 0 #Get 15 values JIC
listtail = 14
for calcrsi in rsipricelist:
if rsicount < 354:
calclist = rsipricelist[listhead:listtail] #Pricelist array for RSI
pluslist = []
minuslist = []
rsix = 0
rsiy = 1
for i in xrange(14): #RSI calc start
if rsiy < 14:
rsia = calclist[rsix]
rsib = calclist[rsiy]
rsiz = rsib - rsia
if rsiz > 0.0:
pluslist.append(rsiz)
else:
minuslist.append(-rsiz)
rsix += 1
rsiy += 1
avggain = sum(pluslist) / 14.0
avgloss = sum(minuslist) / 14.0
rsia = 0
rsib = 0
rsiz = 0
rs = avggain / avgloss
rsi = 100 - 100 / (1 + rs)
rsilist.append(rsi)
rsicount += 1 #Increment count for next for-paragraph
listhead += 1
listtail += 1
del pluslist[:] #Initialize all lists that works for only python 2.7
del minuslist[:]
return render_template('chart.js', rsilist=rsilist, predictionpricelist=predictionpricelist, predictiontime=predictiontimelist_rev_decoded, modelx=modelx, modelintercept=modelintercept, actualtime=atl, actualprice=apl)
@app.route('/jpy', methods=['GET', 'POST'])
def jpy():
title = 'JPY Simple Converter'
name = request.form['name']
btc_amo = exchangerates.to_btc('JPY', name)
home = redirect(url_for('index'))
excsym = ticker['JPY'].symbol
excrat = ticker['JPY'].p15min
priceList = []
for item in ccyprice():
priceList.append(item)
usdmktprice = stats.market_price_usd
return render_template('index.html', usdmktprice=usdmktprice, excrat=excrat, excsym=excsym, home=home, name=name, btc_amo=btc_amo, ccyprice=priceList, ccylists=ccylists(), title=title)
#Conf
# if __name__ == '__main__':
# app.debug = True
# app.run(host='0.0.0.0')
| gpl-2.0 |
wegamekinglc/Finance-Python | PyFin/tests/Analysis/testTransformer.py | 2 | 7077 | # -*- coding: utf-8 -*-
u"""
Created on 2016-12-21
@author: cheng.li
"""
import unittest
import numpy as np
import pandas as pd
from PyFin.Analysis.SecurityValueHolders import SecurityLatestValueHolder
from PyFin.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingAverage
from PyFin.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingMax
from PyFin.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingMin
from PyFin.Analysis.transformer import transform
class TestTransformer(unittest.TestCase):
def test_transformer_with_category_name(self):
test_df = pd.DataFrame({'code': [1, 2, 3, 4, 1, 2, 3],
'b': [4, 5, 6, 7, 6, 5, 4],
'c': [9, 8, 7, 6, 5, 4, 3]},
index=[1, 1, 1, 1, 2, 2, 2],
dtype=float)
expression = SecurityMovingMax(2, 'b') + SecurityMovingMin(2, 'c')
calculated = transform(test_df, [expression], cols=['user_factor'], category_field='code')
expected = [13., 13., 13., 13., 11., 9, 9.]
np.testing.assert_array_almost_equal(calculated['user_factor'], expected)
def test_transformer_without_category_name(self):
test_df = pd.DataFrame({'b': [4, 5, 6, 7, 6, 5, 4],
'c': [9, 8, 7, 6, 5, 4, 3]},
index=[1, 2, 3, 4, 5, 6, 7],
dtype=float)
expression = SecurityMovingMax(20, 'b') + SecurityMovingMin(20, 'c')
calculated = transform(test_df, [expression], cols=['user_factor'])
expected = [13., 13., 13., 13., 12., 11., 10.]
np.testing.assert_array_almost_equal(calculated['user_factor'], expected)
def test_transformer_with_multiple_expression(self):
test_df = pd.DataFrame({'code': [1, 2, 3, 4, 1, 2, 3],
'b': [4, 5, 6, 7, 6, 5, 4],
'c': [9, 8, 7, 6, 5, 4, 3]},
index=[1, 1, 1, 1, 2, 2, 2],
dtype=float)
expression1 = SecurityMovingMax(20, 'b')
expression2 = SecurityMovingMin(20, 'c')
expression3 = SecurityMovingMax(20, 'b') + SecurityMovingMin(20, 'c')
calculated = transform(test_df,
[expression1, expression2, expression3],
cols=['factor1', 'factor2', 'factor3'],
category_field='code')
expected = [13., 13., 13., 13., 11., 9, 9.]
np.testing.assert_array_almost_equal(calculated['factor3'], expected)
np.testing.assert_array_almost_equal(calculated['factor1'] + calculated['factor2'], calculated['factor3'])
def test_transformer_with_multiple_mixed_expression(self):
test_df = pd.DataFrame({'code': [1, 2, 3, 4, 1, 2, 3],
'b': [4, 5, 6, 7, 6, 5, 4],
'c': [9, 8, 7, 6, 5, 4, 3]},
index=[1, 1, 1, 1, 2, 2, 2],
dtype=float)
expression1 = 'b'
expression2 = SecurityMovingMax(20, 'b') + SecurityMovingMin(20, 'c')
calculated = transform(test_df,
[expression1, expression2],
cols=['b', 'factor2'],
category_field='code')
expected = [13., 13., 13., 13., 11., 9, 9.]
np.testing.assert_array_almost_equal(calculated['b'], test_df['b'])
np.testing.assert_array_almost_equal(calculated['factor2'], expected)
def test_transformer_with_category_group_totally_different(self):
test_df = pd.DataFrame({'code': [1, 2, 3, 4, 5, 6, 7],
'b': [4, 5, 6, 7, 6, 5, 4],
'c': [9, 8, 7, 6, 5, 4, 3]},
index=[1, 1, 1, 1, 2, 2, 2],
dtype=float)
expression = SecurityMovingAverage(2, 'b')
calculated = transform(test_df,
[expression],
cols=['ma'],
category_field='code')
expected = [4., 5., 6., 7., 6., 5., 4.]
np.testing.assert_array_almost_equal(calculated['ma'], expected)
def test_transformer_with_filter_value_holder(self):
test_df = pd.DataFrame({'code': [1, 2, 3, 4, 1, 2, 3],
'b': [4, 5, 6, 7, 6, 5, 4],
'c': [9, 8, 7, 6, 5, 4, 3]},
index=[1, 1, 1, 1, 2, 2, 2],
dtype=float)
value_holder = SecurityLatestValueHolder('b')
filter = SecurityLatestValueHolder('b') >= 5
filtered_value_holder = value_holder[filter]
calculated = transform(test_df,
[filtered_value_holder],
cols=['filtered_b'],
category_field='code')
self.assertTrue(np.all(calculated['filtered_b'] >= 5))
expected = test_df[test_df.b >= 5]
np.testing.assert_array_almost_equal(expected.b, calculated['filtered_b'])
np.testing.assert_array_almost_equal(expected.code, calculated['code'])
def test_transformer_with_category_value(self):
test_df = pd.DataFrame({'code': [1, 2, 3, 4, 1, 2, 3],
'b': [4, 5, 6, 7, 6, 5, 4],
'c': [9, 8, 7, 6, 5, 4, 3]},
index=[1, 1, 1, 1, 2, 2, 2],
dtype=float)
value_holder = SecurityLatestValueHolder('b')
filter = SecurityLatestValueHolder('b') >= 5
filtered_value_holder = value_holder[filter]
calculated = transform(test_df,
[filtered_value_holder],
cols=['filtered_b'],
category_field='code')
self.assertTrue(np.all(calculated['filtered_b'] >= 5))
expected = test_df[test_df.b >= 5]
np.testing.assert_array_almost_equal(expected.b, calculated['filtered_b'])
np.testing.assert_array_equal(expected.code, calculated['code'])
@staticmethod
def test_transformer_with_category_for_comparing():
test_df = pd.DataFrame({'code': [1, 2, 3, 4, 1, 2, 3],
'b': [4, 5, 6, 7, 6, 5, 4],
'c': [9, 8, 7, 6, 5, 4, 3]},
index=[1, 1, 1, 1, 2, 2, 2],
dtype=float)
value_holder = SecurityLatestValueHolder('b') > 4.
calculated = transform(test_df,
[value_holder],
cols=['filter'],
category_field='code')
expected = [0., 1., 1., 1., 1., 1., 0.]
np.testing.assert_array_almost_equal(expected, calculated['filter'])
| mit |
fbagirov/scikit-learn | examples/covariance/plot_covariance_estimation.py | 250 | 5070 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.grid_search import GridSearchCV
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
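# Illustrative aside (not part of the original example; the names alpha_demo,
# p_demo and shrunk_by_hand are ours): ShrunkCovariance blends the empirical
# covariance with a scaled identity,
#     Sigma(alpha) = (1 - alpha) * S + alpha * (trace(S) / p) * Id
# which can be written out by hand for a single shrinkage value:
alpha_demo = 0.1
p_demo = n_features
shrunk_by_hand = ((1. - alpha_demo) * emp_cov
                  + alpha_demo * (np.trace(emp_cov) / p_demo) * np.eye(p_demo))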
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
YosefLab/scVI | scvi/model/linear_scvi.py | 1 | 4076 | import logging
import pandas as pd
from anndata import AnnData
from scvi._compat import Literal
from scvi.core.data_loaders import ScviDataLoader
from scvi.core.models import BaseModelClass, RNASeqMixin, VAEMixin
from scvi.core.modules import LDVAE
from scvi.core.trainers import UnsupervisedTrainer
from scvi.model._utils import _get_var_names_from_setup_anndata
logger = logging.getLogger(__name__)
class LinearSCVI(RNASeqMixin, VAEMixin, BaseModelClass):
"""
Linearly-decoded VAE [Svensson20]_.
Parameters
----------
adata
AnnData object that has been registered via :func:`~scvi.data.setup_anndata`.
n_hidden
Number of nodes per hidden layer.
n_latent
Dimensionality of the latent space.
n_layers
Number of hidden layers used for encoder NN.
dropout_rate
Dropout rate for neural networks.
dispersion
One of the following:
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
gene_likelihood
One of:
* ``'nb'`` - Negative binomial distribution
* ``'zinb'`` - Zero-inflated negative binomial distribution
* ``'poisson'`` - Poisson distribution
latent_distribution
One of:
* ``'normal'`` - Normal distribution
* ``'ln'`` - Logistic normal distribution (Normal(0, I) transformed by softmax)
use_cuda
Use the GPU or not.
**model_kwargs
Keyword args for :class:`~scvi.core.modules.LDVAE`
Examples
--------
>>> adata = anndata.read_h5ad(path_to_anndata)
>>> scvi.data.setup_anndata(adata, batch_key="batch")
>>> vae = scvi.model.LinearSCVI(adata)
>>> vae.train()
>>> adata.var["loadings"] = vae.get_loadings()
"""
def __init__(
self,
adata: AnnData,
n_hidden: int = 128,
n_latent: int = 10,
n_layers: int = 1,
dropout_rate: float = 0.1,
dispersion: Literal["gene", "gene-batch", "gene-label", "gene-cell"] = "gene",
gene_likelihood: Literal["zinb", "nb", "poisson"] = "nb",
latent_distribution: Literal["normal", "ln"] = "normal",
use_cuda: bool = True,
**model_kwargs,
):
super(LinearSCVI, self).__init__(adata, use_cuda=use_cuda)
self.model = LDVAE(
n_input=self.summary_stats["n_vars"],
n_batch=self.summary_stats["n_batch"],
n_hidden=n_hidden,
n_latent=n_latent,
n_layers_encoder=n_layers,
dropout_rate=dropout_rate,
dispersion=dispersion,
gene_likelihood=gene_likelihood,
latent_distribution=latent_distribution,
**model_kwargs,
)
self._model_summary_string = (
"LinearSCVI Model with the following params: \nn_hidden: {}, n_latent: {}, n_layers: {}, dropout_rate: "
"{}, dispersion: {}, gene_likelihood: {}, latent_distribution: {}"
).format(
n_hidden,
n_latent,
n_layers,
dropout_rate,
dispersion,
gene_likelihood,
latent_distribution,
)
self.n_latent = n_latent
self.init_params_ = self._get_init_params(locals())
@property
def _trainer_class(self):
return UnsupervisedTrainer
@property
def _scvi_dl_class(self):
return ScviDataLoader
def get_loadings(self) -> pd.DataFrame:
"""
Extract per-gene weights in the linear decoder.
Shape is genes by `n_latent`.
"""
cols = ["Z_{}".format(i) for i in range(self.n_latent)]
var_names = _get_var_names_from_setup_anndata(self.adata)
loadings = pd.DataFrame(
self.model.get_loadings(), index=var_names, columns=cols
)
return loadings
| bsd-3-clause |
supriyantomaftuh/syzygy | third_party/numpy/files/numpy/lib/twodim_base.py | 70 | 23431 | """ Basic functions for manipulating 2d arrays
"""
__all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu',
'tril','vander','histogram2d','mask_indices',
'tril_indices','tril_indices_from','triu_indices','triu_indices_from',
]
from numpy.core.numeric import asanyarray, equal, subtract, arange, \
zeros, greater_equal, multiply, ones, asarray, alltrue, where, \
empty
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Does not require the array to be
two-dimensional.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1,...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0,1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0,1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
    tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n,n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
if k >= s[1]:
return empty(0, dtype=v.dtype)
if v.flags.f_contiguous:
# faster slicing
v, k, s = v.T, -k, s[::-1]
if k >= 0:
i = k
else:
i = (-k) * s[1]
return v[:s[1]-k].flat[i::s[1]+1]
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n,n), v.dtype)
if (k >= 0):
i = arange(0,n-k)
fi = i+k+i*n
else:
i = arange(0,n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
T : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal(subtract.outer(arange(N), arange(M)),-k)
return m.astype(dtype)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
L : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
out = multiply(tri(m.shape[0], m.shape[1], k=k, dtype=int),m)
return out
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
out = multiply((1 - tri(m.shape[0], m.shape[1], k - 1, int)), m)
return out
# borrowed from John Hunter and matplotlib
def vander(x, N=None):
"""
Generate a Van der Monde matrix.
The columns of the output matrix are decreasing powers of the input
vector. Specifically, the `i`-th output column is the input vector
raised element-wise to the power of ``N - i - 1``. Such a matrix with
a geometric progression in each row is named for Alexandre-Theophile
Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Order of (number of columns in) the output. If `N` is not specified,
a square array is returned (``N = len(x)``).
Returns
-------
out : ndarray
Van der Monde matrix of order `N`. The first column is ``x^(N-1)``,
the second ``x^(N-2)`` and so forth.
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if N is None:
N=len(x)
X = ones( (len(x),N), x.dtype)
for i in range(N - 1):
X[:,i] = x**(N - i - 1)
return X
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape(N,)
A sequence of values to be histogrammed along the first dimension.
y : array_like, shape(M,)
A sequence of values to be histogrammed along the second dimension.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If [int, int], the number of bins in each dimension (nx, ny = bins).
* If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins).
* If [array, array], the bin edges in each dimension (x_edges, y_edges = bins).
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True, returns
the bin density, i.e. the bin count divided by the bin area.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights
are normalized to 1 if `normed` is True. If `normed` is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram: 1D histogram
histogramdd: Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample density,
defined such that:
.. math::
\\sum_{i=0}^{nx-1} \\sum_{j=0}^{ny-1} H_{i,j} \\Delta x_i \\Delta y_j = 1
where `H` is the histogram array and :math:`\\Delta x_i \\Delta y_i`
the area of bin `{i,j}`.
Please note that the histogram does not follow the Cartesian convention
    where `x` values are on the abscissa and `y` values on the ordinate axis.
Rather, `x` is histogrammed along the first dimension of the array
(vertical), and `y` along the second dimension of the array (horizontal).
This ensures compatibility with `histogramdd`.
Examples
--------
>>> x, y = np.random.randn(2, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(5, 8))
>>> H.shape, xedges.shape, yedges.shape
((5, 8), (6,), (9,))
We can now use the Matplotlib to visualize this 2-dimensional histogram:
>>> extent = [yedges[0], yedges[-1], xedges[-1], xedges[0]]
>>> import matplotlib.pyplot as plt
>>> plt.imshow(H, extent=extent, interpolation='nearest')
<matplotlib.image.AxesImage object at ...>
>>> plt.colorbar()
<matplotlib.colorbar.Colorbar instance at ...>
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x,y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n,n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0):
"""
Return the indices for the lower-triangle of an (n, n) array.
Parameters
----------
n : int
The row dimension of the square arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return mask_indices(n, tril, k)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
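    Examples
    --------
    Illustrative use (mirrors the `tril_indices` examples above):
    >>> a = np.arange(16).reshape(4, 4)
    >>> il = np.tril_indices_from(a)
    >>> a[il]
    array([ 0,  4,  5,  8,  9, 10, 12, 13, 14, 15])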
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return tril_indices(arr.shape[0], k)
def triu_indices(n, k=0):
"""
Return the indices for the upper-triangle of an (n, n) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return mask_indices(n, triu, k)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of an (n, n) array.
See `triu_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `triu` for details).
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
raise ValueError("input array must be 2-d and square")
return triu_indices(arr.shape[0],k)
| apache-2.0 |
Carralex/landlab | landlab/ca/celllab_cts.py | 4 | 86530 | #! /usr/env/python
"""
Landlab's Continuous-Time Stochastic (CTS) cellular automata modeling package.
Overview
--------
A CellLab CTS model implements a particular type of cellular
automaton (CA): a continuous-time stochastic CA. The approach is based on that
of Narteau et al. (2002, 2009) and Rozier and Narteau (2014). Like a normal
CA, the domain consists of a lattice of cells, each of which has a discrete
state. Unlike a conventional CA, the updating process is stochastic, and takes
place in continuous rather than discrete time. Any given pair (or "doublet")
of adjacent cell states has a certain specified probability of transition to a
different pair of states. The transition probability is given in the form of an
average *transition rate*, :math:`\lambda` (with dimensions of 1/T); the actual
time of transition is a random variable drawn from an exponential probability
distribution with mean :math:`1/\lambda`.
Subclasses
----------
Landlab provides for several different lattice and connection types:
- RasterCTS: regular raster grid with transitions between horizontal and
vertical cell pairs
- OrientedRasterCTS: like a RasterLCA, but different transition rates can
be assigned to vertical and horizontal pairs. This property of
orientation can be used, for example, to implement rules representing
gravitational attraction, or flow of a fluid with a particular
direction.
- RasterD8CTS: like a RasterLCA, but includes diagonal as well as vertical
and horizontal cell pairs.
- OrientedRasterD8CTS: as above but orientation also matters.
- HexCTS: hexagonal grid
- OrientedHexCTS: hexagonal grid, with transition rates allowed to vary
according to orientation.
Encoding of "states"
--------------------
As in any traditional cellular automaton model, a LandlabCellularAutomaton
contains a grid of cells ("nodes" in Landlab parlance), each of which is has a
discrete state. States are represented by integers (0, 1, ... N).
In addition, every active link has an *orientation code* and a *link state
code*. The orientation code represents the orientation of the link in space: is
it "vertical" (aligned with the y axis), "horizontal" (aligned with x), or in
some other orientation? The number of possible orientations depends on the
subclass. The base class has only one orientation code (0) (meaning
"orientation doesn't matter), but this is overridden in some of the subclasses.
For example, the OrientedRasterLCA has two orientation codes (0 and 1, for
vertical and horizontal), while the OrientedHexLCA has three (representing the
three axes in a hex-cell / triagonal grid).
Each active link also has a *link state code*. The *state* of a link refers to
its particular combination of nodes and its orientation. For example, link
state 1 refers to a link in which the tail-node has state 0, the head-node has
state 1, and the orientation code is 0. The number of possible link states is
equal to R N^2, where R is the number of orientations (1 to 3, depending on the
subclass) and N is the number of possible node states. The simplest possible
Landlab CA model would have just one orientation code and two possible cell
states, so that there are four unique link states. These would be represented
by the tuples of (tail-node state, head-node state, orientation) as follows::
link state 0 = (0, 0, 0)
link state 1 = (0, 1, 0)
link state 2 = (1, 0, 0)
link state 3 = (1, 1, 0)
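As a minimal illustrative sketch (not part of this module's API), the same
enumeration can be written as::

    N, R = 2, 1   # two node states, one orientation
    link_states = [(tail, head, orient)
                   for orient in range(R)
                   for tail in range(N)
                   for head in range(N)]
    # gives [(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0)], i.e. R * N**2 = 4 states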
Main data structures
--------------------
node_state : 1d array (x number of nodes in grid)
Node-based grid of node-state codes. This is the grid of cell (sic) states.
link_state_dict : dictionary
Keys are 3-element tuples that represent the cell-state pairs and
orientation code for each possible link type; values are the corresponding
link-state codes. Allows you to look up the link-state code corresponding
to a particular pair of adjacent nodes with a particular orientation.
node_pair : list (x number of possible link states)
List of 3-element tuples representing all the various link states. Allows
you to look up the node states and orientation corresponding to a
particular link-state ID.
event_queue : heap of Event objects
Queue containing all future transition events, sorted by time of occurrence
(from soonest to latest).
next_update : 1d array (x number of active links)
Time (in the future) at which the link will undergo its next transition.
You might notice that the update time for every scheduled transition is
also stored in each Event object in the event queue. Why store it twice?
Because a scheduled event might be invalidated after the event has been
scheduled (because another transition has changed one of a link's two
nodes, for example). The way to tell whether a scheduled event is still
valid is to compare its time with the corresponding transition time in the
*next_update* array. If they are different, the event is discarded (see the
sketch just after this list).
link_orientation : 1d array of ints (x number of active links)
Orientation code for each link.
link_state : 1d array of ints (x number of active links)
State code for each link.
n_xn : 1d array of ints (x number of possible link states)
Number of transitions ("xn" stands for "transition") from a given link
state.
xn_to : 2d array of ints (# possible link states x max. # transitions)
Stores the link-state code(s) to which a particular link state can
transition. "max. # transitions" means the maximum number of transitions
from a single state. For example, if each link state is associated with one
and only one transition, then the maximum is 1, but if there is at least
one link state that can have either of two different transitions, then the
maximum would be two.
xn_rate : 2d array of floats (# possible link states x max. # transitions)
Rate associated with each link-state transition.
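As a sketch, the validity test described under *next_update* amounts to a
single comparison (assuming *event* is the popped Event object)::
    event_is_still_valid = (event.time == next_update[event.link])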
Created GT Sep 2014, starting from link_cap.py.
"""
from __future__ import print_function
from _heapq import heappush
from _heapq import heappop
from _heapq import heapify
import landlab
import numpy as np
import pylab as plt
_USE_CYTHON = False
_CYTEST = True
_RUN_NEW = True
_TESTING = True
if _TESTING:
from .cfuncs import PriorityQueue
if _USE_CYTHON:
from .cfuncs import (update_link_states_and_transitions,
run_cts, run_cts_lean, PriorityQueue)
if _CYTEST:
from landlab.ca.cfuncs import (update_node_states,
push_transitions_to_event_queue_new,
do_transition_new,
update_link_states_and_transitions_new,
run_cts_new)
_NEVER = 1e50
_DEBUG = False
_TEST = False
_CORE = landlab.grid.base.CORE_NODE
class Transition(object):
"""A transition from one state to another.
Represents a transition from one state ("from_state") to another
("to_state") at a link. The transition probability is represented by a rate
parameter "rate", with dimensions of 1/T. The probability distribution of
    time until the transition event occurs is exponential with mean 1/rate.
The optional name parameter allows the caller to assign a name to any given
transition.
Note that from_state and to_state can now be either integer IDs for the
standardised ordering of the link states (as before), or tuples explicitly
describing the node state at each end, and the orientation.
Orientation is 0: horizontal, L-R; 1: vertical, bottom-top.
For such a tuple, order is (left/bottom, right/top, orientation).
Transition() constructor sets 3 required properties and 2 optional
properties for a transition from one cell pair to another.
Parameters
----------
from_state : int
Code for the starting state of the cell pair (link)
to_state : int
Code for the new state of the cell pair (link)
rate : float
Average rate at which this transition occurs (dimension of 1/time)
name : string (optional)
Name for this transition
swap_properties : bool (optional)
Flag: should properties be exchanged between the two cells?
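    Examples
    --------
    A minimal illustration (the particular states, rate, and name here are
    arbitrary):
    >>> from landlab.ca.celllab_cts import Transition
    >>> t = Transition((0, 1, 0), (1, 0, 0), 1.0, name='example')
    >>> t.rate
    1.0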
"""
def __init__(self, from_state, to_state, rate, name=None,
swap_properties=False, prop_update_fn=None):
"""
Transition() constructor sets 3 required properties and 2 optional
properties for a transition from one cell pair to another.
Parameters
----------
from_state : int
Code for the starting state of the cell pair (link)
to_state : int
Code for the new state of the cell pair (link)
rate : float
Average rate at which this transition occurs (dimension of 1/time)
name : string (optional)
Name for this transition
swap_properties : bool (optional)
Flag: should properties be exchanged between the two cells?
"""
self.from_state = from_state
self.to_state = to_state
self.rate = rate
self.name = name
self.swap_properties = swap_properties
self.prop_update_fn = prop_update_fn
if _USE_CYTHON:
from .cfuncs import Event
else:
class Event(object):
"""Transition event at a link.
Represents a transition event at a link. The transition occurs at a
given link and a given time, and it involves a transition into the
state xn_to (an integer code representing the new link state;
"xn" is shorthand for "transition").
The class overrides the __lt__ (less than operator) method so that when
Event() objects are placed in a PriorityQueue, the earliest event is
given the highest priority (i.e., placed at the top of the queue).
Event() constructor sets 3 required properties and one optional
property.
Parameters
----------
time : float
Time at which the event is scheduled to occur
link : int
ID of the link at which event occurs
xn_to : int
New state to which this cell pair (link) will transition
propswap : bool (optional)
Flag: does this event involve an exchange of properties between
the two cells?
Examples
--------
>>> from landlab.ca.celllab_cts import Event
>>> e1 = Event( 10.0, 1, 2)
>>> e2 = Event( 2.0, 3, 1)
>>> e1 < e2
False
>>> e2 < e1
True
"""
def __init__(self, time, link, xn_to, propswap=False, prop_update_fn=None):
"""
Event() constructor sets 3 required properties and one optional
property.
Parameters
----------
time : float
Time at which the event is scheduled to occur
link : int
ID of the link at which event occurs
xn_to : int
New state to which this cell pair (link) will transition
propswap : bool (optional)
Flag: does this event involve an exchange of properties between
the two cells?
"""
self.time = time
self.link = link
self.xn_to = xn_to
self.propswap = propswap
self.prop_update_fn = prop_update_fn
def __lt__(self, other):
"""
Overridden less-than operator: returns true if the event on the left
has an earlier scheduled time than the event on the right
"""
return self.time < other.time
class CAPlotter(object):
"""Handle display of a CellLab-CTS grid.
CAPlotter() constructor keeps a reference to the CA model, and
optionally a colormap to be used with plots.
Parameters
----------
ca : LandlabCellularAutomaton object
Reference to a CA model
cmap : Matplotlib colormap, optional
Colormap to be used in plotting
"""
def __init__(self, ca, cmap=None):
"""
CAPlotter() constructor keeps a reference to the CA model, and
optionally a colormap to be used with plots.
Parameters
----------
ca : LandlabCellularAutomaton object
Reference to a CA model
cmap : Matplotlib colormap, optional
Colormap to be used in plotting
"""
import matplotlib
# Set the colormap; default to matplotlib's "jet" colormap
if cmap is None:
self._cmap = matplotlib.cm.jet
else:
self._cmap = cmap
# Keep a reference to the CA model
self.ca = ca
# Initialize the plot and remember the grid type
plt.ion()
plt.figure(1)
if type(ca.grid) is landlab.grid.hex.HexModelGrid:
self.gridtype = 'hex'
else:
self.gridtype = 'rast'
def update_plot(self):
"""Plot the current node state grid."""
plt.clf()
if self.gridtype == 'rast':
nsr = self.ca.grid.node_vector_to_raster(self.ca.node_state)
plt.imshow(nsr, interpolation='None',
origin='lower', cmap=self._cmap)
else:
self.ca.grid.hexplot(self.ca.node_state, color_map=self._cmap)
plt.draw()
plt.pause(0.001)
def finalize(self):
"""Wrap up plotting.
        Wrap up plotting by switching off interactive mode and showing the
plot.
"""
plt.ioff()
plt.show()
class CellLabCTSModel(object):
"""Link-type (or doublet-type) cellular automaton model.
A CellLabCTSModel implements a link-type (or doublet-type) cellular
automaton model. A link connects a pair of cells. Each cell has a state
(represented by an integer code), and each link also has a state that is
determined by the states of the cell pair.
Parameters
----------
model_grid : Landlab ModelGrid object
Reference to the model's grid
node_state_dict : dict
Keys are node-state codes, values are the names associated with
these codes
transition_list : list of Transition objects
List of all possible transitions in the model
initial_node_states : array of ints (x number of nodes in grid)
Starting values for node-state grid
prop_data : array (x number of nodes in grid), optional
Array of properties associated with each node/cell
prop_reset_value : number or object, optional
Default or initial value for a node/cell property (e.g., 0.0).
Must be same type as *prop_data*.
"""
def __init__(self, model_grid, node_state_dict, transition_list,
initial_node_states, prop_data=None, prop_reset_value=None,
seed=0):
"""Initialize the CA model.
Parameters
----------
model_grid : Landlab ModelGrid object
Reference to the model's grid
node_state_dict : dict
Keys are node-state codes, values are the names associated with
these codes
transition_list : list of Transition objects
List of all possible transitions in the model
initial_node_states : array of ints (x number of nodes in grid)
Starting values for node-state grid
prop_data : array (x number of nodes in grid), optional
Array of properties associated with each node/cell
prop_reset_value : number or object, optional
Default or initial value for a node/cell property (e.g., 0.0).
Must be same type as *prop_data*.
seed : int, optional
Seed for random number generation.
"""
# Are we calling this from a subclass __init__? If so, then the
# variable self.number_of_orientations should already be defined.
try:
self.number_of_orientations == 1
except AttributeError:
# if self.number_of_orientations not already defined
self.number_of_orientations = 1
# Keep a copy of the model grid; remember how many active links in it
self.grid = model_grid
# Initialize random number generation
np.random.seed(seed)
# Create an array that knows which links are connected to a boundary
# node
self.bnd_lnk = np.zeros(self.grid.number_of_links, dtype=np.int8)
for link_id in range(self.grid.number_of_links):
if self.grid.status_at_node[self.grid.node_at_link_tail[link_id]] != _CORE or self.grid.status_at_node[self.grid.node_at_link_head[link_id]] != _CORE:
self.bnd_lnk[link_id] = True
# Set up the initial node-state grid
self.set_node_state_grid(initial_node_states)
# Current simulation time starts out at zero
self.current_time = 0.0
# Figure out how many states there are, and make sure the input data
# are self consistent.
# There are 2 x (N^2) link states, where N is the number of node
# states. For example, if there are just two node states, 0 and 1, then
# the possible oriented link pairs are listed below:
# 0-0 0-1 1-0 1-1 0 0 1 1
# 0 1 0 1
self.num_node_states = len(node_state_dict)
self.num_node_states_sq = self.num_node_states * self.num_node_states
self.num_link_states = (self.number_of_orientations *
self.num_node_states_sq)
assert type(transition_list) is list, 'transition_list must be a list!'
assert (transition_list), \
'Transition list must contain at least one transition'
last_type = None
for t in transition_list:
try:
assert (t.from_state < self.num_link_states), \
'Transition from_state out of range'
assert (t.to_state < self.num_link_states), \
'Transition to_state out of range'
this_type = int
# TODO: make orientation optional for cases where
# self.number_of_orientations = 1
except:
# added to allow from and to states to be tuples, not just ids
assert type(t.from_state) == tuple, \
'Transition from_state out of range'
assert type(t.to_state) == tuple, \
'Transition to_state out of range'
for i in t.from_state[:-1]:
assert (i < self.num_node_states), \
'Transition from_state out of range'
for i in t.to_state[:-1]:
assert (i < self.num_node_states), \
'Transition to_state out of range'
assert t.from_state[-1] < self.number_of_orientations, \
'Encoding for orientation in from_state must be < number of orientations.'
assert t.to_state[-1] < self.number_of_orientations, \
'Encoding for orientation in to_state must be < number of orientations.'
this_type = tuple
assert last_type == this_type or last_type == None, \
'All transition types must be either int IDs, or all tuples.'
# this test to ensure all entries are either IDs, or tuples, not
# mixed
last_type = this_type
# Create priority queue for events and next_update array for links
self.event_queue = []
heapify(self.event_queue)
self.next_update = self.grid.add_zeros('link', 'next_update_time')
self.priority_queue = PriorityQueue()
self.next_trn_id = -np.ones(self.grid.number_of_links, dtype=np.int)
# Assign link types from node types
self.create_link_state_dict_and_pair_list()
# DEJH adds: convert transition_list to IDs if necessary
# This is the new part that allows Transition from_ and to_ types
# to be specified either as ints, or as tuples.
transition_list_as_ID = transition_list[:]
if type(transition_list[0].from_state) == tuple:
#(then they all are..., because of the assertions in __init__)
for i in range(len(transition_list)):
transition_list_as_ID[i].from_state = self.link_state_dict[
transition_list[i].from_state]
transition_list_as_ID[i].to_state = self.link_state_dict[
transition_list[i].to_state]
# Set up the information needed to determine the orientation of links
# in the lattice. The default method just creates an array of zeros
# (all orientations considered the same), but this will be overridden
# in subclasses that do use orientation.
self.setup_array_of_orientation_codes()
# Using the grid of node states, figure out all the link states
self.assign_link_states_from_node_types()
# Create transition data for links
self.setup_transition_data(transition_list_as_ID)
# Put the various transitions on the event queue
if _CYTEST:
push_transitions_to_event_queue_new(self.grid.number_of_active_links,
self.grid.active_links,
self.n_trn, self.link_state,
self.trn_id, self.trn_rate,
self.next_update,
self.next_trn_id,
self.priority_queue)
elif _RUN_NEW:
self.push_transitions_to_event_queue_new()
else:
self.push_transitions_to_event_queue()
# In order to keep track of cell "properties", we create an array of
# indices that refer to locations in the caller's code where properties
# are tracked.
self.propid = np.arange(self.grid.number_of_nodes)
if prop_data is None:
self.prop_data = np.zeros(self.grid.number_of_nodes)
self.prop_reset_value = 0.0
else:
self.prop_data = prop_data
self.prop_reset_value = prop_reset_value
# Determine and remember whether we will handle property swaps and/or
# callbacks in this model.
if np.amax(self.xn_propswap) > 0:
self._use_propswap_or_callback = True
else:
self._use_propswap_or_callback = False
def set_node_state_grid(self, node_states):
"""Set the grid of node-state codes to node_states.
Sets the grid of node-state codes to node_states. Also checks
to make sure node_states is in the proper format, which is to
say, it's a Numpy array of the same length as the number of nodes in
the grid.
**Creates**:
* self.node_state : 1D array of ints (x number of nodes in grid)
The node-state array
Parameters
----------
node_states : 1D array of ints (x number of nodes in grid)
Notes
-----
The node-state array is attached to the grid as a field with the name
'node_state'.
"""
assert (type(node_states) is np.ndarray), \
'initial_node_states must be a Numpy array'
assert (len(node_states) == self.grid.number_of_nodes), \
'length of initial_node_states must equal number of nodes in grid'
self.grid.at_node['node_state'] = node_states
self.node_state = node_states
def create_link_state_dict_and_pair_list(self):
"""Create a dict of link-state to node-state.
Creates a dictionary that can be used as a lookup table to find out
which link state corresponds to a particular pair of node states. The
dictionary keys are 3-element tuples, each of which represents the
state of the TAIL node, the HEAD node, and the orientation of the link.
The values are integer codes representing the link state numbers.
Notes
-----
Performance note: making self.node_pair a tuple does not appear to
change time to lookup values in update_node_states. Changing it to a
2D array of int actually slows it down.
"""
self.link_state_dict = {}
self.node_pair = []
k = 0
for orientation in range(self.number_of_orientations):
for tail_state in range(self.num_node_states):
for head_state in range(self.num_node_states):
self.link_state_dict[
(tail_state, head_state, orientation)] = k
self.node_pair.append(
(tail_state, head_state, orientation))
k += 1
if False and _DEBUG:
print()
print('create_link_state_dict_and_pair_list(): dict is:')
print((self.link_state_dict))
print(' and the pair list is:')
print((self.node_pair))
def setup_array_of_orientation_codes(self):
"""Create array of active link orientation codes.
        Creates and configures an array that contains the orientation code for
each active link (and corresponding cell pair).
**creates**:
        * ``self.link_orientation`` : 1D numpy array
Notes
-----
The setup varies depending on the type of LCA. The default is
non-oriented, in which case we just have an array of zeros. Subclasses
will override this method to handle lattices in which orientation
matters (for example, vertical vs. horizontal in an OrientedRasterLCA).
"""
self.link_orientation = np.zeros(
self.grid.number_of_links, dtype=np.int8)
def assign_link_states_from_node_types(self):
"""Assign link-state code for each link.
Takes lists/arrays of "tail" and "head" node IDs for each link, and a
dictionary that associates pairs of node states (represented as a
        3-element tuple, comprising the TAIL state, HEAD state, and
orientation) to link states.
**creates**:
* ``self.link_state`` : 1D numpy array
"""
self.link_state = np.zeros(self.grid.number_of_links, dtype=int)
for i in self.grid.active_links:
orientation = self.link_orientation[i]
node_pair = (self.node_state[self.grid.node_at_link_tail[i]],
self.node_state[self.grid.node_at_link_head[i]],
orientation)
self.link_state[i] = self.link_state_dict[node_pair]
if False and _DEBUG:
print()
print('assign_link_states_from_node_types(): the link state array is:')
print((self.link_state))
def setup_transition_data(self, xn_list):
"""Create transition data arrays.
PREVIOUS METHOD:
Using the transition list and the number of link states, creates
three arrays that collectively contain data on state transitions:
* ``n_xn``: for each link state, contains the number of transitions out
of that state.
* ``xn_to``: 2D array that records, for each link state and each
transition, the new state into which the link transitions.
* ``xn_rate``: 2D array that records, for each link state and each
transition, the rate (1/time) of the transition.
* ``xn_propswap``: 2D array that indicates, for each link state and
each transition, whether that transition is accompanied by a
"property" swap, in which the two cells exchange properties (in
order to represent a particle moving)
NEW METHOD:
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.ca.celllab_cts import Transition
>>> from landlab.ca.oriented_raster_cts import OrientedRasterCTS
>>> import numpy as np
>>> grid = RasterModelGrid((3, 4))
>>> nsd = {0 : 'zero', 1 : 'one'}
>>> trn_list = []
>>> trn_list.append(Transition((0, 1, 0), (1, 0, 0), 1.0))
>>> trn_list.append(Transition((1, 0, 0), (0, 1, 0), 2.0))
>>> trn_list.append(Transition((0, 1, 1), (1, 0, 1), 3.0))
>>> trn_list.append(Transition((0, 1, 1), (1, 1, 1), 4.0))
>>> ins = np.arange(12) % 2
>>> cts = OrientedRasterCTS(grid, nsd, trn_list, ins)
>>> cts.n_trn
array([0, 1, 1, 0, 0, 2, 0, 0])
>>> cts.trn_id
array([[0, 0],
[0, 0],
[1, 0],
[0, 0],
[0, 0],
[2, 3],
[0, 0],
[0, 0]])
>>> cts.trn_to
array([2, 1, 6, 7])
>>> cts.trn_rate
array([ 1., 2., 3., 4.])
"""
# First, create an array that stores the number of possible transitions
# out of each state.
self.n_xn = np.zeros(self.num_link_states, dtype=int)
for xn in xn_list:
self.n_xn[xn.from_state] += 1
self.n_trn = np.zeros(self.num_link_states, dtype=np.int)
# Now, create arrays to hold the "to state" and transition rate for each
# transition. These arrays are dimensioned N x M where N is the number
# of states, and M is the maximum number of transitions from a single
# state (for example if state 3 could transition either to state 1 or
# state 4, and the other states only had one or zero possible
# transitions, then the maximum would be 2).
max_transitions = np.max(self.n_xn)
self.trn_id = np.zeros(
(self.num_link_states, max_transitions), dtype=np.int)
num_transitions = len(xn_list)
self.trn_to = np.zeros(num_transitions, dtype=np.int)
self.trn_rate = np.zeros(num_transitions)
self.trn_propswap = np.zeros(num_transitions, dtype=np.int8)
self.trn_prop_update_fn = np.zeros(num_transitions, dtype=object)
# OLD
self.xn_to = np.zeros(
(self.num_link_states, max_transitions), dtype=int)
self.xn_rate = np.zeros((self.num_link_states, max_transitions))
self.xn_propswap = np.zeros(
(self.num_link_states, max_transitions), dtype=np.int8)
self.xn_prop_update_fn = np.zeros(
(self.num_link_states, max_transitions), dtype=object)
for trn in range(num_transitions):
self.trn_to[trn] = xn_list[trn].to_state
self.trn_rate[trn] = xn_list[trn].rate
self.trn_propswap[trn] = xn_list[trn].swap_properties
if xn_list[trn].prop_update_fn is not None:
self.trn_prop_update_fn[trn] = xn_list[trn].prop_update_fn
self._use_propswap_or_callback = True
from_state = xn_list[trn].from_state
self.trn_id[from_state, self.n_trn[from_state]] = trn
self.n_trn[from_state] += 1
#OLD
# Populate the "to" and "rate" arrays
# reset this and then re-do (inefficient but should work)
self.n_xn[:] = 0
for xn in xn_list:
from_state = xn.from_state
self.xn_to[from_state][self.n_xn[from_state]] = xn.to_state
self.xn_rate[from_state][self.n_xn[from_state]] = xn.rate
self.xn_propswap[from_state][
self.n_xn[from_state]] = xn.swap_properties
if xn.prop_update_fn is not None:
self.xn_prop_update_fn[from_state][
self.n_xn[from_state]] = xn.prop_update_fn
self._use_propswap_or_callback = True
self.n_xn[from_state] += 1
if False and _DEBUG:
print()
print('setup_transition_data():')
print((' n_xn', self.n_xn))
print((' to:', self.xn_to))
print((' rate:', self.xn_rate))
def current_link_state(self, link_id):
"""Get the current state of a link.
Used to determine whether the link state at link *link_id* has changed
due to an independent change in the node-state grid. Returns the
current state of the link based on the states of its two end nodes;
this can be compared to the entry in self.link_state to determine
whether the state has changed.
Parameters
----------
link_id : int
ID of the active link to test
Returns
-------
int
New link state code
Notes
-----
Vectorizing this might yield some speed.
"""
# Find out the states of the two nodes, and the orientation
###tail_node_state = self.node_state[self.grid._activelink_fromnode[link_id]]
###head_node_state = self.node_state[self.grid._activelink_tonode[link_id]]
###orientation = self.active_link_orientation[link_id]
tail_node_state = self.node_state[self.grid.node_at_link_tail[link_id]]
head_node_state = self.node_state[self.grid.node_at_link_head[link_id]]
orientation = self.link_orientation[link_id]
# Return the corresponding state code.
#assert self.link_state_dict[(tail_node_state,head_node_state,orientation)]==orientation*self.num_node_states_sq+tail_node_state*self.num_node_states+head_node_state, 'ooops'
# return
# self.link_state_dict[(tail_node_state,head_node_state,orientation)]
return (orientation * self.num_node_states_sq +
tail_node_state * self.num_node_states + head_node_state)
def update_link_states_and_transitions(self, current_time):
"""
Following an "external" change to the node state grid, updates link
states where necessary and creates any needed events.
Notes
-----
**Algorithm**::
FOR each active link:
if the actual node pair is different from the link's code:
change the link state to be correct
schedule an event
"""
if _CYTEST:
update_link_states_and_transitions_new(self.grid.active_links,
self.node_state,
self.grid.node_at_link_tail,
self.grid.node_at_link_head,
self.link_orientation,
self.bnd_lnk,
self.link_state,
self.n_trn,
self.priority_queue,
self.next_update,
self.next_trn_id,
self.trn_id, self.trn_rate,
self.num_node_states,
self.num_node_states_sq,
current_time)
elif _USE_CYTHON:
update_link_states_and_transitions(self.grid.active_links,
self.node_state,
self.grid.node_at_link_tail,
self.grid.node_at_link_head,
self.link_orientation,
self.bnd_lnk,
self.link_state,
self.n_xn,
self.event_queue,
self.next_update,
self.xn_to, self.xn_rate,
self.num_node_states,
self.num_node_states_sq,
current_time,
self.xn_propswap,
self.xn_prop_update_fn)
else:
for i in self.grid.active_links:
current_state = self.current_link_state(i)
if current_state != self.link_state[i]:
self.update_link_state(i, current_state, current_time)
def update_link_states_and_transitions_new(self, current_time):
"""
Following an "external" change to the node state grid, updates link
states where necessary and creates any needed events.
Notes
-----
**Algorithm**::
FOR each active link:
if the actual node pair is different from the link's code:
change the link state to be correct
schedule an event
"""
if _CYTEST:
update_link_states_and_transitions_new(self.grid.active_links,
self.node_state,
self.grid.node_at_link_tail,
self.grid.node_at_link_head,
self.link_orientation,
self.bnd_lnk,
self.link_state,
self.n_trn,
self.priority_queue,
self.next_update,
self.next_trn_id,
self.trn_id, self.trn_rate,
self.num_node_states,
self.num_node_states_sq,
current_time)
else:
for i in self.grid.active_links:
current_state = self.current_link_state(i)
if current_state != self.link_state[i]:
self.update_link_state_new(i, current_state, current_time)
def get_next_event(self, link, current_state, current_time):
"""Get the next event for a link.
Returns the next event for link with ID "link", which is in state
"current state".
Parameters
----------
link : int
ID of the link
current_state : int
Current state code for the link
current_time : float
Current time in simulation (i.e., time of event just processed)
Returns
-------
Event object
The returned Event object contains the time, link ID, and type of
the next transition event at this link.
Notes
-----
If there is only one potential transition out of the current state, a
time for the transition is selected at random from an exponential
distribution with rate parameter appropriate for this transition.
        If there is more than one potential transition, a transition time is
        chosen for each, and the smallest of these is applied.
Assumes that there is at least one potential transition from the
current state.
"""
assert (self.n_xn[current_state] > 0), \
'must have at least one potential transition'
# Find next event time for each potential transition
if self.n_xn[current_state] == 1:
xn_to = self.xn_to[current_state][0]
propswap = self.xn_propswap[current_state][0]
next_time = np.random.exponential(
1.0 / self.xn_rate[current_state][0])
if _DEBUG:
print('next_time for 1 xn is ' + str(next_time))
prop_update_fn = self.xn_prop_update_fn[current_state][0]
else:
next_time = _NEVER
xn_to = None
propswap = False
for i in range(self.n_xn[current_state]):
this_next = np.random.exponential(
1.0 / self.xn_rate[current_state][i])
if _DEBUG:
print('this_next for >1 xn is ' + str(this_next))
if this_next < next_time:
next_time = this_next
xn_to = self.xn_to[current_state][i]
propswap = self.xn_propswap[current_state][i]
prop_update_fn = self.xn_prop_update_fn[current_state][i]
# Create and setup event, and return it
my_event = Event(next_time + current_time, link,
xn_to, propswap, prop_update_fn)
if _DEBUG:
print('get_next_event():')
print((' current state:', current_state))
print((' node pair:', self.node_pair[current_state]))
print((' next_time:', my_event.time))
print((' link:', my_event.link))
print((' xn_to:', my_event.xn_to))
print((' nxn:', self.n_xn[current_state]))
print((' rate:', self.xn_rate[current_state][:]))
print((' propswap:', my_event.propswap))
return my_event
def get_next_event_new(self, link, current_state, current_time):
"""Get the next event for a link.
Returns the next event for link with ID "link", which is in state
"current state".
Parameters
----------
link : int
ID of the link
current_state : int
Current state code for the link
current_time : float
Current time in simulation (i.e., time of event just processed)
Returns
-------
Event object
The returned Event object contains the time, link ID, and type of
the next transition event at this link.
Notes
-----
If there is only one potential transition out of the current state, a
time for the transition is selected at random from an exponential
distribution with rate parameter appropriate for this transition.
        If there is more than one potential transition, a transition time is
        chosen for each, and the smallest of these is applied.
Assumes that there is at least one potential transition from the
current state.
"""
assert (self.n_xn[current_state] > 0), \
'must have at least one potential transition'
#print('GNE P link ' + str(link) + ' state ' + str(current_state))
# Find next event time for each potential transition: new version
if self.n_trn[current_state] == 1:
trn_id = self.trn_id[current_state, 0]
next_time = np.random.exponential(1.0 / self.trn_rate[trn_id])
#print(' gne 1 trn, next = ' + str(next_time) + ' id=' + str(trn_id))
if _DEBUG:
print('rand (' + str(self.trn_rate[trn_id]) + ': ' + str(next_time))
else:
next_time = _NEVER
trn_id = -1
for i in range(self.n_trn[current_state]):
this_next = np.random.exponential(
1.0 / self.trn_rate[self.trn_id[current_state][i]])
if _DEBUG:
print('rand2 (' + str(self.trn_rate[self.trn_id[current_state][i]]) + ': ' + str(this_next))
if this_next < next_time:
if _DEBUG:
print(' using it')
next_time = this_next
trn_id = self.trn_id[current_state, i]
#print(' gne >1 trn, next = ' + str(next_time) + ' id=' + str(trn_id))
if _DEBUG:
print('get_next_event_new():')
print((' current state:', current_state))
print((' node pair:', self.node_pair[current_state]))
print((' next_time:', next_time))
print((' link:', link))
print((' trn_id:', trn_id))
return (next_time + current_time, trn_id)
def push_transitions_to_event_queue(self):
"""
Initializes the event queue by creating transition events for each
cell pair that has one or more potential transitions and pushing these
onto the queue. Also records scheduled transition times in the
self.next_update array.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.ca.celllab_cts import Transition
>>> from landlab.ca.oriented_raster_cts import OrientedRasterCTS
>>> import numpy as np
>>> grid = RasterModelGrid((3, 5))
>>> nsd = {0 : 'zero', 1 : 'one'}
>>> trn_list = []
>>> trn_list.append(Transition((0, 1, 0), (1, 0, 0), 1.0))
>>> trn_list.append(Transition((1, 0, 0), (0, 1, 0), 2.0))
>>> trn_list.append(Transition((0, 1, 1), (1, 0, 1), 3.0))
>>> trn_list.append(Transition((0, 1, 1), (1, 1, 1), 4.0))
>>> ins = np.arange(15) % 2
>>> cts = OrientedRasterCTS(grid, nsd, trn_list, ins)
"""
# >>> len(cts.event_queue)
# 7
# >>> np.round(100 * cts.event_queue[0].time)
# 12.0
# >>> cts.event_queue[0].link
# 16
# >>> np.round(100 * cts.next_update[16])
# 12.0
# >>> cts.event_queue[0].xn_to
# 7
# >>> np.round(100 * cts.event_queue[1].time)
# 28.0
# >>> cts.event_queue[1].link
# 11
# >>> cts.event_queue[1].xn_to
# 1
# >>> np.round(100 * cts.event_queue[6].time)
# 27.0
# >>> cts.event_queue[6].link
# 6
# >>> cts.event_queue[6].xn_to
# 6
if False and _DEBUG:
print(('push_transitions_to_event_queue():',
self.num_link_states, self.n_xn))
for i in self.grid.active_links:
# for i in range(self.grid.number_of_active_links):
if self.n_xn[self.link_state[i]] > 0:
#(event, ev_time, trn_id) = self.get_next_event_new(i, self.link_state[i], 0.0)
event = self.get_next_event(i, self.link_state[i], 0.0) #, self.n_xn,
# self.xn_to, self.xn_rate,
# self.xn_propswap,
# self.xn_prop_update_fn)
#print('At link ' + str(i) + ' with trn_id ' + str(trn_id))
#print('Pushing event ' + str(event.time) + ' ' + str(event.link) + ' ' + str(event.xn_to))
#print('This trn_id means trn_to ' + str(self.trn_to[trn_id]))
heappush(self.event_queue, event)
self.next_update[i] = event.time
# for NEW approach (gradual transition...)
#(next_time, trn_id) = self.get_next_event_new(i,self.link_state[i], 0.0)
#self.priority_queue.push(i, ev_time)
#self.next_trn_id[i] = trn_id
else:
self.next_update[i] = _NEVER
if False and _DEBUG:
print(' push_transitions_to_event_queue(): events in queue are now:')
for e in self.event_queue:
print(' next_time:', e.time, 'link:',
e.link, 'xn_to:', e.xn_to)
def push_transitions_to_event_queue_new(self):
"""
Initializes the event queue by creating transition events for each
cell pair that has one or more potential transitions and pushing these
onto the queue. Also records scheduled transition times in the
self.next_update array.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.ca.celllab_cts import Transition
>>> from landlab.ca.oriented_raster_cts import OrientedRasterCTS
>>> import numpy as np
>>> grid = RasterModelGrid((3, 5))
>>> nsd = {0 : 'zero', 1 : 'one'}
>>> trn_list = []
>>> trn_list.append(Transition((0, 1, 0), (1, 0, 0), 1.0))
>>> trn_list.append(Transition((1, 0, 0), (0, 1, 0), 2.0))
>>> trn_list.append(Transition((0, 1, 1), (1, 0, 1), 3.0))
>>> trn_list.append(Transition((0, 1, 1), (1, 1, 1), 4.0))
>>> ins = np.arange(15) % 2
>>> cts = OrientedRasterCTS(grid, nsd, trn_list, ins)
>>> ev0 = cts.priority_queue._queue[0]
>>> np.round(100 * ev0[0])
12.0
>>> ev0[2] # this is the link ID
16
>>> ev6 = cts.priority_queue._queue[6]
>>> np.round(100 * ev6[0])
27.0
>>> ev6[2] # this is the link ID
6
>>> cts.next_trn_id[ev0[2]] # ID of the transition to occur at this link
3
>>> cts.next_trn_id[cts.grid.active_links]
array([-1, 2, -1, 1, 0, 1, 0, 2, -1, 3])
"""
if False and _DEBUG:
print(('push_transitions_to_event_queue():',
self.num_link_states, self.n_xn))
for i in self.grid.active_links:
# for i in range(self.grid.number_of_active_links):
if self.n_xn[self.link_state[i]] > 0:
# if _CYTEST:
# (ev_time, trn_id) = get_next_event_new(
# i,
# self.link_state[i],
# 0.0,
# self.n_trn,
# self.trn_id,
# self.trn_rate)
# else:
(ev_time, trn_id) = self.get_next_event_new(i, self.link_state[i], 0.0)
#event = get_next_event(i, self.link_state[i], 0.0, self.n_xn,
# self.xn_to, self.xn_rate,
# self.xn_propswap,
# self.xn_prop_update_fn)
#print('At link ' + str(i) + ' with trn_id ' + str(trn_id))
#print('Pushing event ' + str(event.time) + ' ' + str(event.link) + ' ' + str(event.xn_to))
#print('This trn_id means trn_to ' + str(self.trn_to[trn_id]))
#heappush(self.event_queue, event)
# for NEW approach (gradual transition...)
#(next_time, trn_id) = self.get_next_event_new(i,self.link_state[i], 0.0)
self.priority_queue.push(i, ev_time)
self.next_update[i] = ev_time
self.next_trn_id[i] = trn_id
else:
self.next_update[i] = _NEVER
#@profile
def update_node_states(self, tail_node, head_node, new_link_state):
"""Update the states of the two nodes in the given link.
Parameters
----------
tail_node : int
ID of the tail node of the link (cell pair) in question
head_node : int
ID of the head node of the link (cell pair) in question
new_link_state : int
Link state code for the new cell pair
Returns
-------
(bool, bool)
Flags indicating whether the tail node and head node, respectively,
have changed state
"""
# Remember the previous state of each node so we can detect whether the
# state has changed
old_tail_node_state = self.node_state[tail_node]
old_head_node_state = self.node_state[head_node]
# Change to the new states
if self.grid.status_at_node[tail_node] == _CORE:
self.node_state[tail_node] = self.node_pair[new_link_state][0]
if self.grid.status_at_node[head_node] == _CORE:
self.node_state[head_node] = self.node_pair[new_link_state][1]
return self.node_state[tail_node] != old_tail_node_state, \
self.node_state[head_node] != old_head_node_state
def update_link_state(self, link, new_link_state, current_time):
"""
Implements a link transition by updating the current state of the link
and (if appropriate) choosing the next transition event and pushing it
on to the event queue.
Parameters
----------
link : int
ID of the link to update
new_link_state : int
Code for the new state
current_time : float
Current time in simulation
"""
if _DEBUG:
print('update_link_state() link ' + str(link) + ' to state ' + str(new_link_state) + ' at time ' + str(current_time))
# If the link connects to a boundary, we might have a different state
# than the one we planned
if self.bnd_lnk[link]:
fns = self.node_state[self.grid.node_at_link_tail[link]]
tns = self.node_state[self.grid.node_at_link_head[link]]
orientation = self.link_orientation[link]
new_link_state = orientation * self.num_node_states_sq + \
fns * self.num_node_states + tns
self.link_state[link] = new_link_state
if self.n_xn[new_link_state] > 0:
event = self.get_next_event(link, new_link_state, current_time)
heappush(self.event_queue, event)
self.next_update[link] = event.time
else:
self.next_update[link] = _NEVER
def update_link_state_new(self, link, new_link_state, current_time):
"""
Implements a link transition by updating the current state of the link
and (if appropriate) choosing the next transition event and pushing it
on to the event queue.
Parameters
----------
link : int
ID of the link to update
new_link_state : int
Code for the new state
current_time : float
Current time in simulation
"""
if _DEBUG:
print('update_link_state_new() link ' + str(link) + ' to state ' + str(new_link_state) + ' at time ' + str(current_time))
# If the link connects to a boundary, we might have a different state
# than the one we planned
if self.bnd_lnk[link]:
fns = self.node_state[self.grid.node_at_link_tail[link]]
tns = self.node_state[self.grid.node_at_link_head[link]]
orientation = self.link_orientation[link]
new_link_state = orientation * self.num_node_states_sq + \
fns * self.num_node_states + tns
self.link_state[link] = new_link_state
if self.n_xn[new_link_state] > 0:
if _CYTEST:
(event_time, trn_id) = get_next_event_new(link,
new_link_state,
current_time,
self.n_trn,
self.trn_id,
self.trn_rate)
else:
(event_time, trn_id) = self.get_next_event_new(link, new_link_state, current_time)
self.priority_queue.push(link, event_time)
if _DEBUG:
print('Pushed event at ' + str(link) + ' for time ' + str(event_time) + ' id ' + str(trn_id))
self.next_update[link] = event_time
self.next_trn_id[link] = trn_id
else:
self.next_update[link] = _NEVER
self.next_trn_id[link] = -1
def do_transition(self, event, current_time, plot_each_transition=False,
plotter=None):
"""Transition state.
Implements a state transition.
Parameters
----------
event : Event object
Event object containing the data for the current transition event
current_time : float
Current time in simulation
plot_each_transition : bool (optional)
True if caller wants to show a plot of the grid after this
transition
plotter : CAPlotter object
Sent if caller wants a plot after this transition
Notes
-----
First checks that the transition is still valid by comparing the
link's next_update time with the corresponding update time in the
event object.
If the transition is valid, we:
1. Update the states of the two nodes attached to the link
2. Update the link's state, choose its next transition, and push
it on the event queue.
3. Update the states of the other links attached to the two nodes,
choose their next transitions, and push them on the event queue.
"""
if _DEBUG:
print()
print('py do_transition() for link',event.link,'time',event.time, ' cur time ', current_time)
print(np.count_nonzero(self.node_state != 0))
print(self.link_state[event.link])
# We'll process the event if its update time matches the one we have
# recorded for the link in question. If not, it means that the link has
# changed state since the event was pushed onto the event queue, and
# in that case we'll ignore it.
if event.time == self.next_update[event.link]:
if _DEBUG:
print(' event time =', event.time)
tail_node = self.grid.node_at_link_tail[event.link]
head_node = self.grid.node_at_link_head[event.link]
#DEBUG
if plot_each_transition:
print(self.node_state[tail_node])
print(self.node_state[head_node])
print(self.link_orientation[event.link])
tail_changed, head_changed = self.update_node_states(
tail_node, head_node, event.xn_to)
self.update_link_state(event.link, event.xn_to, event.time)
if plot_each_transition:
print(self.node_state[tail_node])
print(self.node_state[head_node])
print(self.link_orientation[event.link])
# Next, when the state of one of the link's nodes changes, we have
# to update the states of the OTHER links attached to it. This
# could happen to one or both nodes.
if tail_changed:
if _DEBUG:
print(' tail node has changed state, so updating its links')
print(' links at node ' + str(tail_node) + ' are:')
print(self.grid.links_at_node[tail_node, :])
print(self.grid.active_link_dirs_at_node[tail_node, :])
for i in range(self.grid.links_at_node.shape[1]):
link = self.grid.links_at_node[tail_node, i]
dir_code = self.grid.active_link_dirs_at_node[tail_node, i]
if _DEBUG:
print('tail checking link', link)
print(' dir code ' + str(dir_code) + ' event link ' + str(event.link))
if dir_code != 0 and link != event.link:
this_link_fromnode = self.grid.node_at_link_tail[link]
this_link_tonode = self.grid.node_at_link_head[link]
orientation = self.link_orientation[link]
current_pair = (self.node_state[this_link_fromnode],
self.node_state[this_link_tonode],
orientation)
new_link_state = self.link_state_dict[current_pair]
new_link_state2 = (
orientation * self.num_node_states_sq +
self.node_state[this_link_fromnode] * self.num_node_states +
self.node_state[this_link_tonode])
assert new_link_state == new_link_state2, 'oops'
self.update_link_state(
link, new_link_state, event.time)
if head_changed:
if _DEBUG:
print(' head node has changed state, so updating its links')
for i in range(self.grid.links_at_node.shape[1]):
link = self.grid.links_at_node[head_node, i]
dir_code = self.grid.active_link_dirs_at_node[head_node, i]
if _DEBUG:
print('head checking link', link)
if dir_code != 0 and link != event.link:
this_link_fromnode = self.grid.node_at_link_tail[link]
this_link_tonode = self.grid.node_at_link_head[link]
orientation = self.link_orientation[link]
current_pair = (self.node_state[this_link_fromnode],
self.node_state[this_link_tonode],
orientation)
new_link_state = self.link_state_dict[current_pair]
new_link_state2 = (
orientation * self.num_node_states_sq +
self.node_state[this_link_fromnode] * self.num_node_states +
self.node_state[this_link_tonode])
assert new_link_state == new_link_state2, 'oops'
self.update_link_state(
link, new_link_state, event.time)
# If requested, display a plot of the grid
if plot_each_transition and (plotter is not None):
plotter.update_plot()
# If this event involves an exchange of properties (i.e., the
            # event involves motion of an object that possesses properties we
# want to track), implement the swap.
# If the event requires a call to a user-defined callback
# function, we handle that here too.
if event.propswap:
tmp = self.propid[tail_node]
self.propid[tail_node] = self.propid[head_node]
self.propid[head_node] = tmp
if self.grid.status_at_node[tail_node] != _CORE:
self.prop_data[self.propid[tail_node]
] = self.prop_reset_value
if self.grid.status_at_node[head_node] != _CORE:
self.prop_data[self.propid[head_node]
] = self.prop_reset_value
if event.prop_update_fn is not None:
event.prop_update_fn(
self, tail_node, head_node, event.time)
# if False and _DEBUG:
# n = self.grid.number_of_nodes
# for r in range(self.grid.number_of_node_rows):
# for c in range(self.grid.number_of_node_columns):
# n -= 1
# print('{0:.0f}'.format(self.node_state[n]), end=' ')
# print()
# if self.propid is not None:
# print()
# n = self.grid.number_of_nodes
# for r in range(self.grid.number_of_node_rows):
# for c in range(self.grid.number_of_node_columns):
# n -= 1
# print('{0:2.0f}'.format(self.propid[n]), end=' ')
# print()
elif _DEBUG:
print(' event time is', event.time, 'but update time is',
self.next_update[event.link], 'so event will be ignored')
def do_transition_new(self, event_link, event_time, current_time, plot_each_transition=False,
plotter=None):
"""Transition state.
Implements a state transition.
Parameters
----------
        event_link : int
            ID of the link at which the transition occurs
        event_time : float
            Time at which the transition is scheduled to occur
current_time : float
Current time in simulation
plot_each_transition : bool (optional)
True if caller wants to show a plot of the grid after this
transition
plotter : CAPlotter object
Sent if caller wants a plot after this transition
Notes
-----
First checks that the transition is still valid by comparing the
        link's next_update time with the *event_time* value passed in.
If the transition is valid, we:
1. Update the states of the two nodes attached to the link
2. Update the link's state, choose its next transition, and push
it on the event queue.
3. Update the states of the other links attached to the two nodes,
choose their next transitions, and push them on the event queue.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.ca.celllab_cts import Transition
>>> from landlab.ca.oriented_raster_cts import OrientedRasterCTS
>>> import numpy as np
>>> grid = RasterModelGrid((3, 5))
>>> nsd = {0 : 'zero', 1 : 'one'}
>>> trn_list = []
>>> trn_list.append(Transition((0, 1, 0), (1, 0, 0), 1.0))
>>> trn_list.append(Transition((1, 0, 0), (0, 1, 0), 2.0))
>>> trn_list.append(Transition((0, 1, 1), (1, 0, 1), 3.0))
>>> trn_list.append(Transition((0, 1, 1), (1, 1, 1), 4.0))
>>> ins = np.arange(15) % 2
>>> cts = OrientedRasterCTS(grid, nsd, trn_list, ins)
>>> (tm, idx, link) = cts.priority_queue.pop()
>>> np.round(100 * tm)
12.0
>>> idx
6
>>> link
16
>>> cts.grid.node_at_link_tail[link]
8
>>> cts.grid.node_at_link_head[link]
13
>>> cts.next_trn_id[link]
3
>>> cts.trn_to[cts.next_trn_id[link]]
7
>>> cts.do_transition_new(link, tm, 0.0)
>>> cts.node_state[8]
1
>>> cts.node_state[13]
1
>>> cts.link_state[16]
7
>>> cts.next_update[16] == _NEVER
True
>>> cts.next_trn_id[16]
-1
"""
if _DEBUG:
print()
print('py do_transition_new() for link', event_link,'time',
event_time, ' cur time ', current_time)
# We'll process the event if its update time matches the one we have
# recorded for the link in question. If not, it means that the link has
# changed state since the event was pushed onto the event queue, and
# in that case we'll ignore it.
if event_time == self.next_update[event_link]:
if _DEBUG:
print(' event time =', event_time)
tail_node = self.grid.node_at_link_tail[event_link]
head_node = self.grid.node_at_link_head[event_link]
trn_id = self.next_trn_id[event_link]
trn_to = self.trn_to[trn_id]
if _CYTEST:
old_tail_node_state = self.node_state[tail_node]
old_head_node_state = self.node_state[head_node]
update_node_states(self.node_state,
self.grid.status_at_node,
tail_node,
head_node,
trn_to,
self.num_node_states)
tail_changed = (old_tail_node_state != self.node_state[tail_node])
head_changed = (old_head_node_state != self.node_state[head_node])
else:
tail_changed, head_changed = self.update_node_states(
tail_node, head_node, trn_to)
# if _CYTEST:
# update_link_state_new(event_link, trn_to, event_time,
# self.bnd_lnk,
# self.node_state,
# self.grid.node_at_link_tail,
# self.grid.node_at_link_head,
# self.link_orientation,
# self.num_node_states,
# self.num_node_states_sq,
# self.link_state,
# self.n_trn,
# self.priority_queue,
# self.next_update,
# self.next_trn_id,
# self.trn_id,
# self.trn_rate)
# else:
self.update_link_state_new(event_link, trn_to, event_time)
# Next, when the state of one of the link's nodes changes, we have
# to update the states of the OTHER links attached to it. This
# could happen to one or both nodes.
if tail_changed:
if _DEBUG:
print(' tail node has changed state, so updating its links')
print(' links at node ' + str(tail_node) + ' are:')
print(self.grid.links_at_node[tail_node, :])
print(self.grid.active_link_dirs_at_node[tail_node, :])
for i in range(self.grid.links_at_node.shape[1]):
link = self.grid.links_at_node[tail_node, i]
dir_code = self.grid.active_link_dirs_at_node[tail_node, i]
if _DEBUG:
print('tail checking link', link)
print(' dir code ' + str(dir_code) + ' event link ' + str(event_link))
if dir_code != 0 and link != event_link:
this_link_fromnode = self.grid.node_at_link_tail[link]
this_link_tonode = self.grid.node_at_link_head[link]
orientation = self.link_orientation[link]
current_pair = (self.node_state[this_link_fromnode],
self.node_state[this_link_tonode],
orientation)
new_link_state = self.link_state_dict[current_pair]
new_link_state2 = (
orientation * self.num_node_states_sq +
self.node_state[this_link_fromnode] * self.num_node_states +
self.node_state[this_link_tonode])
assert new_link_state == new_link_state2, 'oops'
# if _CYTEST:
# update_link_state_new(link, new_link_state, event_time,
# self.bnd_lnk, self.node_state,
# self.grid.node_at_link_tail,
# self.grid.node_at_link_head,
# self.link_orientation,
# self.num_node_states,
# self.num_node_states_sq,
# self.link_state, self.n_trn,
# self.priority_queue,
# self.next_update,
# self.next_trn_id,
# self.trn_id, self.trn_rate)
# else:
self.update_link_state_new(
link, new_link_state, event_time)
if head_changed:
if _DEBUG:
print(' head node has changed state, so updating its links')
for i in range(self.grid.links_at_node.shape[1]):
link = self.grid.links_at_node[head_node, i]
dir_code = self.grid.active_link_dirs_at_node[head_node, i]
if _DEBUG:
print('head checking link', link)
if dir_code != 0 and link != event_link:
this_link_fromnode = self.grid.node_at_link_tail[link]
this_link_tonode = self.grid.node_at_link_head[link]
orientation = self.link_orientation[link]
current_pair = (self.node_state[this_link_fromnode],
self.node_state[this_link_tonode],
orientation)
new_link_state = self.link_state_dict[current_pair]
new_link_state2 = (
orientation * self.num_node_states_sq +
self.node_state[this_link_fromnode] * self.num_node_states +
self.node_state[this_link_tonode])
assert new_link_state == new_link_state2, 'oops'
# if _CYTEST:
# update_link_state_new(link, new_link_state, event_time,
# self.bnd_lnk, self.node_state,
# self.grid.node_at_link_tail,
# self.grid.node_at_link_head,
# self.link_orientation,
# self.num_node_states,
# self.num_node_states_sq,
# self.link_state, self.n_trn,
# self.priority_queue,
# self.next_update,
# self.next_trn_id,
# self.trn_id, self.trn_rate)
# else:
self.update_link_state_new(
link, new_link_state, event_time)
# If requested, display a plot of the grid
if plot_each_transition and (plotter is not None):
plotter.update_plot()
# If this event involves an exchange of properties (i.e., the
            # event involves motion of an object that possesses properties we
# want to track), implement the swap.
# If the event requires a call to a user-defined callback
# function, we handle that here too.
if self.trn_propswap[trn_id]:
tmp = self.propid[tail_node]
self.propid[tail_node] = self.propid[head_node]
self.propid[head_node] = tmp
if self.grid.status_at_node[tail_node] != _CORE:
self.prop_data[self.propid[tail_node]
] = self.prop_reset_value
if self.grid.status_at_node[head_node] != _CORE:
self.prop_data[self.propid[head_node]
] = self.prop_reset_value
if self.trn_prop_update_fn[trn_id] is not None:
self.trn_prop_update_fn[trn_id](
self, tail_node, head_node, event_time)
if False and _DEBUG:
n = self.grid.number_of_nodes
for r in range(self.grid.number_of_node_rows):
for c in range(self.grid.number_of_node_columns):
n -= 1
print('{0:.0f}'.format(self.node_state[n]), end=' ')
print()
if self.propid is not None:
print()
n = self.grid.number_of_nodes
for r in range(self.grid.number_of_node_rows):
for c in range(self.grid.number_of_node_columns):
n -= 1
print('{0:2.0f}'.format(self.propid[n]), end=' ')
print()
elif _DEBUG:
print(' event time is', event_time, 'but update time is',
self.next_update[event_link], 'so event will be ignored')
def update_component_data(self, new_node_state_array):
"""Update all component data.
Call this method to update all data held by the component, if, for
example, another component or boundary conditions modify the node
statuses outside the component between run steps.
This method updates all necessary properties, including both node and
link states.
*new_node_state_array* is the updated list of node states, which must
still all be compatible with the state list originally supplied to
this component.
"""
self.set_node_state_grid(new_node_state_array)
self.assign_link_states_from_node_types()
self.push_transitions_to_event_queue()
#@profile
def run(self, run_to, node_state_grid=None,
plot_each_transition=False, plotter=None):
"""Run the model forward for a specified period of time.
Parameters
----------
run_to : float
Time to run to, starting from self.current_time
node_state_grid : 1D array of ints (x number of nodes) (optional)
Node states (if given, replaces model's current node state grid)
plot_each_transition : bool (optional)
Option to display the grid after each transition
plotter : CAPlotter object (optional)
Needed if caller wants to plot after every transition
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.ca.celllab_cts import Transition
>>> from landlab.ca.oriented_raster_cts import OrientedRasterCTS
>>> import numpy as np
>>> grid = RasterModelGrid((3, 5))
>>> nsd = {0 : 'zero', 1 : 'one'}
>>> trn_list = []
>>> trn_list.append(Transition((0, 1, 0), (1, 0, 0), 1.0))
>>> trn_list.append(Transition((1, 0, 0), (0, 1, 0), 2.0))
>>> trn_list.append(Transition((0, 1, 1), (1, 0, 1), 3.0))
>>> trn_list.append(Transition((0, 1, 1), (1, 1, 1), 4.0))
>>> ins = np.arange(15) % 2
>>> cts = OrientedRasterCTS(grid, nsd, trn_list, ins)
"""
if node_state_grid is not None:
self.set_node_state_grid(node_state_grid)
if plot_each_transition or self._use_propswap_or_callback:
lean_run = False
else:
lean_run = True
if _USE_CYTHON and not lean_run:
self.current_time = run_cts(run_to, self.current_time,
plot_each_transition,
plotter,
self.event_queue,
self.next_update,
self.grid.node_at_link_tail,
self.grid.node_at_link_head,
self.node_state,
self.link_state,
self.grid.status_at_node,
self.link_orientation,
self.propid,
self.prop_data,
self.n_xn,
self.xn_to,
self.xn_rate,
self.grid.links_at_node,
self.grid.active_link_dirs_at_node,
self.num_node_states,
self.num_node_states_sq,
self.prop_reset_value,
self.xn_propswap,
self.xn_prop_update_fn,
self.bnd_lnk,
self)
elif _USE_CYTHON and lean_run:
self.current_time = run_cts_lean(run_to, self.current_time,
self.event_queue,
self.next_update,
self.grid.node_at_link_tail,
self.grid.node_at_link_head,
self.node_state,
self.link_state,
self.grid.status_at_node,
self.link_orientation,
self.n_xn,
self.xn_to,
self.xn_rate,
self.grid.links_at_node,
self.grid.active_link_dirs_at_node,
self.num_node_states,
self.num_node_states_sq,
self.bnd_lnk)
elif _RUN_NEW:
# switch off the run fn:
#self.run_new(run_to, plot_each_transition, plotter)
self.current_time = run_cts_new(run_to, self.current_time,
self.priority_queue,
self.next_update,
self.grid.node_at_link_tail,
self.grid.node_at_link_head,
self.node_state,
self.next_trn_id,
self.trn_to,
self.grid.status_at_node,
self.num_node_states,
self.num_node_states_sq,
self.bnd_lnk,
self.link_orientation,
self.link_state,
self.n_trn,
self.trn_id,
self.trn_rate,
self.grid.links_at_node,
self.grid.active_link_dirs_at_node,
self.trn_propswap,
self.propid,
self.prop_data,
self.prop_reset_value,
self.trn_prop_update_fn,
self,
plot_each_transition,
plotter)
else:
# Continue until we've run out of either time or events
while self.current_time < run_to and self.event_queue:
if _DEBUG:
print('Current Time = ', self.current_time)
# Is there an event scheduled to occur within this run?
if self.event_queue[0].time <= run_to:
# If so, pick the next transition event from the event queue
ev = heappop(self.event_queue)
if _DEBUG:
print('Event:', ev.time, ev.link, ev.xn_to)
self.do_transition(ev, self.current_time,
plot_each_transition, plotter)
# Update current time
self.current_time = ev.time
# If there is no event scheduled for this span of time, simply
# advance current_time to the end of the current run period.
else:
self.current_time = run_to
if _DEBUG:
print(self.node_state)
def run_new(self, run_to, plot_each_transition=False, plotter=None):
"""Test of new approach using priority queue."""
# Continue until we've run out of either time or events
while self.current_time < run_to and self.priority_queue._queue:
if _DEBUG:
print('Current Time = ', self.current_time)
# Is there an event scheduled to occur within this run?
if self.priority_queue._queue[0][0] <= run_to:
# If so, pick the next transition event from the event queue
(ev_time, ev_idx, ev_link) = self.priority_queue.pop()
if _DEBUG:
print('Event:', ev_time, ev_link, self.trn_to[self.next_trn_id[ev_link]])
if _CYTEST:
do_transition_new(ev_link, ev_time,
self.priority_queue, self.next_update,
self.grid.node_at_link_tail,
self.grid.node_at_link_head,
self.node_state,
self.next_trn_id,
self.trn_to,
self.grid.status_at_node,
self.num_node_states,
self.num_node_states_sq,
self.bnd_lnk,
self.link_orientation,
self.link_state,
self.n_trn,
self.trn_id,
self.trn_rate,
self.grid.links_at_node,
self.grid.active_link_dirs_at_node,
self.trn_propswap,
self.propid,
self.prop_data,
self.prop_reset_value,
self.trn_prop_update_fn,
self,
plot_each_transition=False,
plotter=None)
else:
self.do_transition_new(ev_link, ev_time, self.current_time,
plot_each_transition, plotter)
# Update current time
self.current_time = ev_time
# If there is no event scheduled for this span of time, simply
# advance current_time to the end of the current run period.
else:
self.current_time = run_to
if _DEBUG:
print(self.node_state)
if __name__ == "__main__":
import doctest
doctest.testmod()
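# Hedged sketch (my addition, not part of CellLab-CTS): the pure-Python branch
# of run() above follows a standard discrete-event pattern. This minimal,
# self-contained version uses heapq with (time, label) tuples and a print() in
# place of do_transition(); all names here are illustrative only.
def _event_loop_sketch(run_to, events):
    from heapq import heapify, heappop
    heapify(events)                     # events: list of (time, label) tuples
    current_time = 0.0
    while current_time < run_to and events:
        if events[0][0] <= run_to:      # next event falls within this run period
            ev_time, label = heappop(events)
            print('processing', label, 'at', ev_time)  # stands in for do_transition()
            current_time = ev_time      # advance the clock to the event time
        else:
            current_time = run_to       # no event in range: jump to end of period
    return current_time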
| mit |
soylentdeen/BlurryApple | Diagnostics/TT_test/compare_closed_loops.py | 1 | 1190 | import scipy
import numpy
import pyfits
import matplotlib.pyplot as pyplot
fig = pyplot.figure(0)
datadir = '/home/deen/Data/GRAVITY/FISBA/TipTilt/refslope0/'
closed_files = ["T+T_closed.fits", "T-T_closed.fits", "TT+_closed.fits", "TT-_closed.fits"]
images = []
mask = numpy.ones((1024, 1020), dtype=bool)  # numpy.bool is removed in modern numpy; use the builtin
for closed in closed_files:
image = pyfits.getdata(datadir+closed)
hdr = pyfits.getheader(datadir+closed)
nonapval = hdr["NONAPVAL"]
new_mask = numpy.not_equal(image, nonapval)
mask = numpy.all(numpy.vstack((mask.ravel(), new_mask.ravel())), axis=0).reshape(image.shape)
#complement = numpy.equal(image, nonapval)
#image[complement] = numpy.median(image[mask])
images.append(image)
images = numpy.array(images)
average = numpy.zeros(image.shape)
average[mask] = numpy.mean(images[:,mask], axis=0)
minval = numpy.min(images[:,mask]-average[mask])
maxval = numpy.max(images[:,mask]-average[mask])
for i in range(len(images)):
ax = fig.add_axes([0.1+0.4*(i/2), 0.1+0.4*(i%2), 0.4, 0.4])
template = numpy.zeros(image.shape)
template[mask] = images[i,mask]
im = ax.imshow(template-average, vmin=minval, vmax=maxval)
fig.show()
| gpl-2.0 |
walshjon/openmc | tests/regression_tests/tally_slice_merge/test.py | 1 | 6521 | import hashlib
import itertools
import openmc
from tests.testing_harness import PyAPITestHarness
class TallySliceMergeTestHarness(PyAPITestHarness):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Define nuclides and scores to add to both tallies
self.nuclides = ['U235', 'U238']
self.scores = ['fission', 'nu-fission']
# Define filters for energy and spatial domain
low_energy = openmc.EnergyFilter([0., 0.625])
high_energy = openmc.EnergyFilter([0.625, 20.e6])
merged_energies = low_energy.merge(high_energy)
cell_21 = openmc.CellFilter(21)
cell_27 = openmc.CellFilter(27)
distribcell_filter = openmc.DistribcellFilter(21)
mesh = openmc.Mesh(name='mesh')
mesh.type = 'regular'
mesh.dimension = [2, 2]
mesh.lower_left = [-50., -50.]
mesh.upper_right = [+50., +50.]
mesh_filter = openmc.MeshFilter(mesh)
self.cell_filters = [cell_21, cell_27]
self.energy_filters = [low_energy, high_energy]
# Initialize cell tallies with filters, nuclides and scores
tallies = []
for energy_filter in self.energy_filters:
for cell_filter in self.cell_filters:
for nuclide in self.nuclides:
for score in self.scores:
tally = openmc.Tally()
tally.estimator = 'tracklength'
tally.scores.append(score)
tally.nuclides.append(nuclide)
tally.filters.append(cell_filter)
tally.filters.append(energy_filter)
tallies.append(tally)
# Merge all cell tallies together
while len(tallies) != 1:
halfway = len(tallies) // 2
zip_split = zip(tallies[:halfway], tallies[halfway:])
tallies = list(map(lambda xy: xy[0].merge(xy[1]), zip_split))
# Specify a name for the tally
tallies[0].name = 'cell tally'
# Initialize a distribcell tally
distribcell_tally = openmc.Tally(name='distribcell tally')
distribcell_tally.estimator = 'tracklength'
distribcell_tally.filters = [distribcell_filter, merged_energies]
for score in self.scores:
distribcell_tally.scores.append(score)
for nuclide in self.nuclides:
distribcell_tally.nuclides.append(nuclide)
mesh_tally = openmc.Tally(name='mesh tally')
mesh_tally.estimator = 'tracklength'
mesh_tally.filters = [mesh_filter, merged_energies]
mesh_tally.scores = self.scores
mesh_tally.nuclides = self.nuclides
# Add tallies to a Tallies object
self._model.tallies = [tallies[0], distribcell_tally, mesh_tally]
def _get_results(self, hash_output=False):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
sp = openmc.StatePoint(self._sp_name)
# Extract the cell tally
tallies = [sp.get_tally(name='cell tally')]
# Slice the tallies by cell filter bins
cell_filter_prod = itertools.product(tallies, self.cell_filters)
tallies = map(lambda tf: tf[0].get_slice(filters=[type(tf[1])],
filter_bins=[tf[1].get_bin(0)]), cell_filter_prod)
# Slice the tallies by energy filter bins
energy_filter_prod = itertools.product(tallies, self.energy_filters)
tallies = map(lambda tf: tf[0].get_slice(filters=[type(tf[1])],
filter_bins=[(tf[1].get_bin(0),)]), energy_filter_prod)
# Slice the tallies by nuclide
nuclide_prod = itertools.product(tallies, self.nuclides)
tallies = map(lambda tn: tn[0].get_slice(nuclides=[tn[1]]), nuclide_prod)
# Slice the tallies by score
score_prod = itertools.product(tallies, self.scores)
tallies = map(lambda ts: ts[0].get_slice(scores=[ts[1]]), score_prod)
tallies = list(tallies)
# Initialize an output string
outstr = ''
# Append sliced Tally Pandas DataFrames to output string
for tally in tallies:
df = tally.get_pandas_dataframe()
outstr += df.to_string()
# Merge all tallies together
while len(tallies) != 1:
halfway = int(len(tallies) / 2)
zip_split = zip(tallies[:halfway], tallies[halfway:])
tallies = list(map(lambda xy: xy[0].merge(xy[1]), zip_split))
# Append merged Tally Pandas DataFrame to output string
df = tallies[0].get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Extract the distribcell tally
distribcell_tally = sp.get_tally(name='distribcell tally')
# Sum up a few subdomains from the distribcell tally
sum1 = distribcell_tally.summation(filter_type=openmc.DistribcellFilter,
filter_bins=[0, 100, 2000, 30000])
# Sum up a few subdomains from the distribcell tally
sum2 = distribcell_tally.summation(filter_type=openmc.DistribcellFilter,
filter_bins=[500, 5000, 50000])
# Merge the distribcell tally slices
merge_tally = sum1.merge(sum2)
# Append merged Tally Pandas DataFrame to output string
df = merge_tally.get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Extract the mesh tally
mesh_tally = sp.get_tally(name='mesh tally')
# Sum up a few subdomains from the mesh tally
sum1 = mesh_tally.summation(filter_type=openmc.MeshFilter,
filter_bins=[(1, 1), (1, 2)])
# Sum up a few subdomains from the mesh tally
sum2 = mesh_tally.summation(filter_type=openmc.MeshFilter,
filter_bins=[(2, 1), (2, 2)])
# Merge the mesh tally slices
merge_tally = sum1.merge(sum2)
# Append merged Tally Pandas DataFrame to output string
df = merge_tally.get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Hash the results if necessary
if hash_output:
sha512 = hashlib.sha512()
sha512.update(outstr.encode('utf-8'))
outstr = sha512.hexdigest()
return outstr
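# Hedged illustration (my addition, not part of the test): the halving merge
# used in __init__ and _get_results above zips the first half of the list with
# the second half and merges element-wise until a single object remains. Shown
# here with plain integers and '+' standing in for Tally.merge; as in the test,
# it assumes the list length is a power of two so zip() drops nothing.
def _pairwise_merge_sketch(items):
    while len(items) != 1:
        halfway = len(items) // 2
        items = [a + b for a, b in zip(items[:halfway], items[halfway:])]
    return items[0]
# e.g. _pairwise_merge_sketch([1, 2, 3, 4, 5, 6, 7, 8]) == 36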
def test_tally_slice_merge():
harness = TallySliceMergeTestHarness('statepoint.10.h5')
harness.main()
| mit |
chen1474147/Deep3DPose | 5-caffe/python/detect.py | 23 | 5743 | #!/usr/bin/env python
"""
detector.py is an out-of-the-box windowed detector
callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
Note that this model was trained for image classification and not detection,
and finetuning for detection can be expected to improve results.
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
TODO:
- batch up image filenames as well: don't want to load all of them into memory
- come up with a batching scheme that preserved order / keeps a unique ID
"""
import numpy as np
import pandas as pd
import os
import argparse
import time
import caffe
CROP_MODES = ['list', 'selective_search']
COORD_COLS = ['ymin', 'xmin', 'ymax', 'xmax']
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
# Required arguments: input and output.
parser.add_argument(
"input_file",
help="Input txt/csv filename. If .txt, must be list of filenames.\
If .csv, must be comma-separated file with header\
'filename, xmin, ymin, xmax, ymax'"
)
parser.add_argument(
"output_file",
help="Output h5/csv filename. Format depends on extension."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/deploy.prototxt.prototxt"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"),
help="Trained model weights file."
)
parser.add_argument(
"--crop_mode",
default="selective_search",
choices=CROP_MODES,
help="How to generate windows for detection."
)
parser.add_argument(
"--gpu",
action='store_true',
help="Switch for gpu computation."
)
parser.add_argument(
"--mean_file",
default=os.path.join(pycaffe_dir,
'caffe/imagenet/ilsvrc_2012_mean.npy'),
help="Data set image mean of H x W x K dimensions (numpy array). " +
"Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
help="Multiply input features by this scale to finish preprocessing."
)
parser.add_argument(
"--raw_scale",
type=float,
default=255.0,
help="Multiply raw input by this scale before preprocessing."
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
parser.add_argument(
"--context_pad",
type=int,
default='16',
help="Amount of surrounding context to collect in input window."
)
args = parser.parse_args()
mean, channel_swap = None, None
if args.mean_file:
mean = np.load(args.mean_file)
if mean.shape[1:] != (1, 1):
mean = mean.mean(1).mean(1)
if args.channel_swap:
channel_swap = [int(s) for s in args.channel_swap.split(',')]
if args.gpu:
caffe.set_mode_gpu()
print("GPU mode")
else:
caffe.set_mode_cpu()
print("CPU mode")
# Make detector.
detector = caffe.Detector(args.model_def, args.pretrained_model, mean=mean,
input_scale=args.input_scale, raw_scale=args.raw_scale,
channel_swap=channel_swap,
context_pad=args.context_pad)
# Load input.
t = time.time()
print("Loading input...")
if args.input_file.lower().endswith('txt'):
with open(args.input_file) as f:
inputs = [_.strip() for _ in f.readlines()]
elif args.input_file.lower().endswith('csv'):
inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str})
inputs.set_index('filename', inplace=True)
else:
raise Exception("Unknown input file type: not in txt or csv.")
# Detect.
if args.crop_mode == 'list':
# Unpack sequence of (image filename, windows).
images_windows = [
(ix, inputs.iloc[np.where(inputs.index == ix)][COORD_COLS].values)
for ix in inputs.index.unique()
]
detections = detector.detect_windows(images_windows)
else:
detections = detector.detect_selective_search(inputs)
print("Processed {} windows in {:.3f} s.".format(len(detections),
time.time() - t))
# Collect into dataframe with labeled fields.
df = pd.DataFrame(detections)
df.set_index('filename', inplace=True)
df[COORD_COLS] = pd.DataFrame(
data=np.vstack(df['window']), index=df.index, columns=COORD_COLS)
del(df['window'])
# Save results.
t = time.time()
if args.output_file.lower().endswith('csv'):
# csv
        # Enumerate the class probabilities. (NUM_OUTPUT was undefined in the
        # original; infer the number of classes from the feature vectors.)
        num_output = len(df['feat'].iloc[0])
        class_cols = ['class{}'.format(x) for x in range(num_output)]
df[class_cols] = pd.DataFrame(
data=np.vstack(df['feat']), index=df.index, columns=class_cols)
        df.to_csv(args.output_file, columns=COORD_COLS + class_cols)
else:
# h5
df.to_hdf(args.output_file, 'df', mode='w')
print("Saved to {} in {:.3f} s.".format(args.output_file,
time.time() - t))
if __name__ == "__main__":
import sys
main(sys.argv)
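# Hedged example (my addition, not part of the original script): one way to
# build the kind of windows CSV that --crop_mode=list expects, using the
# pandas import above. The image paths and box coordinates are hypothetical.
def _write_example_windows_csv(path='windows.csv'):
    rows = [
        {'filename': 'images/example_a.jpg',
         'xmin': 10, 'ymin': 20, 'xmax': 110, 'ymax': 140},
        {'filename': 'images/example_a.jpg',
         'xmin': 50, 'ymin': 60, 'xmax': 200, 'ymax': 220},
    ]
    cols = ['filename', 'xmin', 'ymin', 'xmax', 'ymax']
    pd.DataFrame(rows)[cols].to_csv(path, index=False)
    return path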
| mit |
mavenlin/tensorflow | tensorflow/examples/learn/text_classification_character_cnn.py | 29 | 5666 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using convolutional networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
N_FILTERS = 10
FILTER_SHAPE1 = [20, 256]
FILTER_SHAPE2 = [20, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
MAX_LABEL = 15
CHARS_FEATURE = 'chars' # Name of the input character feature.
def char_cnn_model(features, labels, mode):
"""Character level convolutional neural network model to predict classes."""
features_onehot = tf.one_hot(features[CHARS_FEATURE], 256)
input_layer = tf.reshape(
features_onehot, [-1, MAX_DOCUMENT_LENGTH, 256, 1])
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
input_layer,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE1,
padding='VALID',
# Add a ReLU for non linearity.
activation=tf.nn.relu)
# Max pooling across output of Convolution+Relu.
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
    pool2 = tf.squeeze(tf.reduce_max(conv2, 1), axis=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
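# Shape walk-through for char_cnn_model above (my annotation; based on the
# constants at the top of this file and worth re-checking against a real run):
#   one-hot input ............. [batch, 100, 256, 1]
#   conv1, kernel [20, 256] ... [batch, 81, 1, 10]   (VALID padding)
#   pool1, size 4 / stride 2 .. [batch, 41, 1, 10]   (SAME padding),
#                               then transposed to [batch, 41, 10, 1]
#   conv2, kernel [20, 10] .... [batch, 22, 1, 10]   (VALID padding)
#   reduce_max + squeeze ...... [batch, 10]
#   dense ..................... [batch, MAX_LABEL]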
def main(unused_argv):
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data, size='large')
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = tf.contrib.learn.preprocessing.ByteProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
x_train = x_train.reshape([-1, MAX_DOCUMENT_LENGTH, 1, 1])
x_test = x_test.reshape([-1, MAX_DOCUMENT_LENGTH, 1, 1])
# Build model
classifier = tf.estimator.Estimator(model_fn=char_cnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
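# Hedged sketch (my addition, not part of the example): the ByteProcessor used
# in main() maps each document to a fixed-length sequence of byte values.
# Roughly the same idea, for illustration only, with plain numpy:
def _bytes_to_ids(text, max_len=MAX_DOCUMENT_LENGTH):
    ids = np.frombuffer(text.encode('utf-8')[:max_len], dtype=np.uint8)
    return np.pad(ids, (0, max_len - len(ids)), mode='constant')  # zero-pad to max_len
# e.g. _bytes_to_ids('hello').shape == (MAX_DOCUMENT_LENGTH,)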
| apache-2.0 |
lhilt/scipy | scipy/signal/windows/windows.py | 5 | 74216 | """The suite of window functions."""
from __future__ import division, print_function, absolute_import
import operator
import warnings
import numpy as np
from scipy import linalg, special, fft as sp_fft
from scipy._lib.six import string_types
__all__ = ['boxcar', 'triang', 'parzen', 'bohman', 'blackman', 'nuttall',
'blackmanharris', 'flattop', 'bartlett', 'hanning', 'barthann',
           'hamming', 'kaiser', 'gaussian', 'general_cosine', 'general_gaussian',
'general_hamming', 'chebwin', 'slepian', 'cosine', 'hann',
'exponential', 'tukey', 'dpss', 'get_window']
def _len_guards(M):
"""Handle small or incorrect window lengths"""
if int(M) != M or M < 0:
raise ValueError('Window length M must be a non-negative integer')
return M <= 1
def _extend(M, sym):
"""Extend window by 1 sample if needed for DFT-even symmetry"""
if not sym:
return M + 1, True
else:
return M, False
def _truncate(w, needed):
"""Truncate window by 1 sample if needed for DFT-even symmetry"""
if needed:
return w[:-1]
else:
return w
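# Hedged illustration (my addition, not part of SciPy): how the three helpers
# above combine. A periodic ("DFT-even") window of length M is produced by
# building the symmetric window on M + 1 points and dropping the final sample;
# numpy.hanning stands in here for any symmetric window generator.
def _periodic_window_sketch(M=8):
    M_ext, needs_trunc = _extend(M, sym=False)  # -> (M + 1, True)
    w = np.hanning(M_ext)                       # symmetric window on M + 1 points
    return _truncate(w, needs_trunc)            # drop last sample -> periodic, length M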
def general_cosine(M, a, sym=True):
r"""
Generic weighted sum of cosine terms window
Parameters
----------
M : int
Number of points in the output window
a : array_like
Sequence of weighting coefficients. This uses the convention of being
centered on the origin, so these will typically all be positive
numbers, not alternating sign.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
References
----------
.. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE
Transactions on Acoustics, Speech, and Signal Processing, vol. 29,
no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`.
.. [2] Heinzel G. et al., "Spectrum and spectral density estimation by the
Discrete Fourier transform (DFT), including a comprehensive list of
window functions and some new flat-top windows", February 15, 2002
https://holometer.fnal.gov/GH_FFT.pdf
Examples
--------
Heinzel describes a flat-top window named "HFT90D" with formula: [2]_
.. math:: w_j = 1 - 1.942604 \cos(z) + 1.340318 \cos(2z)
- 0.440811 \cos(3z) + 0.043097 \cos(4z)
where
.. math:: z = \frac{2 \pi j}{N}, j = 0...N - 1
Since this uses the convention of starting at the origin, to reproduce the
window, we need to convert every other coefficient to a positive number:
>>> HFT90D = [1, 1.942604, 1.340318, 0.440811, 0.043097]
The paper states that the highest sidelobe is at -90.2 dB. Reproduce
Figure 42 by plotting the window and its frequency response, and confirm
the sidelobe level in red:
>>> from scipy.signal.windows import general_cosine
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = general_cosine(1000, HFT90D, sym=False)
>>> plt.plot(window)
>>> plt.title("HFT90D window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 10000) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = np.abs(fftshift(A / abs(A).max()))
>>> response = 20 * np.log10(np.maximum(response, 1e-10))
>>> plt.plot(freq, response)
>>> plt.axis([-50/1000, 50/1000, -140, 0])
>>> plt.title("Frequency response of the HFT90D window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.axhline(-90.2, color='red')
>>> plt.show()
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
fac = np.linspace(-np.pi, np.pi, M)
w = np.zeros(M)
for k in range(len(a)):
w += a[k] * np.cos(k * fac)
return _truncate(w, needs_trunc)
def boxcar(M, sym=True):
"""Return a boxcar or rectangular window.
Also known as a rectangular window or Dirichlet window, this is equivalent
to no window at all.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
Whether the window is symmetric. (Has no effect for boxcar.)
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.boxcar(51)
>>> plt.plot(window)
>>> plt.title("Boxcar window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the boxcar window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
w = np.ones(M, float)
return _truncate(w, needs_trunc)
def triang(M, sym=True):
"""Return a triangular window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
See Also
--------
bartlett : A triangular window that touches zero
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.triang(51)
>>> plt.plot(window)
>>> plt.title("Triangular window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = np.abs(fftshift(A / abs(A).max()))
>>> response = 20 * np.log10(np.maximum(response, 1e-10))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the triangular window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(1, (M + 1) // 2 + 1)
if M % 2 == 0:
w = (2 * n - 1.0) / M
w = np.r_[w, w[::-1]]
else:
w = 2 * n / (M + 1.0)
w = np.r_[w, w[-2::-1]]
return _truncate(w, needs_trunc)
def parzen(M, sym=True):
"""Return a Parzen window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
References
----------
.. [1] E. Parzen, "Mathematical Considerations in the Estimation of
Spectra", Technometrics, Vol. 3, No. 2 (May, 1961), pp. 167-190
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.parzen(51)
>>> plt.plot(window)
>>> plt.title("Parzen window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Parzen window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0)
na = np.extract(n < -(M - 1) / 4.0, n)
nb = np.extract(abs(n) <= (M - 1) / 4.0, n)
wa = 2 * (1 - np.abs(na) / (M / 2.0)) ** 3.0
wb = (1 - 6 * (np.abs(nb) / (M / 2.0)) ** 2.0 +
6 * (np.abs(nb) / (M / 2.0)) ** 3.0)
w = np.r_[wa, wb, wa[::-1]]
return _truncate(w, needs_trunc)
def bohman(M, sym=True):
"""Return a Bohman window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bohman(51)
>>> plt.plot(window)
>>> plt.title("Bohman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bohman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
fac = np.abs(np.linspace(-1, 1, M)[1:-1])
w = (1 - fac) * np.cos(np.pi * fac) + 1.0 / np.pi * np.sin(np.pi * fac)
w = np.r_[0, w, 0]
return _truncate(w, needs_trunc)
def blackman(M, sym=True):
r"""
Return a Blackman window.
The Blackman window is a taper formed by using the first three terms of
a summation of cosines. It was designed to have close to the minimal
leakage possible. It is close to optimal, only slightly worse than a
Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/M) + 0.08 \cos(4\pi n/M)
The "exact Blackman" window was designed to null out the third and fourth
sidelobes, but has discontinuities at the boundaries, resulting in a
6 dB/oct fall-off. This window is an approximation of the "exact" window,
which does not null the sidelobes as well, but is smooth at the edges,
improving the fall-off rate to 18 dB/oct. [3]_
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the Kaiser window.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
.. [3] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
Analysis with the Discrete Fourier Transform". Proceedings of the
IEEE 66 (1): 51-83. :doi:`10.1109/PROC.1978.10837`.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackman(51)
>>> plt.plot(window)
>>> plt.title("Blackman window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = np.abs(fftshift(A / abs(A).max()))
>>> response = 20 * np.log10(np.maximum(response, 1e-10))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's blackman function
return general_cosine(M, [0.42, 0.50, 0.08], sym)
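# Hedged note (my addition): the coefficients above are the usual truncated
# values. The "exact Blackman" mentioned in the Notes, which nulls the third
# and fourth sidelobes at the cost of edge discontinuities, corresponds to
# a = [7938/18608, 9240/18608, 1430/18608] and can be built the same way:
def _exact_blackman(M, sym=True):
    return general_cosine(M, [7938. / 18608, 9240. / 18608, 1430. / 18608], sym)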
def nuttall(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window according to Nuttall.
This variation is called "Nuttall4c" by Heinzel. [2]_
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
References
----------
.. [1] A. Nuttall, "Some windows with very good sidelobe behavior," IEEE
Transactions on Acoustics, Speech, and Signal Processing, vol. 29,
no. 1, pp. 84-91, Feb 1981. :doi:`10.1109/TASSP.1981.1163506`.
.. [2] Heinzel G. et al., "Spectrum and spectral density estimation by the
Discrete Fourier transform (DFT), including a comprehensive list of
window functions and some new flat-top windows", February 15, 2002
https://holometer.fnal.gov/GH_FFT.pdf
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.nuttall(51)
>>> plt.plot(window)
>>> plt.title("Nuttall window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Nuttall window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return general_cosine(M, [0.3635819, 0.4891775, 0.1365995, 0.0106411], sym)
def blackmanharris(M, sym=True):
"""Return a minimum 4-term Blackman-Harris window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.blackmanharris(51)
>>> plt.plot(window)
>>> plt.title("Blackman-Harris window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Blackman-Harris window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
return general_cosine(M, [0.35875, 0.48829, 0.14128, 0.01168], sym)
def flattop(M, sym=True):
"""Return a flat top window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
Flat top windows are used for taking accurate measurements of signal
amplitude in the frequency domain, with minimal scalloping error from the
center of a frequency bin to its edges, compared to others. This is a
5th-order cosine window, with the 5 terms optimized to make the main lobe
maximally flat. [1]_
References
----------
.. [1] D'Antona, Gabriele, and A. Ferrero, "Digital Signal Processing for
Measurement Systems", Springer Media, 2006, p. 70
:doi:`10.1007/0-387-28666-7`.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.flattop(51)
>>> plt.plot(window)
>>> plt.title("Flat top window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the flat top window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
a = [0.21557895, 0.41663158, 0.277263158, 0.083578947, 0.006947368]
return general_cosine(M, a, sym)
def bartlett(M, sym=True):
r"""
Return a Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The triangular window, with the first and last samples equal to zero
and the maximum value normalized to 1 (though the value 1 does not
appear if `M` is even and `sym` is True).
See Also
--------
triang : A triangular window that does not touch zero at the ends
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \frac{2}{M-1} \left(
\frac{M-1}{2} - \left|n - \frac{M-1}{2}\right|
\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The Fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich. [2]_
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.bartlett(51)
>>> plt.plot(window)
>>> plt.title("Bartlett window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's bartlett function
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M)
w = np.where(np.less_equal(n, (M - 1) / 2.0),
2.0 * n / (M - 1), 2.0 - 2.0 * n / (M - 1))
return _truncate(w, needs_trunc)
def hann(M, sym=True):
r"""
Return a Hann window.
The Hann window is a taper formed by using a raised cosine or sine-squared
with ends that touch zero.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hann window is defined as
.. math:: w(n) = 0.5 - 0.5 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The window was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. It is sometimes erroneously referred to as
the "Hanning" window, from the use of "hann" as a verb in the original
paper and confusion with the very similar Hamming window.
Most references to the Hann window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hann(51)
>>> plt.plot(window)
>>> plt.title("Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = np.abs(fftshift(A / abs(A).max()))
>>> response = 20 * np.log10(np.maximum(response, 1e-10))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hanning function
return general_hamming(M, 0.5, sym)
@np.deprecate(new_name='scipy.signal.windows.hann')
def hanning(*args, **kwargs):
return hann(*args, **kwargs)
def tukey(M, alpha=0.5, sym=True):
r"""Return a Tukey window, also known as a tapered cosine window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
alpha : float, optional
Shape parameter of the Tukey window, representing the fraction of the
window inside the cosine tapered region.
If zero, the Tukey window is equivalent to a rectangular window.
If one, the Tukey window is equivalent to a Hann window.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
References
----------
.. [1] Harris, Fredric J. (Jan 1978). "On the use of Windows for Harmonic
Analysis with the Discrete Fourier Transform". Proceedings of the
IEEE 66 (1): 51-83. :doi:`10.1109/PROC.1978.10837`
.. [2] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function#Tukey_window
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.tukey(51)
>>> plt.plot(window)
>>> plt.title("Tukey window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.ylim([0, 1.1])
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Tukey window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
if alpha <= 0:
return np.ones(M, 'd')
elif alpha >= 1.0:
return hann(M, sym=sym)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M)
width = int(np.floor(alpha*(M-1)/2.0))
n1 = n[0:width+1]
n2 = n[width+1:M-width-1]
n3 = n[M-width-1:]
w1 = 0.5 * (1 + np.cos(np.pi * (-1 + 2.0*n1/alpha/(M-1))))
w2 = np.ones(n2.shape)
w3 = 0.5 * (1 + np.cos(np.pi * (-2.0/alpha + 1 + 2.0*n3/alpha/(M-1))))
w = np.concatenate((w1, w2, w3))
return _truncate(w, needs_trunc)
def barthann(M, sym=True):
"""Return a modified Bartlett-Hann window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.barthann(51)
>>> plt.plot(window)
>>> plt.title("Bartlett-Hann window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Bartlett-Hann window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M)
fac = np.abs(n / (M - 1.0) - 0.5)
w = 0.62 - 0.48 * fac + 0.38 * np.cos(2 * np.pi * fac)
return _truncate(w, needs_trunc)
def general_hamming(M, alpha, sym=True):
r"""Return a generalized Hamming window.
The generalized Hamming window is constructed by multiplying a rectangular
window by one period of a cosine function [1]_.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
alpha : float
The window coefficient, :math:`\alpha`
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The generalized Hamming window is defined as
.. math:: w(n) = \alpha - \left(1 - \alpha\right) \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
Both the common Hamming window and Hann window are special cases of the
generalized Hamming window with :math:`\alpha` = 0.54 and :math:`\alpha` =
0.5, respectively [2]_.
See Also
--------
hamming, hann
Examples
--------
The Sentinel-1A/B Instrument Processing Facility uses generalized Hamming
windows in the processing of spaceborne Synthetic Aperture Radar (SAR)
data [3]_. The facility uses various values for the :math:`\alpha`
parameter based on operating mode of the SAR instrument. Some common
:math:`\alpha` values include 0.75, 0.7 and 0.52 [4]_. As an example, we
plot these different windows.
>>> from scipy.signal.windows import general_hamming
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> fig1, spatial_plot = plt.subplots()
>>> spatial_plot.set_title("Generalized Hamming Windows")
>>> spatial_plot.set_ylabel("Amplitude")
>>> spatial_plot.set_xlabel("Sample")
>>> fig2, freq_plot = plt.subplots()
>>> freq_plot.set_title("Frequency Responses")
>>> freq_plot.set_ylabel("Normalized magnitude [dB]")
>>> freq_plot.set_xlabel("Normalized frequency [cycles per sample]")
>>> for alpha in [0.75, 0.7, 0.52]:
... window = general_hamming(41, alpha)
... spatial_plot.plot(window, label="{:.2f}".format(alpha))
... A = fft(window, 2048) / (len(window)/2.0)
... freq = np.linspace(-0.5, 0.5, len(A))
... response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
... freq_plot.plot(freq, response, label="{:.2f}".format(alpha))
>>> freq_plot.legend(loc="upper right")
>>> spatial_plot.legend(loc="upper right")
References
----------
.. [1] DSPRelated, "Generalized Hamming Window Family",
https://www.dsprelated.com/freebooks/sasp/Generalized_Hamming_Window_Family.html
.. [2] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [3] Riccardo Piantanida ESA, "Sentinel-1 Level 1 Detailed Algorithm
Definition",
https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Level-1-Detailed-Algorithm-Definition
.. [4] Matthieu Bourbigot ESA, "Sentinel-1 Product Definition",
https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Product-Definition
"""
return general_cosine(M, [alpha, 1. - alpha], sym)
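# Hedged check (my addition): per the Notes above, alpha = 0.54 reproduces the
# classic Hamming window and alpha = 0.5 the Hann window; hann() above and
# hamming() below simply delegate to general_hamming with those values.
def _general_hamming_special_cases(M=51):
    return (np.allclose(general_hamming(M, 0.54), hamming(M)),
            np.allclose(general_hamming(M, 0.5), hann(M)))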
def hamming(M, sym=True):
r"""Return a Hamming window.
The Hamming window is a taper formed by using a raised cosine with
non-zero endpoints, optimized to minimize the nearest side lobe.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46 \cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.hamming(51)
>>> plt.plot(window)
>>> plt.title("Hamming window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Hamming window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's hamming function
return general_hamming(M, 0.54, sym)
def kaiser(M, beta, sym=True):
r"""Return a Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
beta : float
Shape parameter, determines trade-off between main-lobe width and
side lobe level. As beta gets large, the window narrows.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\left( \beta \sqrt{1-\frac{4n^2}{(M-1)^2}}
\right)/I_0(\beta)
with
.. math:: \quad -\frac{M-1}{2} \leq n \leq \frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate other windows by varying the beta parameter.
(Some literature uses alpha = beta/pi.) [4]_
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hann
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
be returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] F. J. Harris, "On the use of windows for harmonic analysis with the
discrete Fourier transform," Proceedings of the IEEE, vol. 66,
no. 1, pp. 51-83, Jan. 1978. :doi:`10.1109/PROC.1978.10837`.
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.kaiser(51, beta=14)
>>> plt.plot(window)
>>> plt.title(r"Kaiser window ($\beta$=14)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Kaiser window ($\beta$=14)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
# Docstring adapted from NumPy's kaiser function
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M)
alpha = (M - 1) / 2.0
w = (special.i0(beta * np.sqrt(1 - ((n - alpha) / alpha) ** 2.0)) /
special.i0(beta))
return _truncate(w, needs_trunc)
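# Hedged illustration (my addition): the beta table in the kaiser docstring can
# be probed numerically, e.g. via the maximum absolute deviation between a
# Kaiser window and the classic window it is said to resemble. The betas below
# come straight from that table; the comparison is qualitative, not a test.
def _kaiser_vs_classic_windows(M=51):
    return {'beta=5 vs hamming': np.max(np.abs(kaiser(M, 5) - hamming(M))),
            'beta=6 vs hann': np.max(np.abs(kaiser(M, 6) - hann(M))),
            'beta=8.6 vs blackman': np.max(np.abs(kaiser(M, 8.6) - blackman(M)))}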
def gaussian(M, std, sym=True):
r"""Return a Gaussian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
std : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left(\frac{n}{\sigma}\right)^2 }
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.gaussian(51, std=7)
>>> plt.plot(window)
>>> plt.title(r"Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Frequency response of the Gaussian window ($\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M) - (M - 1.0) / 2.0
sig2 = 2 * std * std
w = np.exp(-n ** 2 / sig2)
return _truncate(w, needs_trunc)
def general_gaussian(M, p, sig, sym=True):
r"""Return a window with a generalized Gaussian shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
p : float
Shape parameter. p = 1 is identical to `gaussian`, p = 0.5 is
the same shape as the Laplace distribution.
sig : float
The standard deviation, sigma.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The generalized Gaussian window is defined as
.. math:: w(n) = e^{ -\frac{1}{2}\left|\frac{n}{\sigma}\right|^{2p} }
the half-power point is at
.. math:: (2 \log(2))^{1/(2 p)} \sigma
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.general_gaussian(51, p=1.5, sig=7)
>>> plt.plot(window)
>>> plt.title(r"Generalized Gaussian window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title(r"Freq. resp. of the gen. Gaussian "
... r"window (p=1.5, $\sigma$=7)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
n = np.arange(0, M) - (M - 1.0) / 2.0
w = np.exp(-0.5 * np.abs(n / sig) ** (2 * p))
return _truncate(w, needs_trunc)
# `chebwin` contributed by Kumar Appaiah.
def chebwin(M, at, sym=True):
r"""Return a Dolph-Chebyshev window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
at : float
Attenuation (in dB).
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
Notes
-----
This window optimizes for the narrowest main lobe width for a given order
`M` and sidelobe equiripple attenuation `at`, using Chebyshev
polynomials. It was originally developed by Dolph to optimize the
directionality of radio antenna arrays.
Unlike most windows, the Dolph-Chebyshev is defined in terms of its
frequency response:
.. math:: W(k) = \frac
{\cos\{M \cos^{-1}[\beta \cos(\frac{\pi k}{M})]\}}
{\cosh[M \cosh^{-1}(\beta)]}
where
.. math:: \beta = \cosh \left [\frac{1}{M}
\cosh^{-1}(10^\frac{A}{20}) \right ]
and 0 <= abs(k) <= M-1. A is the attenuation in decibels (`at`).
The time domain window is then generated using the IFFT, so
power-of-two `M` are the fastest to generate, and prime number `M` are
the slowest.
The equiripple condition in the frequency domain creates impulses in the
time domain, which appear at the ends of the window.
References
----------
.. [1] C. Dolph, "A current distribution for broadside arrays which
optimizes the relationship between beam width and side-lobe level",
Proceedings of the IEEE, Vol. 34, Issue 6
.. [2] Peter Lynch, "The Dolph-Chebyshev Window: A Simple Optimal Filter",
American Meteorological Society (April 1997)
http://mathsci.ucd.ie/~plynch/Publications/Dolph.pdf
.. [3] F. J. Harris, "On the use of windows for harmonic analysis with the
discrete Fourier transforms", Proceedings of the IEEE, Vol. 66,
No. 1, January 1978
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.chebwin(51, at=100)
>>> plt.plot(window)
>>> plt.title("Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Dolph-Chebyshev window (100 dB)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
if np.abs(at) < 45:
warnings.warn("This window is not suitable for spectral analysis "
"for attenuation values lower than about 45dB because "
"the equivalent noise bandwidth of a Chebyshev window "
"does not grow monotonically with increasing sidelobe "
"attenuation when the attenuation is smaller than "
"about 45 dB.")
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
# compute the parameter beta
order = M - 1.0
beta = np.cosh(1.0 / order * np.arccosh(10 ** (np.abs(at) / 20.)))
k = np.r_[0:M] * 1.0
x = beta * np.cos(np.pi * k / M)
# Find the window's DFT coefficients
# Use analytic definition of Chebyshev polynomial instead of expansion
# from scipy.special. Using the expansion in scipy.special leads to errors.
p = np.zeros(x.shape)
p[x > 1] = np.cosh(order * np.arccosh(x[x > 1]))
p[x < -1] = (2 * (M % 2) - 1) * np.cosh(order * np.arccosh(-x[x < -1]))
p[np.abs(x) <= 1] = np.cos(order * np.arccos(x[np.abs(x) <= 1]))
# Appropriate IDFT and filling up
# depending on even/odd M
if M % 2:
w = np.real(sp_fft.fft(p))
n = (M + 1) // 2
w = w[:n]
w = np.concatenate((w[n - 1:0:-1], w))
else:
p = p * np.exp(1.j * np.pi / M * np.r_[0:M])
w = np.real(sp_fft.fft(p))
n = M // 2 + 1
w = np.concatenate((w[n - 1:0:-1], w[1:n]))
w = w / max(w)
return _truncate(w, needs_trunc)
def slepian(M, width, sym=True):
"""Return a digital Slepian (DPSS) window.
Used to maximize the energy concentration in the main lobe. Also called
the digital prolate spheroidal sequence (DPSS).
.. note:: Deprecated in SciPy 1.1.
`slepian` will be removed in a future version of SciPy, it is
replaced by `dpss`, which uses the standard definition of a
digital Slepian window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
width : float
Bandwidth
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value always normalized to 1
See Also
--------
dpss
References
----------
.. [1] D. Slepian & H. O. Pollak: "Prolate spheroidal wave functions,
Fourier analysis and uncertainty-I," Bell Syst. Tech. J., vol.40,
pp.43-63, 1961. https://archive.org/details/bstj40-1-43
.. [2] H. J. Landau & H. O. Pollak: "Prolate spheroidal wave functions,
Fourier analysis and uncertainty-II," Bell Syst. Tech. J. , vol.40,
pp.65-83, 1961. https://archive.org/details/bstj40-1-65
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.slepian(51, width=0.3)
>>> plt.plot(window)
>>> plt.title("Slepian (DPSS) window (BW=0.3)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the Slepian window (BW=0.3)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
"""
warnings.warn('slepian is deprecated and will be removed in a future '
'version, use dpss instead', DeprecationWarning)
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
# our width is the full bandwidth
width = width / 2
# to match the old version
width = width / 2
m = np.arange(M, dtype='d')
H = np.zeros((2, M))
H[0, 1:] = m[1:] * (M - m[1:]) / 2
H[1, :] = ((M - 1 - 2 * m) / 2)**2 * np.cos(2 * np.pi * width)
_, win = linalg.eig_banded(H, select='i', select_range=(M-1, M-1))
win = win.ravel() / win.max()
return _truncate(win, needs_trunc)
def cosine(M, sym=True):
"""Return a window with a simple cosine shape.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
Plot the window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = signal.cosine(51)
>>> plt.plot(window)
>>> plt.title("Cosine window")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -120, 0])
>>> plt.title("Frequency response of the cosine window")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
>>> plt.show()
"""
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
w = np.sin(np.pi / M * (np.arange(0, M) + .5))
return _truncate(w, needs_trunc)
def exponential(M, center=None, tau=1., sym=True):
r"""Return an exponential (or Poisson) window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
center : float, optional
Parameter defining the center location of the window function.
The default value if not given is ``center = (M-1) / 2``. This
parameter must take its default value for symmetric windows.
tau : float, optional
Parameter defining the decay. For ``center = 0`` use
``tau = -(M-1) / ln(x)`` if ``x`` is the fraction of the window
remaining at the end.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
Returns
-------
w : ndarray
The window, with the maximum value normalized to 1 (though the value 1
does not appear if `M` is even and `sym` is True).
Notes
-----
The Exponential window is defined as
.. math:: w(n) = e^{-|n-center| / \tau}
References
----------
S. Gade and H. Herlufsen, "Windows to FFT analysis (Part I)",
Technical Review 3, Bruel & Kjaer, 1987.
Examples
--------
Plot the symmetric window and its frequency response:
>>> from scipy import signal
>>> from scipy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> M = 51
>>> tau = 3.0
>>> window = signal.exponential(M, tau=tau)
>>> plt.plot(window)
>>> plt.title("Exponential Window (tau=3.0)")
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
>>> plt.figure()
>>> A = fft(window, 2048) / (len(window)/2.0)
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max())))
>>> plt.plot(freq, response)
>>> plt.axis([-0.5, 0.5, -35, 0])
>>> plt.title("Frequency response of the Exponential window (tau=3.0)")
>>> plt.ylabel("Normalized magnitude [dB]")
>>> plt.xlabel("Normalized frequency [cycles per sample]")
This function can also generate non-symmetric windows:
>>> tau2 = -(M-1) / np.log(0.01)
>>> window2 = signal.exponential(M, 0, tau2, False)
>>> plt.figure()
>>> plt.plot(window2)
>>> plt.ylabel("Amplitude")
>>> plt.xlabel("Sample")
"""
if sym and center is not None:
raise ValueError("If sym==True, center must be None.")
if _len_guards(M):
return np.ones(M)
M, needs_trunc = _extend(M, sym)
if center is None:
center = (M-1) / 2
n = np.arange(0, M)
w = np.exp(-np.abs(n-center) / tau)
return _truncate(w, needs_trunc)
def dpss(M, NW, Kmax=None, sym=True, norm=None, return_ratios=False):
"""
Compute the Discrete Prolate Spheroidal Sequences (DPSS).
DPSS (or Slepian sequences) are often used in multitaper power spectral
density estimation (see [1]_). The first window in the sequence can be
used to maximize the energy concentration in the main lobe, and is also
called the Slepian window.
Parameters
----------
M : int
Window length.
NW : float
Standardized half bandwidth corresponding to ``2*NW = BW/f0 = BW*N*dt``
where ``dt`` is taken as 1.
Kmax : int | None, optional
Number of DPSS windows to return (orders ``0`` through ``Kmax-1``).
If None (default), return only a single window of shape ``(M,)``
instead of an array of windows of shape ``(Kmax, M)``.
sym : bool, optional
When True (default), generates a symmetric window, for use in filter
design.
When False, generates a periodic window, for use in spectral analysis.
norm : {2, 'approximate', 'subsample'} | None, optional
If 'approximate' or 'subsample', then the windows are normalized by the
maximum, and a correction scale-factor for even-length windows
is applied either using ``M**2/(M**2+NW)`` ("approximate") or
        an FFT-based subsample shift ("subsample"), see Notes for details.
If None, then "approximate" is used when ``Kmax=None`` and 2 otherwise
(which uses the l2 norm).
return_ratios : bool, optional
If True, also return the concentration ratios in addition to the
windows.
Returns
-------
v : ndarray, shape (Kmax, N) or (N,)
The DPSS windows. Will be 1D if `Kmax` is None.
r : ndarray, shape (Kmax,) or float, optional
The concentration ratios for the windows. Only returned if
`return_ratios` evaluates to True. Will be 0D if `Kmax` is None.
Notes
-----
This computation uses the tridiagonal eigenvector formulation given
in [2]_.
    For the default normalization when ``Kmax=None``, i.e. window-generation
    mode, simply using the l-infinity norm would create a window with two unity
    values, which creates slight normalization differences between even and odd
orders. The approximate correction of ``M**2/float(M**2+NW)`` for even
sample numbers is used to counteract this effect (see Examples below).
For very long signals (e.g., 1e6 elements), it can be useful to compute
windows orders of magnitude shorter and use interpolation (e.g.,
`scipy.interpolate.interp1d`) to obtain tapers of length `M`,
but this in general will not preserve orthogonality between the tapers.
.. versionadded:: 1.1
References
----------
.. [1] Percival DB, Walden WT. Spectral Analysis for Physical Applications:
Multitaper and Conventional Univariate Techniques.
Cambridge University Press; 1993.
.. [2] Slepian, D. Prolate spheroidal wave functions, Fourier analysis, and
uncertainty V: The discrete case. Bell System Technical Journal,
           Volume 57 (1978), 1371-1430.
.. [3] Kaiser, JF, Schafer RW. On the Use of the I0-Sinh Window for
Spectrum Analysis. IEEE Transactions on Acoustics, Speech and
Signal Processing. ASSP-28 (1): 105-107; 1980.
Examples
--------
We can compare the window to `kaiser`, which was invented as an alternative
that was easier to calculate [3]_ (example adapted from
`here <https://ccrma.stanford.edu/~jos/sasp/Kaiser_DPSS_Windows_Compared.html>`_):
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import windows, freqz
>>> N = 51
>>> fig, axes = plt.subplots(3, 2, figsize=(5, 7))
>>> for ai, alpha in enumerate((1, 3, 5)):
... win_dpss = windows.dpss(N, alpha)
... beta = alpha*np.pi
... win_kaiser = windows.kaiser(N, beta)
... for win, c in ((win_dpss, 'k'), (win_kaiser, 'r')):
... win /= win.sum()
... axes[ai, 0].plot(win, color=c, lw=1.)
... axes[ai, 0].set(xlim=[0, N-1], title=r'$\\alpha$ = %s' % alpha,
... ylabel='Amplitude')
... w, h = freqz(win)
... axes[ai, 1].plot(w, 20 * np.log10(np.abs(h)), color=c, lw=1.)
... axes[ai, 1].set(xlim=[0, np.pi],
... title=r'$\\beta$ = %0.2f' % beta,
... ylabel='Magnitude (dB)')
>>> for ax in axes.ravel():
... ax.grid(True)
>>> axes[2, 1].legend(['DPSS', 'Kaiser'])
>>> fig.tight_layout()
>>> plt.show()
And here are examples of the first four windows, along with their
concentration ratios:
>>> M = 512
>>> NW = 2.5
>>> win, eigvals = windows.dpss(M, NW, 4, return_ratios=True)
>>> fig, ax = plt.subplots(1)
>>> ax.plot(win.T, linewidth=1.)
>>> ax.set(xlim=[0, M-1], ylim=[-0.1, 0.1], xlabel='Samples',
... title='DPSS, M=%d, NW=%0.1f' % (M, NW))
>>> ax.legend(['win[%d] (%0.4f)' % (ii, ratio)
... for ii, ratio in enumerate(eigvals)])
>>> fig.tight_layout()
>>> plt.show()
Using a standard :math:`l_{\\infty}` norm would produce two unity values
for even `M`, but only one unity value for odd `M`. This produces uneven
window power that can be counteracted by the approximate correction
``M**2/float(M**2+NW)``, which can be selected by using
``norm='approximate'`` (which is the same as ``norm=None`` when
``Kmax=None``, as is the case here). Alternatively, the slower
``norm='subsample'`` can be used, which uses subsample shifting in the
frequency domain (FFT) to compute the correction:
>>> Ms = np.arange(1, 41)
>>> factors = (50, 20, 10, 5, 2.0001)
>>> energy = np.empty((3, len(Ms), len(factors)))
>>> for mi, M in enumerate(Ms):
... for fi, factor in enumerate(factors):
... NW = M / float(factor)
... # Corrected using empirical approximation (default)
... win = windows.dpss(M, NW)
... energy[0, mi, fi] = np.sum(win ** 2) / np.sqrt(M)
... # Corrected using subsample shifting
... win = windows.dpss(M, NW, norm='subsample')
... energy[1, mi, fi] = np.sum(win ** 2) / np.sqrt(M)
... # Uncorrected (using l-infinity norm)
... win /= win.max()
... energy[2, mi, fi] = np.sum(win ** 2) / np.sqrt(M)
>>> fig, ax = plt.subplots(1)
>>> hs = ax.plot(Ms, energy[2], '-o', markersize=4,
... markeredgecolor='none')
>>> leg = [hs[-1]]
>>> for hi, hh in enumerate(hs):
... h1 = ax.plot(Ms, energy[0, :, hi], '-o', markersize=4,
... color=hh.get_color(), markeredgecolor='none',
... alpha=0.66)
... h2 = ax.plot(Ms, energy[1, :, hi], '-o', markersize=4,
... color=hh.get_color(), markeredgecolor='none',
... alpha=0.33)
... if hi == len(hs) - 1:
... leg.insert(0, h1[0])
... leg.insert(0, h2[0])
>>> ax.set(xlabel='M (samples)', ylabel=r'Power / $\\sqrt{M}$')
>>> ax.legend(leg, ['Uncorrected', r'Corrected: $\\frac{M^2}{M^2+NW}$',
... 'Corrected (subsample)'])
>>> fig.tight_layout()
""" # noqa: E501
if _len_guards(M):
return np.ones(M)
if norm is None:
norm = 'approximate' if Kmax is None else 2
known_norms = (2, 'approximate', 'subsample')
if norm not in known_norms:
raise ValueError('norm must be one of %s, got %s'
% (known_norms, norm))
if Kmax is None:
singleton = True
Kmax = 1
else:
singleton = False
Kmax = operator.index(Kmax)
if not 0 < Kmax <= M:
            raise ValueError('Kmax must be greater than 0 and less than or equal to M')
if NW >= M/2.:
raise ValueError('NW must be less than M/2.')
if NW <= 0:
raise ValueError('NW must be positive')
M, needs_trunc = _extend(M, sym)
W = float(NW) / M
nidx = np.arange(M)
# Here we want to set up an optimization problem to find a sequence
# whose energy is maximally concentrated within band [-W,W].
# Thus, the measure lambda(T,W) is the ratio between the energy within
# that band, and the total energy. This leads to the eigen-system
# (A - (l1)I)v = 0, where the eigenvector corresponding to the largest
# eigenvalue is the sequence with maximally concentrated energy. The
# collection of eigenvectors of this system are called Slepian
# sequences, or discrete prolate spheroidal sequences (DPSS). Only the
# first K, K = 2NW/dt orders of DPSS will exhibit good spectral
# concentration
# [see https://en.wikipedia.org/wiki/Spectral_concentration_problem]
# Here we set up an alternative symmetric tri-diagonal eigenvalue
# problem such that
# (B - (l2)I)v = 0, and v are our DPSS (but eigenvalues l2 != l1)
# the main diagonal = ([N-1-2*t]/2)**2 cos(2PIW), t=[0,1,2,...,N-1]
# and the first off-diagonal = t(N-t)/2, t=[1,2,...,N-1]
# [see Percival and Walden, 1993]
d = ((M - 1 - 2 * nidx) / 2.) ** 2 * np.cos(2 * np.pi * W)
e = nidx[1:] * (M - nidx[1:]) / 2.
# only calculate the highest Kmax eigenvalues
w, windows = linalg.eigh_tridiagonal(
d, e, select='i', select_range=(M - Kmax, M - 1))
w = w[::-1]
windows = windows[:, ::-1].T
# By convention (Percival and Walden, 1993 pg 379)
# * symmetric tapers (k=0,2,4,...) should have a positive average.
fix_even = (windows[::2].sum(axis=1) < 0)
for i, f in enumerate(fix_even):
if f:
windows[2 * i] *= -1
# * antisymmetric tapers should begin with a positive lobe
# (this depends on the definition of "lobe", here we'll take the first
# point above the numerical noise, which should be good enough for
# sufficiently smooth functions, and more robust than relying on an
# algorithm that uses max(abs(w)), which is susceptible to numerical
# noise problems)
thresh = max(1e-7, 1. / M)
for i, w in enumerate(windows[1::2]):
if w[w * w > thresh][0] < 0:
windows[2 * i + 1] *= -1
# Now find the eigenvalues of the original spectral concentration problem
# Use the autocorr sequence technique from Percival and Walden, 1993 pg 390
if return_ratios:
dpss_rxx = _fftautocorr(windows)
r = 4 * W * np.sinc(2 * W * nidx)
r[0] = 2 * W
ratios = np.dot(dpss_rxx, r)
if singleton:
ratios = ratios[0]
# Deal with sym and Kmax=None
if norm != 2:
windows /= windows.max()
if M % 2 == 0:
if norm == 'approximate':
correction = M**2 / float(M**2 + NW)
else:
s = sp_fft.rfft(windows[0])
shift = -(1 - 1./M) * np.arange(1, M//2 + 1)
s[1:] *= 2 * np.exp(-1j * np.pi * shift)
correction = M / s.real.sum()
windows *= correction
# else we're already l2 normed, so do nothing
if needs_trunc:
windows = windows[:, :-1]
if singleton:
windows = windows[0]
return (windows, ratios) if return_ratios else windows
def _fftautocorr(x):
"""Compute the autocorrelation of a real array and crop the result."""
N = x.shape[-1]
use_N = sp_fft.next_fast_len(2*N-1)
x_fft = sp_fft.rfft(x, use_N, axis=-1)
cxy = sp_fft.irfft(x_fft * x_fft.conj(), n=use_N)[:, :N]
# Or equivalently (but in most cases slower):
# cxy = np.array([np.convolve(xx, yy[::-1], mode='full')
# for xx, yy in zip(x, x)])[:, N-1:2*N-1]
return cxy
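# Illustrative sketch (not part of the upstream module): the FFT route in
# _fftautocorr should agree with the direct convolution mentioned in the
# comment above, e.g.
#
#     x = np.random.RandomState(0).randn(3, 8)
#     direct = np.array([np.convolve(xx, xx[::-1], mode='full')
#                        for xx in x])[:, 7:15]
#     np.allclose(_fftautocorr(x), direct)   # expected to be True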
_win_equiv_raw = {
('barthann', 'brthan', 'bth'): (barthann, False),
('bartlett', 'bart', 'brt'): (bartlett, False),
('blackman', 'black', 'blk'): (blackman, False),
('blackmanharris', 'blackharr', 'bkh'): (blackmanharris, False),
('bohman', 'bman', 'bmn'): (bohman, False),
('boxcar', 'box', 'ones',
'rect', 'rectangular'): (boxcar, False),
('chebwin', 'cheb'): (chebwin, True),
('cosine', 'halfcosine'): (cosine, False),
('exponential', 'poisson'): (exponential, True),
('flattop', 'flat', 'flt'): (flattop, False),
('gaussian', 'gauss', 'gss'): (gaussian, True),
('general gaussian', 'general_gaussian',
'general gauss', 'general_gauss', 'ggs'): (general_gaussian, True),
('hamming', 'hamm', 'ham'): (hamming, False),
('hanning', 'hann', 'han'): (hann, False),
('kaiser', 'ksr'): (kaiser, True),
('nuttall', 'nutl', 'nut'): (nuttall, False),
('parzen', 'parz', 'par'): (parzen, False),
('slepian', 'slep', 'optimal', 'dpss', 'dss'): (slepian, True),
('triangle', 'triang', 'tri'): (triang, False),
('tukey', 'tuk'): (tukey, True),
}
# Fill dict with all valid window name strings
_win_equiv = {}
for k, v in _win_equiv_raw.items():
for key in k:
_win_equiv[key] = v[0]
# Keep track of which windows need additional parameters
_needs_param = set()
for k, v in _win_equiv_raw.items():
if v[1]:
_needs_param.update(k)
def get_window(window, Nx, fftbins=True):
"""
Return a window of a given length and type.
Parameters
----------
window : string, float, or tuple
The type of window to create. See below for more details.
Nx : int
The number of samples in the window.
fftbins : bool, optional
If True (default), create a "periodic" window, ready to use with
`ifftshift` and be multiplied by the result of an FFT (see also
:func:`~scipy.fft.fftfreq`).
If False, create a "symmetric" window, for use in filter design.
Returns
-------
get_window : ndarray
Returns a window of length `Nx` and type `window`
Notes
-----
Window types:
- `~scipy.signal.windows.boxcar`
- `~scipy.signal.windows.triang`
- `~scipy.signal.windows.blackman`
- `~scipy.signal.windows.hamming`
- `~scipy.signal.windows.hann`
- `~scipy.signal.windows.bartlett`
- `~scipy.signal.windows.flattop`
- `~scipy.signal.windows.parzen`
- `~scipy.signal.windows.bohman`
- `~scipy.signal.windows.blackmanharris`
- `~scipy.signal.windows.nuttall`
- `~scipy.signal.windows.barthann`
- `~scipy.signal.windows.kaiser` (needs beta)
- `~scipy.signal.windows.gaussian` (needs standard deviation)
- `~scipy.signal.windows.general_gaussian` (needs power, width)
- `~scipy.signal.windows.slepian` (needs width)
- `~scipy.signal.windows.dpss` (needs normalized half-bandwidth)
- `~scipy.signal.windows.chebwin` (needs attenuation)
- `~scipy.signal.windows.exponential` (needs decay scale)
- `~scipy.signal.windows.tukey` (needs taper fraction)
If the window requires no parameters, then `window` can be a string.
If the window requires parameters, then `window` must be a tuple
with the first argument the string name of the window, and the next
arguments the needed parameters.
If `window` is a floating point number, it is interpreted as the beta
parameter of the `~scipy.signal.windows.kaiser` window.
Each of the window types listed above is also the name of
a function that can be called directly to create a window of
that type.
Examples
--------
>>> from scipy import signal
>>> signal.get_window('triang', 7)
array([ 0.125, 0.375, 0.625, 0.875, 0.875, 0.625, 0.375])
>>> signal.get_window(('kaiser', 4.0), 9)
array([ 0.08848053, 0.29425961, 0.56437221, 0.82160913, 0.97885093,
0.97885093, 0.82160913, 0.56437221, 0.29425961])
>>> signal.get_window(4.0, 9)
array([ 0.08848053, 0.29425961, 0.56437221, 0.82160913, 0.97885093,
0.97885093, 0.82160913, 0.56437221, 0.29425961])
"""
sym = not fftbins
try:
beta = float(window)
except (TypeError, ValueError):
args = ()
if isinstance(window, tuple):
winstr = window[0]
if len(window) > 1:
args = window[1:]
elif isinstance(window, string_types):
if window in _needs_param:
raise ValueError("The '" + window + "' window needs one or "
"more parameters -- pass a tuple.")
else:
winstr = window
else:
raise ValueError("%s as window type is not supported." %
str(type(window)))
try:
winfunc = _win_equiv[winstr]
except KeyError:
raise ValueError("Unknown window type.")
params = (Nx,) + args + (sym,)
else:
winfunc = kaiser
params = (Nx, beta, sym)
return winfunc(*params)
| bsd-3-clause |
andyxhadji/incubator-airflow | setup.py | 1 | 13241 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from setuptools import setup, find_packages, Command
from setuptools.command.test import test as TestCommand
import imp
import logging
import os
import sys
import subprocess
logger = logging.getLogger(__name__)
# Kept manually in sync with airflow.__version__
version = imp.load_source(
'airflow.version', os.path.join('airflow', 'version.py')).version
PY3 = sys.version_info[0] == 3
# See LEGAL-362
def verify_gpl_dependency():
# The Read the Docs build environment [1] does a pip install of Airflow which cannot
# be overridden with custom environment variables, so we detect the READTHEDOCS env
# var they provide to set the env var that avoids the GPL dependency on install when
# building the docs site.
# [1]: http://docs.readthedocs.io/en/latest/builds.html#build-environment
if os.getenv("READTHEDOCS") == "True":
os.environ["SLUGIFY_USES_TEXT_UNIDECODE"] = "yes"
if (not os.getenv("AIRFLOW_GPL_UNIDECODE")
and not os.getenv("SLUGIFY_USES_TEXT_UNIDECODE") == "yes"):
raise RuntimeError("By default one of Airflow's dependencies installs a GPL "
"dependency (unidecode). To avoid this dependency set "
"SLUGIFY_USES_TEXT_UNIDECODE=yes in your environment when you "
"install or upgrade Airflow. To force installing the GPL "
"version set AIRFLOW_GPL_UNIDECODE")
class Tox(TestCommand):
user_options = [('tox-args=', None, "Arguments to pass to tox")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.tox_args = ''
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import tox
errno = tox.cmdline(args=self.tox_args.split())
sys.exit(errno)
class CleanCommand(Command):
"""Custom clean command to tidy up the project root."""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
os.system('rm -vrf ./build ./dist ./*.pyc ./*.tgz ./*.egg-info')
class CompileAssets(Command):
"""
Custom compile assets command to compile and build the frontend
assets using npm and webpack.
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
subprocess.call('./airflow/www_rbac/compile_assets.sh')
def git_version(version):
"""
Return a version to identify the state of the underlying git repo. The version will
indicate whether the head of the current git-backed working directory is tied to a
    release tag or not: it will indicate the former with a 'release:{version}' prefix
and the latter with a 'dev0' prefix. Following the prefix will be a sha of the current
branch head. Finally, a "dirty" suffix is appended to indicate that uncommitted
changes are present.
"""
repo = None
try:
import git
repo = git.Repo('.git')
except ImportError:
logger.warning('gitpython not found: Cannot compute the git version.')
return ''
except Exception as e:
logger.warning('Cannot compute the git version. {}'.format(e))
return ''
if repo:
sha = repo.head.commit.hexsha
if repo.is_dirty():
return '.dev0+{sha}.dirty'.format(sha=sha)
# commit is clean
return '.release:{version}+{sha}'.format(version=version, sha=sha)
else:
return 'no_git_version'
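# For orientation, the suffix returned above takes roughly one of these forms
# (version number and shas are made up for illustration):
#
#     '.release:1.10.0+abc123...'   # clean checkout at a release commit
#     '.dev0+abc123....dirty'       # uncommitted local changes present
#     ''                            # git repo / gitpython not available
#     'no_git_version'              # fallback if no repo object was obtained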
def write_version(filename=os.path.join(*['airflow',
'git_version'])):
text = "{}".format(git_version(version))
with open(filename, 'w') as a:
a.write(text)
async_packages = [
'greenlet>=0.4.9',
'eventlet>= 0.9.7',
'gevent>=0.13'
]
atlas = ['atlasclient>=0.1.2']
azure_blob_storage = ['azure-storage>=0.34.0']
azure_data_lake = [
'azure-mgmt-resource==1.2.2',
'azure-mgmt-datalake-store==0.4.0',
'azure-datalake-store==0.0.19'
]
cassandra = ['cassandra-driver>=3.13.0']
celery = [
'celery>=4.1.1, <4.2.0',
'flower>=0.7.3, <1.0'
]
cgroups = [
'cgroupspy>=0.1.4',
]
# major update coming soon, clamp to 0.x
cloudant = ['cloudant>=0.5.9,<2.0']
crypto = ['cryptography>=0.9.3']
dask = [
'distributed>=1.17.1, <2'
]
databricks = ['requests>=2.5.1, <3']
datadog = ['datadog>=0.14.0']
doc = [
'mock',
'sphinx>=1.2.3',
'sphinx-argparse>=0.1.13',
'sphinx-rtd-theme>=0.1.6',
'Sphinx-PyPI-upload>=0.2.1'
]
docker = ['docker>=2.0.0']
druid = ['pydruid>=0.4.1']
elasticsearch = [
'elasticsearch>=5.0.0,<6.0.0',
'elasticsearch-dsl>=5.0.0,<6.0.0'
]
emr = ['boto3>=1.0.0, <1.8.0']
gcp_api = [
'httplib2>=0.9.2',
'google-api-python-client>=1.6.0, <2.0.0dev',
'google-auth>=1.0.0, <2.0.0dev',
'google-auth-httplib2>=0.0.1',
'google-cloud-container>=0.1.1',
'PyOpenSSL',
'pandas-gbq'
]
github_enterprise = ['Flask-OAuthlib>=0.9.1']
hdfs = ['snakebite>=2.7.8']
hive = [
'hmsclient>=0.1.0',
'pyhive>=0.6.0',
]
jdbc = ['jaydebeapi>=1.1.1']
jenkins = ['python-jenkins>=0.4.15']
jira = ['JIRA>1.0.7']
kerberos = ['pykerberos>=1.1.13',
'requests_kerberos>=0.10.0',
'thrift_sasl>=0.2.0',
'snakebite[kerberos]>=2.7.8']
kubernetes = ['kubernetes>=3.0.0',
'cryptography>=2.0.0']
ldap = ['ldap3>=0.9.9.1']
mssql = ['pymssql>=2.1.1']
mysql = ['mysqlclient>=1.3.6']
oracle = ['cx_Oracle>=5.1.2']
password = [
'bcrypt>=2.0.0',
'flask-bcrypt>=0.7.1',
]
pinot = ['pinotdb>=0.1.1']
postgres = ['psycopg2-binary>=2.7.4']
qds = ['qds-sdk>=1.9.6']
rabbitmq = ['librabbitmq>=1.6.1']
redis = ['redis>=2.10.5']
s3 = ['boto3>=1.7.0, <1.8.0']
salesforce = ['simple-salesforce>=0.72']
samba = ['pysmbclient>=0.1.3']
segment = ['analytics-python>=1.2.9']
sendgrid = ['sendgrid>=5.2.0']
slack = ['slackclient>=1.0.0']
mongo = ['pymongo>=3.6.0']
snowflake = ['snowflake-connector-python>=1.5.2',
'snowflake-sqlalchemy>=1.1.0']
ssh = ['paramiko>=2.1.1', 'pysftp>=0.2.9', 'sshtunnel>=0.1.4,<0.2']
statsd = ['statsd>=3.0.1, <4.0']
vertica = ['vertica-python>=0.5.1']
webhdfs = ['hdfs[dataframe,avro,kerberos]>=2.0.4']
winrm = ['pywinrm==0.2.2']
zendesk = ['zdesk']
all_dbs = postgres + mysql + hive + mssql + hdfs + vertica + cloudant + druid + pinot \
+ cassandra + mongo
devel = [
'click',
'freezegun',
'jira',
'lxml>=4.0.0',
'mock',
'mongomock',
'moto==1.1.19',
'nose',
'nose-ignore-docstring==0.2',
'nose-timer',
'parameterized',
'paramiko',
'pysftp',
'pywinrm',
'qds-sdk>=1.9.6',
'rednose',
'requests_mock',
'flake8'
]
if not PY3:
devel += ['unittest2']
devel_minreq = devel + kubernetes + mysql + doc + password + s3 + cgroups
devel_hadoop = devel_minreq + hive + hdfs + webhdfs + kerberos
devel_all = (sendgrid + devel + all_dbs + doc + samba + s3 + slack + crypto + oracle +
docker + ssh + kubernetes + celery + azure_blob_storage + redis + gcp_api +
datadog + zendesk + jdbc + ldap + kerberos + password + webhdfs + jenkins +
druid + pinot + segment + snowflake + elasticsearch + azure_data_lake +
atlas)
# Snakebite & Google Cloud Dataflow are not Python 3 compatible :'(
if PY3:
devel_ci = [package for package in devel_all if package not in
['snakebite>=2.7.8', 'snakebite[kerberos]>=2.7.8']]
else:
devel_ci = devel_all
def do_setup():
verify_gpl_dependency()
write_version()
setup(
name='apache-airflow',
description='Programmatically author, schedule and monitor data pipelines',
license='Apache License 2.0',
version=version,
packages=find_packages(exclude=['tests*']),
package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.8.3, <0.9',
'bleach~=2.1.3',
'configparser>=3.5.0, <3.6.0',
'croniter>=0.3.17, <0.4',
'dill>=0.2.2, <0.3',
'flask>=0.12.4, <0.13',
'flask-appbuilder>=1.11.1, <2.0.0',
'flask-admin==1.4.1',
'flask-caching>=1.3.3, <1.4.0',
'flask-login==0.2.11',
'flask-swagger==0.2.13',
'flask-wtf>=0.14.2, <0.15',
'funcsigs==1.0.0',
'future>=0.16.0, <0.17',
'gitpython>=2.0.2',
'gunicorn>=19.4.0, <20.0',
'iso8601>=0.1.12',
'jinja2>=2.7.3, <2.9.0',
'lxml>=4.0.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.17.1, <1.0.0',
'pendulum==1.4.4',
'psutil>=4.2.0, <6.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1, <2.2',
'python-dateutil>=2.3, <3',
'python-nvd3==0.15.0',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=1.1.15, <1.2.0',
'tabulate>=0.7.5, <=0.8.2',
'tenacity==4.8.0',
'thrift>=0.9.2',
'tzlocal>=1.4',
'unicodecsv>=0.14.1',
'werkzeug>=0.14.1, <0.15.0',
'zope.deprecation>=4.0, <5.0',
],
setup_requires=[
'docutils>=0.14, <1.0',
],
extras_require={
'all': devel_all,
'devel_ci': devel_ci,
'all_dbs': all_dbs,
'atlas': atlas,
'async': async_packages,
'azure_blob_storage': azure_blob_storage,
'azure_data_lake': azure_data_lake,
'cassandra': cassandra,
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'crypto': crypto,
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'doc': doc,
'docker': docker,
'druid': druid,
'elasticsearch': elasticsearch,
'emr': emr,
'gcp_api': gcp_api,
'github_enterprise': github_enterprise,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'jira': jira,
'kerberos': kerberos,
'kubernetes': kubernetes,
'ldap': ldap,
'mongo': mongo,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'password': password,
'pinot': pinot,
'postgres': postgres,
'qds': qds,
'rabbitmq': rabbitmq,
'redis': redis,
's3': s3,
'salesforce': salesforce,
'samba': samba,
'sendgrid': sendgrid,
'segment': segment,
'slack': slack,
'snowflake': snowflake,
'ssh': ssh,
'statsd': statsd,
'vertica': vertica,
'webhdfs': webhdfs,
'winrm': winrm
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='dev@airflow.incubator.apache.org',
url='http://airflow.incubator.apache.org/',
download_url=(
'https://dist.apache.org/repos/dist/release/incubator/airflow/' + version),
cmdclass={
'test': Tox,
'extra_clean': CleanCommand,
'compile_assets': CompileAssets
},
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
)
if __name__ == "__main__":
do_setup()
| apache-2.0 |
lpsinger/astropy | astropy/visualization/wcsaxes/patches.py | 3 | 6832 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.patches import Polygon
from astropy import units as u
from astropy.coordinates.representation import UnitSphericalRepresentation
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product
__all__ = ['Quadrangle', 'SphericalCircle']
# Monkey-patch the docs to fix CapStyle and JoinStyle subs.
# TODO! delete when upstream fix matplotlib/matplotlib#19839
Polygon.__init__.__doc__ = Polygon.__init__.__doc__.replace(
"`.CapStyle`", "``matplotlib._enums.CapStyle``")
Polygon.__init__.__doc__ = Polygon.__init__.__doc__.replace(
"`.JoinStyle`", "``matplotlib._enums.JoinStyle``")
Polygon.set_capstyle.__doc__ = Polygon.set_capstyle.__doc__.replace(
"`.CapStyle`", "``matplotlib._enums.CapStyle``")
Polygon.set_joinstyle.__doc__ = Polygon.set_joinstyle.__doc__.replace(
"`.JoinStyle`", "``matplotlib._enums.JoinStyle``")
def _rotate_polygon(lon, lat, lon0, lat0):
"""
Given a polygon with vertices defined by (lon, lat), rotate the polygon
such that the North pole of the spherical coordinates is now at (lon0,
lat0). Therefore, to end up with a polygon centered on (lon0, lat0), the
polygon should initially be drawn around the North pole.
"""
# Create a representation object
polygon = UnitSphericalRepresentation(lon=lon, lat=lat)
# Determine rotation matrix to make it so that the circle is centered
# on the correct longitude/latitude.
m1 = rotation_matrix(-(0.5 * np.pi * u.radian - lat0), axis='y')
m2 = rotation_matrix(-lon0, axis='z')
transform_matrix = matrix_product(m2, m1)
# Apply 3D rotation
polygon = polygon.to_cartesian()
polygon = polygon.transform(transform_matrix)
polygon = UnitSphericalRepresentation.from_cartesian(polygon)
return polygon.lon, polygon.lat
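# Illustrative sketch (not part of the upstream module): a small ring drawn
# around the North pole should come back centred on (lon0, lat0), e.g.
#
#     lon = np.linspace(0., 2 * np.pi, 8) * u.radian
#     lat = np.repeat(0.5 * np.pi - 0.1, 8) * u.radian   # ring near the pole
#     new_lon, new_lat = _rotate_polygon(lon, lat, 40 * u.deg, 20 * u.deg)
#     # new_lat values then cluster around 20 deg, new_lon around 40 deg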
class SphericalCircle(Polygon):
"""
Create a patch representing a spherical circle - that is, a circle that is
formed of all the points that are within a certain angle of the central
coordinates on a sphere. Here we assume that latitude goes from -90 to +90
This class is needed in cases where the user wants to add a circular patch
to a celestial image, since otherwise the circle will be distorted, because
a fixed interval in longitude corresponds to a different angle on the sky
depending on the latitude.
Parameters
----------
center : tuple or `~astropy.units.Quantity` ['angle']
This can be either a tuple of two `~astropy.units.Quantity` objects, or
a single `~astropy.units.Quantity` array with two elements.
radius : `~astropy.units.Quantity` ['angle']
The radius of the circle
resolution : int, optional
The number of points that make up the circle - increase this to get a
smoother circle.
vertex_unit : `~astropy.units.Unit`
The units in which the resulting polygon should be defined - this
should match the unit that the transformation (e.g. the WCS
transformation) expects as input.
Notes
-----
Additional keyword arguments are passed to `~matplotlib.patches.Polygon`
"""
def __init__(self, center, radius, resolution=100, vertex_unit=u.degree, **kwargs):
# Extract longitude/latitude, either from a tuple of two quantities, or
# a single 2-element Quantity.
longitude, latitude = center
# Start off by generating the circle around the North pole
lon = np.linspace(0., 2 * np.pi, resolution + 1)[:-1] * u.radian
lat = np.repeat(0.5 * np.pi - radius.to_value(u.radian), resolution) * u.radian
lon, lat = _rotate_polygon(lon, lat, longitude, latitude)
# Extract new longitude/latitude in the requested units
lon = lon.to_value(vertex_unit)
lat = lat.to_value(vertex_unit)
# Create polygon vertices
vertices = np.array([lon, lat]).transpose()
super().__init__(vertices, **kwargs)
class Quadrangle(Polygon):
"""
Create a patch representing a latitude-longitude quadrangle.
The edges of the quadrangle lie on two lines of constant longitude and two
lines of constant latitude (or the equivalent component names in the
coordinate frame of interest, such as right ascension and declination).
Note that lines of constant latitude are not great circles.
Unlike `matplotlib.patches.Rectangle`, the edges of this patch will render
as curved lines if appropriate for the WCS transformation.
Parameters
----------
anchor : tuple or `~astropy.units.Quantity` ['angle']
This can be either a tuple of two `~astropy.units.Quantity` objects, or
a single `~astropy.units.Quantity` array with two elements.
width : `~astropy.units.Quantity` ['angle']
The width of the quadrangle in longitude (or, e.g., right ascension)
height : `~astropy.units.Quantity` ['angle']
The height of the quadrangle in latitude (or, e.g., declination)
resolution : int, optional
The number of points that make up each side of the quadrangle -
increase this to get a smoother quadrangle.
vertex_unit : `~astropy.units.Unit` ['angle']
The units in which the resulting polygon should be defined - this
should match the unit that the transformation (e.g. the WCS
transformation) expects as input.
Notes
-----
Additional keyword arguments are passed to `~matplotlib.patches.Polygon`
"""
def __init__(self, anchor, width, height, resolution=100, vertex_unit=u.degree, **kwargs):
# Extract longitude/latitude, either from a tuple of two quantities, or
# a single 2-element Quantity.
longitude, latitude = u.Quantity(anchor).to_value(vertex_unit)
# Convert the quadrangle dimensions to the appropriate units
width = width.to_value(vertex_unit)
height = height.to_value(vertex_unit)
# Create progressions in longitude and latitude
lon_seq = longitude + np.linspace(0, width, resolution + 1)
lat_seq = latitude + np.linspace(0, height, resolution + 1)
# Trace the path of the quadrangle
lon = np.concatenate([lon_seq[:-1],
np.repeat(lon_seq[-1], resolution),
np.flip(lon_seq[1:]),
np.repeat(lon_seq[0], resolution)])
lat = np.concatenate([np.repeat(lat_seq[0], resolution),
lat_seq[:-1],
np.repeat(lat_seq[-1], resolution),
np.flip(lat_seq[1:])])
# Create polygon vertices
vertices = np.array([lon, lat]).transpose()
super().__init__(vertices, **kwargs)
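# Minimal usage sketch (not part of the upstream module; assumes `ax` is an
# existing WCSAxes instance with a celestial WCS):
#
#     from astropy import units as u
#     from astropy.visualization.wcsaxes import SphericalCircle, Quadrangle
#     c = SphericalCircle((30 * u.deg, 45 * u.deg), 2 * u.deg,
#                         edgecolor='white', facecolor='none',
#                         transform=ax.get_transform('icrs'))
#     q = Quadrangle((28 * u.deg, 43 * u.deg), 4 * u.deg, 4 * u.deg,
#                    edgecolor='cyan', facecolor='none',
#                    transform=ax.get_transform('icrs'))
#     ax.add_patch(c)
#     ax.add_patch(q)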
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/core/config.py | 11 | 22966 | """
The config module holds package-wide configurables and provides
a uniform API for working with them.
Overview
========
This module supports the following requirements:
- options are referenced using keys in dot.notation, e.g. "x.y.option - z".
- keys are case-insensitive.
- functions should accept partial/regex keys, when unambiguous.
- options can be registered by modules at import time.
- options can be registered at init-time (via core.config_init)
- options have a default value, and (optionally) a description and
validation function associated with them.
- options can be deprecated, in which case referencing them
should produce a warning.
- deprecated options can optionally be rerouted to a replacement
so that accessing a deprecated option reroutes to a differently
named option.
- options can be reset to their default value.
- all options can be reset to their default value at once.
- all options in a certain sub-namespace can be reset at once.
- the user can set / get / reset or ask for the description of an option.
- a developer can register and mark an option as deprecated.
- you can register a callback to be invoked when the option value
is set or reset. Changing the stored value is considered misuse, but
is not verboten.
Implementation
==============
- Data is stored using nested dictionaries, and should be accessed
through the provided API.
- "Registered options" and "Deprecated options" have metadata associated
  with them, which are stored in auxiliary dictionaries keyed on the
fully-qualified key, e.g. "x.y.z.option".
- the config_init module is imported by the package's __init__.py file.
  Placing any register_option() calls there will ensure those options
are available as soon as pandas is loaded. If you use register_option
in a module, it will only be available after that module is imported,
which you should be aware of.
- `config_prefix` is a context_manager (for use with the `with` keyword)
which can save developers some typing, see the docstring.
"""
import re
from collections import namedtuple
from contextlib import contextmanager
import warnings
from pandas.compat import map, lmap, u
import pandas.compat as compat
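# A short end-user sketch of the API described in the module docstring
# (illustrative; 'display.max_rows' is registered later, in
# pandas.core.config_init):
#
#     import pandas as pd
#     pd.get_option('display.max_rows')        # read the current value
#     pd.set_option('display.max_rows', 100)   # change it
#     pd.options.display.max_rows              # same value, attribute style
#     with pd.option_context('display.max_rows', 10):
#         pass                                 # temporarily overridden here
#     pd.reset_option('display.max_rows')      # back to the default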
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
RegisteredOption = namedtuple('RegisteredOption',
'key defval doc validator cb')
_deprecated_options = {} # holds deprecated option metadata
_registered_options = {} # holds registered option metadata
_global_config = {} # holds the current values for registered options
_reserved_keys = ['all'] # keys which have a special meaning
class OptionError(AttributeError, KeyError):
"""Exception for pandas.options, backwards compatible with KeyError
checks
"""
#
# User API
def _get_single_key(pat, silent):
keys = _select_options(pat)
if len(keys) == 0:
if not silent:
_warn_if_deprecated(pat)
raise OptionError('No such keys(s): %r' % pat)
if len(keys) > 1:
raise OptionError('Pattern matched multiple keys')
key = keys[0]
if not silent:
_warn_if_deprecated(key)
key = _translate_key(key)
return key
def _get_option(pat, silent=False):
key = _get_single_key(pat, silent)
# walk the nested dict
root, k = _get_root(key)
return root[k]
def _set_option(*args, **kwargs):
    # must have at least 1 arg; deal with constraints later
nargs = len(args)
if not nargs or nargs % 2 != 0:
raise ValueError("Must provide an even number of non-keyword "
"arguments")
# default to false
silent = kwargs.pop('silent', False)
if kwargs:
raise TypeError('_set_option() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
for k, v in zip(args[::2], args[1::2]):
key = _get_single_key(k, silent)
o = _get_registered_option(key)
if o and o.validator:
o.validator(v)
# walk the nested dict
root, k = _get_root(key)
root[k] = v
if o.cb:
if silent:
with warnings.catch_warnings(record=True):
o.cb(key)
else:
o.cb(key)
def _describe_option(pat='', _print_desc=True):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such keys(s)')
s = u('')
for k in keys: # filter by pat
s += _build_option_description(k)
if _print_desc:
print(s)
else:
return s
def _reset_option(pat, silent=False):
keys = _select_options(pat)
if len(keys) == 0:
raise OptionError('No such keys(s)')
if len(keys) > 1 and len(pat) < 4 and pat != 'all':
raise ValueError('You must specify at least 4 characters when '
'resetting multiple keys, use the special keyword '
'"all" to reset all the options to their default '
'value')
for k in keys:
_set_option(k, _registered_options[k].defval, silent=silent)
def get_default_val(pat):
key = _get_single_key(pat, silent=True)
return _get_registered_option(key).defval
class DictWrapper(object):
""" provide attribute-style access to a nested dict"""
def __init__(self, d, prefix=""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __setattr__(self, key, val):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
# you can't set new keys
        # and you can't overwrite subtrees
if key in self.d and not isinstance(self.d[key], dict):
_set_option(prefix, val)
else:
raise OptionError("You can only set the value of existing options")
def __getattr__(self, key):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
v = object.__getattribute__(self, "d")[key]
if isinstance(v, dict):
return DictWrapper(v, prefix)
else:
return _get_option(prefix)
def __dir__(self):
return list(self.d.keys())
# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To do this, we use the
# class below, which wraps functions inside a callable and converts
# __doc__ into a property function. The docstrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.
class CallableDynamicDoc(object):
def __init__(self, func, doc_tmpl):
self.__doc_tmpl__ = doc_tmpl
self.__func__ = func
def __call__(self, *args, **kwds):
return self.__func__(*args, **kwds)
@property
def __doc__(self):
opts_desc = _describe_option('all', _print_desc=False)
opts_list = pp_options_list(list(_registered_options.keys()))
return self.__doc_tmpl__.format(opts_desc=opts_desc,
opts_list=opts_list)
_get_option_tmpl = """
get_option(pat)
Retrieves the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_set_option_tmpl = """
set_option(pat, value)
Sets the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
value :
new value of option.
Returns
-------
None
Raises
------
OptionError if no such option exists
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_describe_option_tmpl = """
describe_option(pat, _print_desc=True)
Prints the description for one or more registered options.
Call with no arguments to get a listing for all registered options.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a unicode string
(for testing).
Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
_reset_option_tmpl = """
reset_option(pat)
Reset one or more options to their default value.
Pass "all" as argument to reset all options.
Available options:
{opts_list}
Parameters
----------
pat : str/regex
If specified only options matching `prefix*` will be reset.
Note: partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
Notes
-----
The available options with its descriptions:
{opts_desc}
"""
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
options = DictWrapper(_global_config)
#
# Functions for use by pandas developers, in addition to the user API
class option_context(object):
"""
Context manager to temporarily set options in the `with` statement context.
You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
Examples
--------
>>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
...
"""
def __init__(self, *args):
if not (len(args) % 2 == 0 and len(args) >= 2):
            raise ValueError('Need to invoke as '
                             'option_context(pat, val, [(pat, val), ...]).')
self.ops = list(zip(args[::2], args[1::2]))
def __enter__(self):
undo = []
for pat, val in self.ops:
undo.append((pat, _get_option(pat, silent=True)))
self.undo = undo
for pat, val in self.ops:
_set_option(pat, val, silent=True)
def __exit__(self, *args):
if self.undo:
for pat, val in self.undo:
_set_option(pat, val, silent=True)
def register_option(key, defval, doc='', validator=None, cb=None):
"""Register an option in the package-wide pandas config object
Parameters
----------
key - a fully-qualified key, e.g. "x.y.option - z".
defval - the default value of the option
doc - a string description of the option
validator - a function of a single argument, should raise `ValueError` if
called with a value which is not a legal value for the option.
cb - a function of a single argument "key", which is called
immediately after an option value is set/reset. key is
the full name of the option.
Returns
-------
Nothing.
Raises
------
ValueError if `validator` is specified and `defval` is not a valid value.
"""
import tokenize
import keyword
key = key.lower()
if key in _registered_options:
raise OptionError("Option '%s' has already been registered" % key)
if key in _reserved_keys:
raise OptionError("Option '%s' is a reserved key" % key)
# the default value should be legal
if validator:
validator(defval)
# walk the nested dict, creating dicts as needed along the path
path = key.split('.')
for k in path:
if not bool(re.match('^' + tokenize.Name + '$', k)):
raise ValueError("%s is not a valid identifier" % k)
if keyword.iskeyword(k):
raise ValueError("%s is a python keyword" % k)
cursor = _global_config
for i, p in enumerate(path[:-1]):
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option"
% '.'.join(path[:i]))
if p not in cursor:
cursor[p] = {}
cursor = cursor[p]
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option" %
'.'.join(path[:-1]))
cursor[path[-1]] = defval # initialize
# save the option metadata
_registered_options[key] = RegisteredOption(key=key, defval=defval,
doc=doc, validator=validator,
cb=cb)
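# Illustrative developer-side sketch (not part of the module; the key
# 'my_module.width' is a made-up example, not a real pandas option):
#
#     import pandas.core.config as cf
#     cf.register_option('my_module.width', 80, 'rendering width, in chars',
#                        validator=cf.is_type_factory(int))
#     cf.get_option('my_module.width')         # -> 80
#     cf.set_option('my_module.width', '80')   # raises ValueError (validator)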
def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
"""
    Mark option `key` as deprecated. If code attempts to access this option,
a warning will be produced, using `msg` if given, or a default message
if not.
if `rkey` is given, any access to the key will be re-routed to `rkey`.
    Neither the existence of `key` nor that of `rkey` is checked. If they
    do not exist, any subsequent access will fail as usual, after the
deprecation warning is given.
Parameters
----------
key - the name of the option to be deprecated. must be a fully-qualified
option name (e.g "x.y.z.rkey").
msg - (Optional) a warning message to output when the key is referenced.
if no message is given a default message will be emitted.
rkey - (Optional) the name of an option to reroute access to.
If specified, any referenced `key` will be re-routed to `rkey`
including set/get/reset.
rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
used by the default message if no `msg` is specified.
removal_ver - (Optional) specifies the version in which this option will
be removed. used by the default message if no `msg`
is specified.
Returns
-------
Nothing
Raises
------
OptionError - if key has already been deprecated.
"""
key = key.lower()
if key in _deprecated_options:
raise OptionError("Option '%s' has already been defined as deprecated."
% key)
_deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
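# Illustrative rerouting sketch (made-up keys, not real pandas options; the
# old key is registered too so that lookups can find it and reroute):
#
#     import pandas.core.config as cf
#     cf.register_option('my_module.new_name', 5)
#     cf.register_option('my_module.old_name', 5)
#     cf.deprecate_option('my_module.old_name', rkey='my_module.new_name')
#     cf.set_option('my_module.new_name', 7)
#     cf.get_option('my_module.old_name')   # warns, then returns 7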
#
# functions internal to the module
def _select_options(pat):
"""returns a list of keys matching `pat`
if pat=="all", returns all registered options
"""
# short-circuit for exact key
if pat in _registered_options:
return [pat]
# else look through all of them
keys = sorted(_registered_options.keys())
if pat == 'all': # reserved key
return keys
return [k for k in keys if re.search(pat, k, re.I)]
def _get_root(key):
path = key.split('.')
cursor = _global_config
for p in path[:-1]:
cursor = cursor[p]
return cursor, path[-1]
def _is_deprecated(key):
""" Returns True if the given option has been deprecated """
key = key.lower()
return key in _deprecated_options
def _get_deprecated_option(key):
"""
Retrieves the metadata for a deprecated option, if `key` is deprecated.
Returns
-------
DeprecatedOption (namedtuple) if key is deprecated, None otherwise
"""
try:
d = _deprecated_options[key]
except KeyError:
return None
else:
return d
def _get_registered_option(key):
"""
Retrieves the option metadata if `key` is a registered option.
Returns
-------
    RegisteredOption (namedtuple) if `key` is a registered option, None otherwise
"""
return _registered_options.get(key)
def _translate_key(key):
"""
if key id deprecated and a replacement key defined, will return the
replacement key, otherwise returns `key` as - is
"""
d = _get_deprecated_option(key)
if d:
return d.rkey or key
else:
return key
def _warn_if_deprecated(key):
"""
Checks if `key` is a deprecated option and if so, prints a warning.
Returns
-------
bool - True if `key` is deprecated, False otherwise.
"""
d = _get_deprecated_option(key)
if d:
if d.msg:
print(d.msg)
warnings.warn(d.msg, DeprecationWarning)
else:
msg = "'%s' is deprecated" % key
if d.removal_ver:
msg += ' and will be removed in %s' % d.removal_ver
if d.rkey:
msg += ", please use '%s' instead." % d.rkey
else:
msg += ', please refrain from using it.'
warnings.warn(msg, DeprecationWarning)
return True
return False
def _build_option_description(k):
""" Builds a formatted description of a registered option and prints it """
o = _get_registered_option(k)
d = _get_deprecated_option(k)
s = u('%s ') % k
    if o and o.doc:
s += '\n'.join(o.doc.strip().split('\n'))
else:
s += 'No description available.'
if o:
s += u('\n [default: %s] [currently: %s]') % (o.defval,
_get_option(k, True))
if d:
s += u('\n (Deprecated')
s += (u(', use `%s` instead.') % d.rkey if d.rkey else '')
s += u(')')
s += '\n\n'
return s
def pp_options_list(keys, width=80, _print=False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
from itertools import groupby
def pp(name, ks):
pfx = ('- ' + name + '.[' if name else '')
ls = wrap(', '.join(ks), width, initial_indent=pfx,
subsequent_indent=' ', break_long_words=False)
if ls and ls[-1] and name:
ls[-1] = ls[-1] + ']'
return ls
ls = []
singles = [x for x in sorted(keys) if x.find('.') < 0]
if singles:
ls += pp('', singles)
keys = [x for x in keys if x.find('.') >= 0]
for k, g in groupby(sorted(keys), lambda x: x[:x.rfind('.')]):
ks = [x[len(k) + 1:] for x in list(g)]
ls += pp(k, ks)
s = '\n'.join(ls)
if _print:
print(s)
else:
return s
#
# helpers
@contextmanager
def config_prefix(prefix):
"""contextmanager for multiple invocations of API with a common prefix
supported API functions: (register / get / set )__option
    Warning: This is not thread-safe, and won't work properly if you import
the API functions into your module using the "from x import y" construct.
Example:
import pandas.core.config as cf
with cf.config_prefix("display.font"):
cf.register_option("color", "red")
cf.register_option("size", " 5 pt")
    cf.set_option("size", " 6 pt")
    cf.get_option("size")
...
etc'
will register options "display.font.color", "display.font.size", set the
value of "display.font.size"... and so on.
"""
# Note: reset_option relies on set_option, and on key directly
# it does not fit in to this monkey-patching scheme
global register_option, get_option, set_option, reset_option
def wrap(func):
def inner(key, *args, **kwds):
pkey = '%s.%s' % (prefix, key)
return func(pkey, *args, **kwds)
return inner
_register_option = register_option
_get_option = get_option
_set_option = set_option
set_option = wrap(set_option)
get_option = wrap(get_option)
register_option = wrap(register_option)
yield None
set_option = _set_option
get_option = _get_option
register_option = _register_option
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type):
"""
Parameters
----------
`_type` - a type to be compared against (e.g. type(x) == `_type`)
Returns
-------
validator - a function of a single argument x , which raises
ValueError if type(x) is not equal to `_type`
"""
def inner(x):
if type(x) != _type:
raise ValueError("Value must have type '%s'" % str(_type))
return inner
def is_instance_factory(_type):
"""
Parameters
----------
`_type` - the type to be checked against
Returns
-------
validator - a function of a single argument x , which raises
ValueError if x is not an instance of `_type`
"""
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
from pandas.io.formats.printing import pprint_thing
type_repr = "|".join(map(pprint_thing, _type))
else:
type_repr = "'%s'" % _type
def inner(x):
if not isinstance(x, _type):
raise ValueError("Value must be an instance of %s" % type_repr)
return inner
def is_one_of_factory(legal_values):
callables = [c for c in legal_values if callable(c)]
legal_values = [c for c in legal_values if not callable(c)]
def inner(x):
from pandas.io.formats.printing import pprint_thing as pp
if x not in legal_values:
if not any([c(x) for c in callables]):
pp_values = pp("|".join(lmap(pp, legal_values)))
msg = "Value must be one of {0}".format(pp_values)
if len(callables):
msg += " or a callable"
raise ValueError(msg)
return inner
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
is_unicode = is_type_factory(compat.text_type)
is_text = is_instance_factory((str, bytes))
def is_callable(obj):
"""
Parameters
----------
`obj` - the object to be checked
Returns
-------
validator - returns True if object is callable
raises ValueError otherwise.
"""
if not callable(obj):
raise ValueError("Value must be a callable")
return True
| mit |
vybstat/scikit-learn | sklearn/tests/test_common.py | 70 | 7717 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
CROSS_DECOMPOSITION,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier,
check_transformer_n_iter,
check_non_transformer_estimators_n_iter,
check_get_params_invariance,
check_fit2d_predict1d,
check_fit1d_1sample)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield check, name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
            # Contrary to RidgeClassifierCV, LogisticRegressionCV uses actual
# CV folds and fit a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly as the
# other linear model.
continue
yield check_class_weight_balanced_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    # Test that all non-transformer estimators with a max_iter attribute
    # set the n_iter attribute to at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
# Multitask models related to ENet cannot handle
# if y is mono-output.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
def test_get_params_invariance():
# Test for estimators that support get_params, that
# get_params(deep=False) is a subset of get_params(deep=True)
# Related to issue #4465
estimators = all_estimators(include_meta_estimators=False, include_other=True)
for name, Estimator in estimators:
if hasattr(Estimator, 'get_params'):
yield check_get_params_invariance, name, Estimator
| bsd-3-clause |
michaeljohnbennett/zipline | zipline/utils/cli.py | 10 | 8343 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import argparse
from copy import copy
from six import print_
from six.moves import configparser
import pandas as pd
try:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
PYGMENTS = True
except:
PYGMENTS = False
import zipline
from zipline.errors import NoSourceError, PipelineDateError
DEFAULTS = {
'data_frequency': 'daily',
'capital_base': '10e6',
'source': 'yahoo',
'symbols': 'AAPL',
'metadata_index': 'symbol',
'source_time_column': 'Date',
}
def parse_args(argv, ipython_mode=False):
"""Parse list of arguments.
If a config file is provided (via -c), it will read in the
supplied options and overwrite any global defaults.
All other directly supplied arguments will overwrite the config
file settings.
Arguments:
* argv : list of strings
List of arguments, e.g. ['-c', 'my.conf']
    * ipython_mode : bool <default=False>
Whether to parse IPython specific arguments
like --local_namespace
Notes:
Default settings can be found in zipline.utils.cli.DEFAULTS.
"""
# Parse any conf_file specification
# We make this parser with add_help=False so that
# it doesn't parse -h and print help.
conf_parser = argparse.ArgumentParser(
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
# Turn off help, so we print all options in response to -h
add_help=False
)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file",
metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(argv)
defaults = copy(DEFAULTS)
if args.conf_file:
config = configparser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("Defaults")))
# Parse rest of arguments
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
description="Zipline version %s." % zipline.__version__,
parents=[conf_parser]
)
parser.set_defaults(**defaults)
parser.add_argument('--algofile', '-f')
parser.add_argument('--data-frequency',
choices=('minute', 'daily'))
parser.add_argument('--start', '-s')
parser.add_argument('--end', '-e')
parser.add_argument('--capital_base')
parser.add_argument('--source', '-d', choices=('yahoo',))
parser.add_argument('--source_time_column', '-t')
parser.add_argument('--symbols')
parser.add_argument('--output', '-o')
parser.add_argument('--metadata_path', '-m')
parser.add_argument('--metadata_index', '-x')
parser.add_argument('--print-algo', '-p', dest='print_algo',
action='store_true')
parser.add_argument('--no-print-algo', '-q', dest='print_algo',
action='store_false')
if ipython_mode:
parser.add_argument('--local_namespace', action='store_true')
args = parser.parse_args(remaining_argv)
return(vars(args))
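# Illustrative sketch (file contents are an assumption, not shipped with
# zipline): a config file passed via ``-c my.conf`` is read with configparser
# and its [Defaults] section overrides the DEFAULTS dict above, e.g.
#
#     [Defaults]
#     symbols = AAPL,MSFT
#     capital_base = 10e6
#     data_frequency = daily
#
# Flags supplied directly on the command line still take precedence.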
def parse_cell_magic(line, cell):
"""Parse IPython magic
"""
args_list = line.split(' ')
args = parse_args(args_list, ipython_mode=True)
# Remove print_algo kwarg to overwrite below.
args.pop('print_algo')
local_namespace = args.pop('local_namespace', False)
# By default, execute inside IPython namespace
if not local_namespace:
args['namespace'] = get_ipython().user_ns # flake8: noqa
# If we are running inside NB, do not output to file but create a
# variable instead
output_var_name = args.pop('output', None)
perf = run_pipeline(print_algo=False, algo_text=cell, **args)
if output_var_name is not None:
get_ipython().user_ns[output_var_name] = perf # flake8: noqa
def run_pipeline(print_algo=True, **kwargs):
"""Runs a full zipline pipeline given configuration keyword
arguments.
    1. Load data (start and end dates can be provided as strings, as
    well as the source and symbols).
2. Instantiate algorithm (supply either algo_text or algofile
kwargs containing initialize() and handle_data() functions). If
algofile is supplied, will try to look for algofile_analyze.py and
append it.
3. Run algorithm (supply capital_base as float).
4. Return performance dataframe.
:Arguments:
* print_algo : bool <default=True>
Whether to print the algorithm to command line. Will use
pygments syntax coloring if pygments is found.
"""
start = kwargs['start']
end = kwargs['end']
# Compare against None because strings/timestamps may have been given
if start is not None:
start = pd.Timestamp(start, tz='UTC')
if end is not None:
end = pd.Timestamp(end, tz='UTC')
# Fail out if only one bound is provided
if ((start is None) or (end is None)) and (start != end):
raise PipelineDateError(start=start, end=end)
# Check if start and end are provided, and if the sim_params need to read
# a start and end from the DataSource
if start is None:
overwrite_sim_params = True
else:
overwrite_sim_params = False
symbols = kwargs['symbols'].split(',')
asset_identifier = kwargs['metadata_index']
# Pull asset metadata
asset_metadata = kwargs.get('asset_metadata', None)
asset_metadata_path = kwargs['metadata_path']
# Read in a CSV file, if applicable
if asset_metadata_path is not None:
if os.path.isfile(asset_metadata_path):
asset_metadata = pd.read_csv(asset_metadata_path,
index_col=asset_identifier)
source_arg = kwargs['source']
source_time_column = kwargs['source_time_column']
if source_arg is None:
raise NoSourceError()
elif source_arg == 'yahoo':
source = zipline.data.load_bars_from_yahoo(
stocks=symbols, start=start, end=end)
elif os.path.isfile(source_arg):
source = zipline.data.load_prices_from_csv(
filepath=source_arg,
identifier_col=source_time_column
)
elif os.path.isdir(source_arg):
source = zipline.data.load_prices_from_csv_folder(
folderpath=source_arg,
identifier_col=source_time_column
)
else:
raise NotImplementedError(
'Source %s not implemented.' % kwargs['source'])
algo_text = kwargs.get('algo_text', None)
if algo_text is None:
# Expect algofile to be set
algo_fname = kwargs['algofile']
with open(algo_fname, 'r') as fd:
algo_text = fd.read()
if print_algo:
if PYGMENTS:
highlight(algo_text, PythonLexer(), TerminalFormatter(),
outfile=sys.stdout)
else:
print_(algo_text)
algo = zipline.TradingAlgorithm(script=algo_text,
namespace=kwargs.get('namespace', {}),
capital_base=float(kwargs['capital_base']),
algo_filename=kwargs.get('algofile'),
equities_metadata=asset_metadata,
identifiers=symbols,
start=start,
end=end)
perf = algo.run(source, overwrite_sim_params=overwrite_sim_params)
output_fname = kwargs.get('output', None)
if output_fname is not None:
perf.to_pickle(output_fname)
return perf
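# Illustrative invocation sketch (argument values are assumptions, not part of
# the original module):
#
#     perf = run_pipeline(algofile='my_algo.py', source='yahoo',
#                         symbols='AAPL', start='2014-01-01',
#                         end='2014-12-31', capital_base='10e6',
#                         metadata_path=None, metadata_index='symbol',
#                         source_time_column='Date', output=None)
#
# ``perf`` is the performance DataFrame returned by TradingAlgorithm.run().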
| apache-2.0 |
zpincus/zplib | zplib/image/sample_texture.py | 2 | 2094 | import numpy
from sklearn import cluster
from . import _sample_texture
sample_texture = _sample_texture.sample_texture
sample_ar_texture = _sample_texture.sample_ar_texture
def subsample_mask(mask, max_points):
"""Return a mask containing at most max_points 'True' values, each of which
is located somewhere within the original mask.
This is useful for sampling textures where it is neither necessary nor practical
to sample EVERY pixel of potential interest. Instead, a random subset of the
pixels of interest is selected.
"""
mask = numpy.asarray(mask) > 0
num_points = mask.sum()
if num_points > max_points:
z = numpy.zeros(num_points, dtype=bool)
z[:max_points] = 1
mask = mask.copy()
mask[mask] = numpy.random.permutation(z)
return mask
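# Illustrative usage sketch (names and values are assumptions, not part of the
# original module):
#
#     candidate_mask = image > threshold          # pixels of interest
#     small_mask = subsample_mask(candidate_mask, max_points=10000)
#     samples = sample_ar_texture(image, small_mask, size=3)
#
# This keeps texture sampling tractable by using at most 10000 randomly
# chosen pixels from the original mask.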
def bin_by_texture_class(image, num_classes, mask=None, size=3):
"""Return an image where pixels are replaced by the "texture class" that
that pixel belongs to.
Textures are sampled using sample_ar_texture and then clustered with k-means
clustering. An image is returned where each pixel represents the label of
its texture cluster.
Parameters:
image: 2-dimensional numpy array of type uint8, uint16, or float32
num_classes: number of clusters to identify with k-means
mask: optional mask for which pixels to examine
size: size of the ar feature window (see sample_ar_texture)
"""
texture_samples = sample_ar_texture(image, mask, size)
    kmeans = cluster.MiniBatchKMeans(n_clusters=num_classes, max_iter=300)
kmeans.fit(texture_samples)
dtype = numpy.uint16 if num_classes > 256 else numpy.uint8
labeled_image = numpy.zeros(image.shape, dtype)
# if not image.flags.fortran:
# labeled_image = labeled_image.T
# if mask is not None:
# mask = mask.T
if mask is not None:
labeled_image[mask] = kmeans.labels_
else:
labeled_image.flat = kmeans.labels_
# if not image.flags.fortran:
# labeled_image = labeled_image.T
return labeled_image | mit |
dvspirito/pymeasure | pymeasure/experiment/results.py | 1 | 13071 | #
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2017 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
import os
import re
import sys
from copy import deepcopy
from importlib.machinery import SourceFileLoader
from datetime import datetime
import pandas as pd
from .procedure import Procedure, UnknownProcedure
from .parameters import Parameter
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
def unique_filename(directory, prefix='DATA', suffix='', ext='csv',
dated_folder=False, index=True, datetimeformat="%Y-%m-%d"):
""" Returns a unique filename based on the directory and prefix
"""
now = datetime.now()
directory = os.path.abspath(directory)
if dated_folder:
directory = os.path.join(directory, now.strftime('%Y-%m-%d'))
if not os.path.exists(directory):
os.makedirs(directory)
if index:
i = 1
basename = "%s%s" % (prefix, now.strftime(datetimeformat))
basepath = os.path.join(directory, basename)
filename = "%s_%d%s.%s" % (basepath, i, suffix, ext)
while os.path.exists(filename):
i += 1
filename = "%s_%d%s.%s" % (basepath, i, suffix, ext)
else:
basename = "%s%s%s.%s" % (prefix, now.strftime(datetimeformat), suffix, ext)
filename = os.path.join(directory, basename)
return filename
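# Illustrative usage sketch (directory and prefix are assumptions, not part of
# the original module):
#
#     filename = unique_filename('data', prefix='IV', dated_folder=True)
#     # e.g. .../data/2017-01-31/IV2017-01-31_1.csv on the first call,
#     # ..._2.csv on the next, and so on.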
class CSVFormatter(logging.Formatter):
""" Formatter of data results """
def __init__(self, columns, delimiter=','):
"""Creates a csv formatter for a given list of columns (=header).
:param columns: list of column names.
:type columns: list
:param delimiter: delimiter between columns.
:type delimiter: str
"""
super().__init__()
self.columns = columns
self.delimiter = delimiter
def format(self, record):
"""Formats a record as csv.
:param record: record to format.
:type record: dict
:return: a string
"""
return self.delimiter.join('{}'.format(record[x]) for x in self.columns)
def format_header(self):
return self.delimiter.join(self.columns)
class Results(object):
""" The Results class provides a convenient interface to reading and
writing data in connection with a :class:`.Procedure` object.
:cvar COMMENT: The character used to identify a comment (default: #)
:cvar DELIMITER: The character used to delimit the data (default: ,)
:cvar LINE_BREAK: The character used for line breaks (default \\n)
:cvar CHUNK_SIZE: The length of the data chuck that is read
:param procedure: Procedure object
:param data_filename: The data filename where the data is or should be
stored
"""
COMMENT = '#'
DELIMITER = ','
LINE_BREAK = "\n"
CHUNK_SIZE = 1000
def __init__(self, procedure, data_filename):
if not isinstance(procedure, Procedure):
raise ValueError("Results require a Procedure object")
self.procedure = procedure
self.procedure_class = procedure.__class__
self.parameters = procedure.parameter_objects()
self._header_count = -1
self.formatter = CSVFormatter(columns=self.procedure.DATA_COLUMNS)
if isinstance(data_filename, (list, tuple)):
data_filenames, data_filename = data_filename, data_filename[0]
else:
data_filenames = [data_filename]
self.data_filename = data_filename
self.data_filenames = data_filenames
if os.path.exists(data_filename): # Assume header is already written
self.reload()
self.procedure.status = Procedure.FINISHED
# TODO: Correctly store and retrieve status
else:
for filename in self.data_filenames:
with open(filename, 'w') as f:
f.write(self.header())
f.write(self.labels())
self._data = None
def __getstate__(self):
# Get all information needed to reconstruct procedure
self._parameters = self.procedure.parameter_values()
self._class = self.procedure.__class__.__name__
module = sys.modules[self.procedure.__module__]
self._package = module.__package__
self._module = module.__name__
self._file = module.__file__
state = self.__dict__.copy()
del state['procedure']
del state['procedure_class']
return state
def __setstate__(self, state):
self.__dict__.update(state)
# Restore the procedure
module = SourceFileLoader(self._module, self._file).load_module()
cls = getattr(module, self._class)
self.procedure = cls()
self.procedure.set_parameters(self._parameters)
self.procedure.refresh_parameters()
self.procedure_class = cls
del self._parameters
del self._class
del self._package
del self._module
del self._file
def header(self):
""" Returns a text header to accompany a datafile so that the procedure
can be reconstructed
"""
h = []
procedure = re.search("'(?P<name>[^']+)'",
repr(self.procedure_class)).group("name")
h.append("Procedure: <%s>" % procedure)
h.append("Parameters:")
for name, parameter in self.parameters.items():
h.append("\t%s: %s" % (parameter.name, str(parameter)))
h.append("Data:")
self._header_count = len(h)
h = [Results.COMMENT + l for l in h] # Comment each line
return Results.LINE_BREAK.join(h) + Results.LINE_BREAK
def labels(self):
""" Returns the columns labels as a string to be written
to the file
"""
return self.formatter.format_header() + Results.LINE_BREAK
def format(self, data):
""" Returns a formatted string containing the data to be written
to a file
"""
return self.formatter.format(data)
def parse(self, line):
""" Returns a dictionary containing the data from the line """
data = {}
items = line.split(Results.DELIMITER)
for i, key in enumerate(self.procedure.DATA_COLUMNS):
data[key] = items[i]
return data
@staticmethod
def parse_header(header, procedure_class=None):
""" Returns a Procedure object with the parameters as defined in the
header text.
"""
if procedure_class is not None:
procedure = procedure_class()
else:
procedure = None
header = header.split(Results.LINE_BREAK)
procedure_module = None
parameters = {}
for line in header:
if line.startswith(Results.COMMENT):
line = line[1:] # Uncomment
else:
raise ValueError("Parsing a header which contains "
"uncommented sections")
if line.startswith("Procedure"):
regex = "<(?:(?P<module>[^>]+)\.)?(?P<class>[^.>]+)>"
search = re.search(regex, line)
procedure_module = search.group("module")
procedure_class = search.group("class")
elif line.startswith("\t"):
regex = ("\t(?P<name>[^:]+):\s(?P<value>[^\s]+)"
"(?:\s(?P<units>.+))?")
search = re.search(regex, line)
if search is None:
raise Exception("Error parsing header line %s." % line)
else:
parameters[search.group("name")] = (
search.group("value"),
search.group("units")
)
if procedure is None:
if procedure_class is None:
raise ValueError("Header does not contain the Procedure class")
try:
from importlib import import_module
procedure_module = import_module(procedure_module)
procedure_class = getattr(procedure_module, procedure_class)
procedure = procedure_class()
except ImportError:
procedure = UnknownProcedure(parameters)
log.warning("Unknown Procedure being used")
except Exception as e:
raise e
def units_found(parameter, units):
return (hasattr(parameter, 'units') and
parameter.units is None and
isinstance(parameter, Parameter) and
units is not None)
# Fill the procedure with the parameters found
for name, parameter in procedure.parameter_objects().items():
if parameter.name in parameters:
value, units = parameters[parameter.name]
if units_found(parameter, units):
# Force full string to be matched
value = value + " " + str(units)
setattr(procedure, name, value)
else:
raise Exception("Missing '%s' parameter when loading '%s' class" % (
parameter.name, procedure_class))
procedure.refresh_parameters() # Enforce update of meta data
return procedure
@staticmethod
def load(data_filename, procedure_class=None):
""" Returns a Results object with the associated Procedure object and
data
"""
header = ""
header_read = False
header_count = 0
with open(data_filename, 'r') as f:
while not header_read:
line = f.readline()
if line.startswith(Results.COMMENT):
header += line.strip() + Results.LINE_BREAK
header_count += 1
else:
header_read = True
procedure = Results.parse_header(header[:-1], procedure_class)
results = Results(procedure, data_filename)
results._header_count = header_count
return results
@property
def data(self):
# Need to update header count for correct referencing
if self._header_count == -1:
self._header_count = len(
self.header()[-1].split(Results.LINE_BREAK))
if self._data is None or len(self._data) == 0:
# Data has not been read
try:
self.reload()
except Exception:
# Empty dataframe
self._data = pd.DataFrame(columns=self.procedure.DATA_COLUMNS)
else: # Concatenate additional data
skiprows = len(self._data) + self._header_count
chunks = pd.read_csv(
self.data_filename,
comment=Results.COMMENT,
header=0,
names=self._data.columns,
chunksize=Results.CHUNK_SIZE, skiprows=skiprows, iterator=True
)
try:
tmp_frame = pd.concat(chunks, ignore_index=True)
self._data = pd.concat([self._data, tmp_frame],
ignore_index=True)
except Exception:
pass # All data is up to date
return self._data
def reload(self):
""" Preforms a full reloading of the file data, neglecting
any changes in the comments
"""
chunks = pd.read_csv(
self.data_filename,
comment=Results.COMMENT,
chunksize=Results.CHUNK_SIZE,
iterator=True
)
try:
self._data = pd.concat(chunks, ignore_index=True)
except Exception:
self._data = chunks.read()
def __repr__(self):
return "<{}(filename='{}',procedure={},shape={})>".format(
self.__class__.__name__, self.data_filename,
self.procedure.__class__.__name__,
self.data.shape
)
| mit |
iABC2XYZ/abc | DM/BTL/T3.py | 1 | 4445 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 12 14:52:28 2017
@author: p
NN
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from PreData import RandItemMulti
from testPredict import DealBeta,RoundItemMultiPack,CalLatticeSingle
plt.close('all')
def GenWeight(shape):
initial = tf.truncated_normal(shape, stddev=1.)
return tf.Variable(initial)
def GenBias(shape):
initial = tf.constant(1., shape=shape)
return tf.Variable(initial)
numItem=64
numSample=2**8
numQuadHigh=20
weightSize=[numSample*4,2**6,2**4,numQuadHigh*3]
wFC1 = GenWeight([weightSize[0],weightSize[1]])
bFC1=GenBias([weightSize[1]])
wFC2 = GenWeight([weightSize[1],weightSize[2]])
bFC2=GenBias([weightSize[2]])
wFC3 = GenWeight([weightSize[2],weightSize[3]])
bFC3=GenBias([weightSize[3]])
xInput=tf.placeholder(tf.float32,shape=[None,weightSize[0]])
yInput=tf.placeholder(tf.float32,shape=[None,weightSize[-1]])
xFC1=tf.matmul(xInput,wFC1)+bFC1
xAct1=tf.nn.relu(xFC1)
xFC2=tf.matmul(xAct1,wFC2)+bFC2
xAct2=tf.nn.relu(xFC2)
xFC3=tf.matmul(xAct2,wFC3)+bFC3
xFinal=xFC3
xOutput=xFinal
yOutput=yInput
xOutputMat=tf.reshape(xOutput,(numQuadHigh,3))
costFunc=tf.reduce_sum((yOutput-xOutput)**2)
trainBTL=tf.train.AdamOptimizer(0.03)
optBTL=trainBTL.minimize(costFunc)
iniBTL=tf.global_variables_initializer()
zGivenLearn=np.array([0,2,3,5,6,7,9,12,15,16,17])
betaXGivenLearn=np.sin(zGivenLearn+np.random.random(np.size(zGivenLearn)))+3
betaYGivenLearn=-np.sin(zGivenLearn+np.random.random(np.size(zGivenLearn)))+3
numQuadLearn=5
zGivenValidate=np.array([0,1,2,4,6,9,11,14,16,19,21])
betaXGivenValidate=np.sin(zGivenValidate-np.random.random(np.size(zGivenValidate)))+3
betaYGivenValidate=-np.sin(zGivenValidate-np.random.random(np.size(zGivenValidate)))+3
numQuadValidate=5
dataBeamLearn=DealBeta(zGivenLearn,betaXGivenLearn,betaYGivenLearn,numSample,numQuadLearn)
dataBeamValidate=DealBeta(zGivenLearn,betaXGivenLearn,betaYGivenLearn,numSample,numQuadValidate)
zGivenLearn=dataBeamLearn[:,0]
betaXGivenLearn=dataBeamLearn[:,1]
betaYGivenLearn=dataBeamLearn[:,2]
plt.figure(0)
plt.clf()
plt.subplot(121)
plt.hold('on')
plt.plot(zGivenLearn,betaXGivenLearn,'b')
plt.title('X')
plt.subplot(122)
plt.hold('on')
plt.plot(zGivenLearn,betaYGivenLearn,'b')
plt.title('Y')
numRun=500
se= tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
se.run(iniBTL)
for _ in range(numRun):
dataLatticeLearn=se.run(xOutputMat,feed_dict={xInput:dataBeamLearn.reshape(1,weightSize[0])})
dataLattice,dataBeam=RoundItemMultiPack(numItem,dataBeamLearn,dataLatticeLearn)
dataBeam[dataBeam==np.inf]=9.e300
dataBeam[np.isnan(dataBeam)]=9.e300
se.run(optBTL,feed_dict={xInput:dataBeam.reshape(numItem,weightSize[0]),yInput:dataLattice.reshape(numItem,weightSize[-1])})
###
ZLearn,betaXLearn,betaYLearn=CalLatticeSingle(dataBeamLearn,dataLatticeLearn)
driftL=dataLatticeLearn[:,0]
quadK=dataLatticeLearn[:,1]
quadL=dataLatticeLearn[:,2]
print dataLatticeLearn
plt.figure(1)
plt.clf()
plt.subplot(121)
plt.hold('on')
plt.plot(driftL,'b-*')
plt.plot(quadK,'g-*')
plt.plot(quadL,'r-*')
plt.title(_)
plt.subplot(222)
plt.hold('on')
plt.plot(zGivenLearn,betaXGivenLearn,'b')
plt.plot(ZLearn,betaXLearn,'r')
plt.title('X')
plt.subplot(224)
plt.hold('on')
plt.plot(zGivenLearn,betaYGivenLearn,'b')
plt.plot(ZLearn,betaYLearn,'r')
plt.title('Y')
plt.pause(0.005)
se.close()
print('END')
'''
numRun=1
costRec=np.zeros(numRun)
with tf.Session() as se:
se.run(iniBTL)
dataLatticeLearn=se.run(xOutput,feed_dict={xInput:dataBeamLearn.reshape(numItem,weightSize[0])})
for _ in xrange(numRun):
dataLattice,dataBeam=RandItemMulti(numItem,numSample,numQuadHigh)
#T1=se.run(costFunc,feed_dict={xInput:dataBeam.reshape(numItem,weightSize[0]),yInput:dataLattice.reshape(numItem,weightSize[-1])})
se.run(optBTL,feed_dict={xInput:dataBeam.reshape(numItem,weightSize[0]),yInput:dataLattice.reshape(numItem,weightSize[-1])})
T2=se.run(costFunc,feed_dict={xInput:dataBeam.reshape(numItem,weightSize[0]),yInput:dataLattice.reshape(numItem,weightSize[-1])})
costRec[_]=T2
print str(np.round((np.float32(_)/np.float32(numRun))*100.))+'%'
'''
| gpl-3.0 |
mdhaber/scipy | tools/refguide_check.py | 5 | 32497 | #!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --doctests optimize
"""
import copy
import doctest
import glob
import inspect
import io
import os
import re
import shutil
import sys
import tempfile
import warnings
from argparse import ArgumentParser
from contextlib import contextmanager, redirect_stderr
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
import docutils.core
import numpy as np
import sphinx
from docutils.parsers.rst import directives
from pkg_resources import parse_version
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
if parse_version(sphinx.__version__) >= parse_version('1.5'):
# Enable specific Sphinx directives
from sphinx.directives.other import SeeAlso, Only
directives.register_directive('seealso', SeeAlso)
directives.register_directive('only', Only)
else:
# Remove sphinx directives that don't run without Sphinx environment.
# Sphinx < 1.5 installs all directives on import...
directives._directives.pop('versionadded', None)
directives._directives.pop('versionchanged', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "scipy"
PUBLIC_SUBMODULES = [
'cluster',
'cluster.hierarchy',
'cluster.vq',
'constants',
'fft',
'fftpack',
'fftpack.convolve',
'integrate',
'interpolate',
'io',
'io.arff',
'io.wavfile',
'linalg',
'linalg.blas',
'linalg.lapack',
'linalg.interpolative',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'signal.windows',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'spatial',
'spatial.distance',
'spatial.transform',
'special',
'stats',
'stats.mstats',
'stats.contingency',
'stats.qmc',
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
'scipy.stats.kstwobign', # inaccurate cdf or ppf
'scipy.stats.levy_stable',
'scipy.special.sinc', # comes from numpy
'scipy.misc.who', # comes from numpy
'scipy.optimize.show_options',
'scipy.integrate.quad_explain',
'io.rst', # XXX: need to figure out how to deal w/ mat files
])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.csgraph',
r'scipy\.sparse\.linalg',
r'scipy\.spatial\.distance',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
r'scipy\.special\..*_roots', # old aliases for scipy.special.*_roots
r'scipy\.special\.jn', # alias for jv
r'scipy\.ndimage\.sum', # alias for sum_labels
r'scipy\.integrate\.simps', # alias for simpson
r'scipy\.integrate\.trapz', # alias for trapezoid
r'scipy\.integrate\.cumtrapz', # alias for cumulative_trapezoid
r'scipy\.linalg\.solve_lyapunov', # deprecated name
r'scipy\.stats\.contingency\.chi2_contingency',
r'scipy\.stats\.contingency\.expected_freq',
r'scipy\.stats\.contingency\.margins',
r'scipy\.stats\.reciprocal',
r'scipy\.stats\.trapz', # alias for trapezoid
]
# deprecated windows in scipy.signal namespace
for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
'kaiser', 'nuttall', 'parzen', 'triang', 'tukey'):
REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:
if re.match(pat, module_name + '.' + name):
break
else:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except Exception:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = only_ref.intersection(deprecated)
only_ref = only_ref.difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
output += "\nThis issue can be fixed by adding these objects to\n"
output += "the function listing in __init__.py for this module\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
output += "\nThis issue should likely be fixed by removing these objects\n"
output += "from the function listing in __init__.py for this module\n"
output += "or adding them to __all__.\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data',
'obj', 'versionadded', 'versionchanged', 'module', 'class', 'meth',
'ref', 'func', 'toctree', 'moduleauthor', 'deprecated',
'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except Exception:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float32': np.float32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,}
def try_convert_namedtuple(got):
# suppose that "got" is smth like MoodResult(statistic=10, pvalue=0.1).
# Then convert it to the tuple (10, 0.1), so that can later compare tuples.
num = got.count('=')
if num == 0:
# not a nameduple, bail out
return got
regex = (r'[\w\d_]+\(' +
', '.join([r'[\w\d_]+=(.+)']*num) +
r'\)')
grp = re.findall(regex, got.replace('\n', ' '))
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return got_again
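# Illustrative sketch (the example string is an assumption): a doctest "got"
# value such as
#
#     "MoodResult(statistic=10, pvalue=0.1)"
#
# is folded back to the plain tuple string "(10, 0.1)", which can then be
# eval-ed and compared against a tuple in the expected output.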
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
self._had_unexpected_error = False
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
# Ignore name errors after failing due to an unexpected exception
exception_type = exc_info[0]
if self._had_unexpected_error and exception_type is NameError:
return
self._had_unexpected_error = True
self._report_item_name(out)
return super().report_unexpected_exception(
out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile(r'at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
'# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',
'.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = dict(CHECK_NAMESPACE)
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except Exception:
# Maybe we're printing a numpy array? This produces invalid python
# code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
# values. So, reinsert commas and retry.
            # TODO: handle (1) abbreviation (`print(np.arange(10000))`), and
# (2) n-dim arrays with n > 1
s_want = want.strip()
s_got = got.strip()
cond = (s_want.startswith("[") and s_want.endswith("]") and
s_got.startswith("[") and s_got.endswith("]"))
if cond:
s_want = ", ".join(s_want[1:-1].split())
s_got = ", ".join(s_got[1:-1].split())
return self.check_output(s_want, s_got, optionflags)
if "=" not in want and "=" not in got:
# if we're here, want and got cannot be eval-ed (hence cannot
# be converted to numpy objects), they are not namedtuples
# (those must have at least one '=' sign).
# Thus they should have compared equal with vanilla doctest.
# Since they did not, it's an error.
return False
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
got_again = try_convert_namedtuple(got)
want_again = try_convert_namedtuple(want)
except Exception:
return False
else:
return self.check_output(want_again, got_again, optionflags)
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterog tuple, eg (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
# numpy-comparable objects, strings, and heterogeneous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
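# Illustrative sketch (values are assumptions): with the default tolerances a
# "want" of "[1.0, 2.0]" and a "got" of "array([1., 2.00000001])" both eval
# in CHECK_NAMESPACE to array-like objects and compare equal via
# np.allclose(..., atol=1e-8, rtol=1e-2) inside Checker._do_check.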
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""Run modified doctests for the set of `tests`.
Returns: list of [(success_flag, output), ...]
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = io.StringIO(newline='')
success = True
# Redirect stderr to the stdout or output
tmp_stderr = sys.stdout if doctest_warnings else output
from scipy._lib._util import _fixed_default_rng
@contextmanager
def temp_cwd():
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
os.chdir(tmpdir)
yield tmpdir
finally:
os.chdir(cwd)
shutil.rmtree(tmpdir)
# Run tests, trying to restore global state afterward
cwd = os.getcwd()
with np.errstate(), np.printoptions(), temp_cwd(), \
redirect_stderr(tmp_stderr), \
_fixed_default_rng():
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=output.write)
if fails > 0:
success = False
output.seek(0)
return success, output.read()
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except Exception:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, output))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in a text file.
Mimic `check_doctests` above, differing mostly in test discovery.
(which is borrowed from stdlib's doctest.testfile here,
https://github.com/python-git/python/blob/master/Lib/doctest.py)
Returns: list of [(item_name, success_flag, output), ...]
Notes
-----
refguide can be signalled to skip testing code by adding
``#doctest: +SKIP`` to the end of the line. If the output varies or is
    random, add ``# may vary`` or ``# random`` to the comment. For example,
>>> plt.plot(...) # doctest: +SKIP
>>> random.randint(0,10)
5 # random
We also try to weed out pseudocode:
* We maintain a list of exceptions which signal pseudocode,
* We split the text file into "blocks" of code separated by empty lines
and/or intervening text.
* If a block contains a marker, the whole block is then assumed to be
pseudocode. It is then not being doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
results = []
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
with open(fname, encoding='utf-8') as f:
text = f.read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
'ctypes.CDLL', # likely need compiling, skip it
                      'integrate.nquad(func,' # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
for part in text.split('\n\n'):
tests = parser.get_doctest(part, ns, fname, fname, 0)
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
# `part` looks like a good code, let's doctest it
good_parts += [part]
# Reassemble the good bits and doctest them:
good_text = '\n\n'.join(good_parts)
tests = parser.get_doctest(good_text, ns, fname, fname, 0)
success, output = _run_doctests([tests], full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, output))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def init_matplotlib():
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true", help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--skip-tutorial", action="store_true",
help="Skip running doctests in the tutorial.")
args = parser.parse_args(argv)
modules = []
names_dict = {}
if args.module_names:
args.skip_tutorial = True
else:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
if args.doctests or not args.skip_tutorial:
init_matplotlib()
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
if not args.skip_tutorial:
base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
tut_path = os.path.join(base_dir, 'doc', 'source', 'tutorial', '*.rst')
print('\nChecking tutorial files at %s:' % os.path.relpath(tut_path, os.getcwd()))
for filename in sorted(glob.glob(tut_path)):
if dots:
sys.stderr.write('\n')
sys.stderr.write(os.path.split(filename)[1] + ' ')
sys.stderr.flush()
tut_results = check_doctests_testfile(filename, (args.verbose >= 2),
dots=dots, doctest_warnings=args.doctest_warnings)
def scratch():
pass # stub out a "module", see below
scratch.__name__ = filename
results.append((scratch, tut_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| bsd-3-clause |
dh4gan/oberon | plot/plot_EBMlog_multiplevariables.py | 1 | 3151 | # Written 17/1/14 by dh4gan
# Code reads in log files from EBM and plots them
import matplotlib.pyplot as plt
import numpy as np
import filefinder.localfiles as ff
import io_oberon.io_EBM
# Find log files
inputfile = ff.find_local_input_files('*.log')
prefix = inputfile[:-4]
xkey, ykeys, ix, iylist, nyvar = io_oberon.io_EBM.select_multiple_variables(io_oberon.io_EBM.logvariablekeys, io_oberon.io_EBM.logcoldict, io_oberon.io_EBM.lognamedict)
# Read in data
print 'Reading ',inputfile
data = np.genfromtxt(inputfile)
# Find maximum and minimum time limits to plot
xmin = np.amin(data[:,ix])
xmax = np.amax(data[:,ix])
ymin = []
ymax = []
for iy in iylist:
ymin.append(np.amin(data[:,iy]))
ymax.append(np.amax(data[:,iy]))
ymin = np.array(ymin)
ymax = np.array(ymax)
print 'Data xmin, xmax: ', xmin, ' , ',xmax
for i in range(nyvar):
print 'Data ymin, ymax: ', ymin[i], ' , ',ymax[i]
newlimits = raw_input("Set new x limits? (y/n) ")
if newlimits=='y':
xmin = input("What is the minimum x value? ")
xmax = input("What is the maximum x value? ")
# Find x points in this range
inxrange = np.where(np.logical_and(data[:,ix]>xmin,data[:,ix]<xmax))
# Find minimum and maximum y in this range
for i in range(nyvar):
ymin[i] = np.amin(data[inxrange,iylist[i]])
ymax[i] = np.amax(data[inxrange,iylist[i]])
print 'New '+ykeys[i]+' limits: ',ymin[i], ymax[i]
outputfile = ''
for i in range(nyvar):
outputfile = outputfile + ykeys[i]+'_'
outputfile = outputfile+'vs_'+xkey+'.png'
npoints = len(data[:,ix])
# If npoints large, prompt user for subsampling
nsubsample = 0
if(npoints > 1.0e4):
nsubsample = input('Attempting to plot '+str(npoints)+' points: subsampling is recommended! \nEnter subsampling factor (enter 0 to skip):')
nsubsample = int(nsubsample)
#globalymin = 0.0
#globalymax= 1.0
#if(ymax-ymin < 1.0):
# ymax = ymax +0.5
# ymin = ymin -0.5
# Make figure
print xmin, xmax
fig1 = plt.figure()
ax = fig1.add_subplot(111)
ax.set_xlabel(io_oberon.io_EBM.lognamedict[xkey], fontsize = 16)
ax.set_ylabel(io_oberon.io_EBM.lognamedict[ykeys[0]], fontsize = 16)
ax.set_xlim(xmin,xmax)
if(nsubsample>0):
lns1 = ax.plot(data[0::nsubsample,ix],data[0::nsubsample,iylist[0]],label=io_oberon.io_EBM.lognamedict[ykeys[0]], color='blue')
else:
lns1 = ax.plot(data[:,ix],data[:,iylist[0]],label=io_oberon.io_EBM.lognamedict[ykeys[0]], color='blue')
ax2 = ax.twinx()
ax2.set_xlim(xmin,xmax)
ax2.set_ylim(ymin[1],ymax[1])
ax2.set_ylabel(io_oberon.io_EBM.lognamedict[ykeys[1]], fontsize = 16)
if(nsubsample>0):
lns2 = ax2.plot(data[0::nsubsample,ix],data[0::nsubsample,iylist[1]],label=io_oberon.io_EBM.lognamedict[ykeys[1]], color='green', linestyle='dashed')
else:
lns2 = ax2.plot(data[:,ix],data[:,iylist[1]],label=io_oberon.io_EBM.lognamedict[ykeys[1]], color='green', linestyle='dashed')
lns = lns1+lns2
labs = [l.get_label() for l in lns]
ax2.legend(lns,labs,bbox_to_anchor=(0.,1.02,1.,1.02),loc=3,ncol=2,mode="expand",borderaxespad=0)
#ax2.legend(loc = 'upper right')
fig1.savefig(outputfile, format='png')
| gpl-3.0 |
kubeflow/community | scripts/project_stats.py | 1 | 7517 | """A script to create a plot of the number of issues in a project.
Uses GitHub's API v4 which uses graphql
https://developer.github.com/v4/
For more instructions see the the corresponding Jupyter notebook:
project_stats.ipynb
"""
import argparse
import datetime
from dateutil import parser as date_parser
import json
import logging
import numpy as np
import os
import pandas as pd
import pprint
import requests
def run_query(query, headers):
"""A simple function to use requests.post to make the API call. Note the json= section."""
request = requests.post('https://api.github.com/graphql', json={'query': query}, headers=headers)
if request.status_code == 200:
return request.json()
else:
raise Exception("Query failed to run by returning code of {}. {}".format(request.status_code, query))
class ProjectStats(object):
def __init__(self, project):
self.query_template = None
self.project = project
def init_df(self, offset=0, size=300):
"""Initialize a dataframe of the specified size."""
return pd.DataFrame({
"time": [datetime.datetime.now()] * size,
"delta": np.zeros(size),
"label": [""] * size,
}, index=offset + np.arange(size))
def grow_df(self, df, offset=0, size=300):
return pd.concat([df, self.init_df(offset, size)])
def main(self):
self.fetch_data()
self.compute_stats()
def compute_stats(self):
# Compute a column to store total delta
total_delta = np.max(np.row_stack([self.data["delta"].values,
np.zeros(self.data.shape[0])]), axis=0)
self.data["total_delta"] = total_delta
self.stats = self.data.pivot_table(values=["delta", "total_delta"],
index=['time'],
columns=['label'], fill_value=0,
aggfunc=np.sum)
self.stats = self.stats.cumsum()
self.stats = self.stats.rename(mapper={"delta": "open", "total_delta":"total"},
axis='columns')
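# Sketch of the resulting `self.stats` frame (labels/values are illustrative):
# rows are timestamps and columns are a MultiIndex of ("open" | "total", label);
# after the cumsum, ("open", "bug") is the number of issues labelled "bug" still
# open at that time and ("total", "bug") is the number ever opened.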
def fetch_data(self):
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser(
description="Find issues that need attention.")
# Create a gitor using an access token
if not os.getenv("GITHUB_TOKEN"):
logging.error("Environment variable GITHUB_TOKEN must be set")
return
# We need to look at ProjectCard and then ProjectCard item
# https://developer.github.com/v4/object/projectcard/
# TODO(jlewi): Take project as an argument
self.query_template="""{{
organization(login:"kubeflow") {{
projects(last:1 search:"{project}") {{
totalCount
edges {{
node {{
name
url
columns(first:1 {columns_cursor}) {{
totalCount
pageInfo {{
endCursor
hasNextPage
}}
edges {{
node {{
cards(first:100 {cards_cursor}) {{
totalCount
pageInfo {{
endCursor
hasNextPage
}}
edges {{
node {{
content {{
__typename
... on Issue {{
url
title
number
createdAt
closedAt
labels(last:15) {{
totalCount
edges {{
node {{
name
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
}}
"""
# Times at which issues were opened and closed
opened = []
closed = []
headers = {"Authorization": "Bearer {0}".format(os.getenv("GITHUB_TOKEN"))}
columns_cursor = None
has_next_columns_page = True
issues = []
issue_numbers = []
# Create a dataframe to store the results
data = self.init_df()
num_items = 0
# We have two paginations to do:
# over columns and over cards
while has_next_columns_page:
columns_cursor_text = ""
if columns_cursor:
columns_cursor_text = "after:\"{0}\"".format(columns_cursor)
has_next_cards_page = True
cards_cursor = None
while has_next_cards_page:
cards_cursor_text = ""
if cards_cursor:
cards_cursor_text = "after:\"{0}\"".format(cards_cursor)
query = self.query_template.format(project=self.project,
columns_cursor=columns_cursor_text,
cards_cursor=cards_cursor_text)
result = run_query(query, headers=headers) # Execute the query
projects_connections = result["data"]["organization"]["projects"]
if projects_connections["totalCount"] != 1:
raise ValueError("Total number of projects: Got {0} want 1".format(
projects_connections["totalCount"]))
project = projects_connections["edges"][0]["node"]
columns_connection = project["columns"]
cards_connection = columns_connection["edges"][0]["node"]["cards"]
cards_cursor = cards_connection["pageInfo"]["endCursor"]
has_next_cards_page = cards_connection["pageInfo"]["hasNextPage"]
# If we reached the end of cards for this column increment the columns_page
# cards cursor
if not has_next_cards_page:
has_next_columns_page = columns_connection["pageInfo"]["hasNextPage"]
columns_cursor = columns_connection["pageInfo"]["endCursor"]
for e in cards_connection["edges"]:
n = e["node"]
c = n["content"]
if not c:
continue
# Cards can contain pull requests and these may not have labels
if not "labels" in c:
continue
labels_connections = c["labels"]
if labels_connections["totalCount"] > 15:
raise ValueError("Number of total labels exceeds the number "
"fetched; need to add pagination")
labels = labels_connections["edges"]
label_names = []
for l in labels:
label_names.append(l["node"]["name"])
if not label_names:
label_names.append("nolabels")
num_entries = len(label_names) * 2
if num_items + num_entries > data.shape[0]:
# Grow the dataframe
data = self.grow_df(data, offset=data.shape[0])
for f in ["createdAt", "closedAt"]:
if not c[f]:
continue
delta = 1
if f == "closedAt":
delta = -1
for l in label_names:
if delta > 0:
data["time"].at[num_items] = date_parser.parse(c["createdAt"])
else:
data["time"].at[num_items] = date_parser.parse(c["closedAt"])
data["delta"].at[num_items] = delta
data["label"].at[num_items] = l
num_items += 1
self.data = data[:num_items]
if __name__ == "__main__":
c = ProjectStats()
c.main() | apache-2.0 |
liang42hao/bokeh | bokeh/compat/mplexporter/exporter.py | 32 | 12403 | """
Matplotlib Exporter
===================
This submodule contains tools for crawling a matplotlib figure and exporting
relevant pieces to a renderer.
"""
import warnings
import io
from . import utils
import matplotlib
from matplotlib import transforms
from matplotlib.backends.backend_agg import FigureCanvasAgg
class Exporter(object):
"""Matplotlib Exporter
Parameters
----------
renderer : Renderer object
The renderer object called by the exporter to create a figure
visualization. See mplexporter.Renderer for information on the
methods which should be defined within the renderer.
close_mpl : bool
If True (default), close the matplotlib figure as it is rendered. This
is useful for when the exporter is used within the notebook, or with
an interactive matplotlib backend.
"""
def __init__(self, renderer, close_mpl=True):
self.close_mpl = close_mpl
self.renderer = renderer
def run(self, fig):
"""
Run the exporter on the given figure
Parameters
----------
fig : matplotlib.Figure instance
The figure to export
"""
# Calling savefig executes the draw() command, putting elements
# in the correct place.
if fig.canvas is None:
fig.canvas = FigureCanvasAgg(fig)
fig.savefig(io.BytesIO(), format='png', dpi=fig.dpi)
if self.close_mpl:
import matplotlib.pyplot as plt
plt.close(fig)
self.crawl_fig(fig)
@staticmethod
def process_transform(transform, ax=None, data=None, return_trans=False,
force_trans=None):
"""Process the transform and convert data to figure or data coordinates
Parameters
----------
transform : matplotlib Transform object
The transform applied to the data
ax : matplotlib Axes object (optional)
The axes the data is associated with
data : ndarray (optional)
The array of data to be transformed.
return_trans : bool (optional)
If true, return the final transform of the data
force_trans : matplotlib.transform instance (optional)
If supplied, first force the data to this transform
Returns
-------
code : string
Code is either "data", "axes", "figure", or "display", indicating
the type of coordinates output.
transform : matplotlib transform
the transform used to map input data to output data.
Returned only if return_trans is True
new_data : ndarray
Data transformed to match the given coordinate code.
Returned only if data is specified
"""
if isinstance(transform, transforms.BlendedGenericTransform):
warnings.warn("Blended transforms not yet supported. "
"Zoom behavior may not work as expected.")
if force_trans is not None:
if data is not None:
data = (transform - force_trans).transform(data)
transform = force_trans
code = "display"
if ax is not None:
for (c, trans) in [("data", ax.transData),
("axes", ax.transAxes),
("figure", ax.figure.transFigure),
("display", transforms.IdentityTransform())]:
if transform.contains_branch(trans):
code, transform = (c, transform - trans)
break
if data is not None:
if return_trans:
return code, transform.transform(data), transform
else:
return code, transform.transform(data)
else:
if return_trans:
return code, transform
else:
return code
def crawl_fig(self, fig):
"""Crawl the figure and process all axes"""
with self.renderer.draw_figure(fig=fig,
props=utils.get_figure_properties(fig)):
for ax in fig.axes:
self.crawl_ax(ax)
def crawl_ax(self, ax):
"""Crawl the axes and process all elements within"""
with self.renderer.draw_axes(ax=ax,
props=utils.get_axes_properties(ax)):
for line in ax.lines:
self.draw_line(ax, line)
for text in ax.texts:
self.draw_text(ax, text)
for (text, ttp) in zip([ax.xaxis.label, ax.yaxis.label, ax.title],
["xlabel", "ylabel", "title"]):
if(hasattr(text, 'get_text') and text.get_text()):
self.draw_text(ax, text, force_trans=ax.transAxes,
text_type=ttp)
for artist in ax.artists:
# TODO: process other artists
if isinstance(artist, matplotlib.text.Text):
self.draw_text(ax, artist)
for patch in ax.patches:
self.draw_patch(ax, patch)
for collection in ax.collections:
self.draw_collection(ax, collection)
for image in ax.images:
self.draw_image(ax, image)
legend = ax.get_legend()
if legend is not None:
props = utils.get_legend_properties(ax, legend)
with self.renderer.draw_legend(legend=legend, props=props):
if props['visible']:
self.crawl_legend(ax, legend)
def crawl_legend(self, ax, legend):
"""
Recursively look through objects in legend children
"""
legendElements = list(utils.iter_all_children(legend._legend_box,
skipContainers=True))
legendElements.append(legend.legendPatch)
for child in legendElements:
# force a large zorder so it appears on top
child.set_zorder(1E6 + child.get_zorder())
try:
# What kind of object...
if isinstance(child, matplotlib.patches.Patch):
self.draw_patch(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.text.Text):
if not (child is legend.get_children()[-1]
and child.get_text() == 'None'):
self.draw_text(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.lines.Line2D):
self.draw_line(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.collections.Collection):
self.draw_collection(ax, child,
force_pathtrans=ax.transAxes)
else:
warnings.warn("Legend element %s not impemented" % child)
except NotImplementedError:
warnings.warn("Legend element %s not impemented" % child)
def draw_line(self, ax, line, force_trans=None):
"""Process a matplotlib line and call renderer.draw_line"""
coordinates, data = self.process_transform(line.get_transform(),
ax, line.get_xydata(),
force_trans=force_trans)
linestyle = utils.get_line_style(line)
if linestyle['dasharray'] is None:
linestyle = None
markerstyle = utils.get_marker_style(line)
if (markerstyle['marker'] in ['None', 'none', None]
or markerstyle['markerpath'][0].size == 0):
markerstyle = None
label = line.get_label()
if markerstyle or linestyle:
self.renderer.draw_marked_line(data=data, coordinates=coordinates,
linestyle=linestyle,
markerstyle=markerstyle,
label=label,
mplobj=line)
def draw_text(self, ax, text, force_trans=None, text_type=None):
"""Process a matplotlib text object and call renderer.draw_text"""
content = text.get_text()
if content:
transform = text.get_transform()
position = text.get_position()
coords, position = self.process_transform(transform, ax,
position,
force_trans=force_trans)
style = utils.get_text_style(text)
self.renderer.draw_text(text=content, position=position,
coordinates=coords,
text_type=text_type,
style=style, mplobj=text)
def draw_patch(self, ax, patch, force_trans=None):
"""Process a matplotlib patch object and call renderer.draw_path"""
vertices, pathcodes = utils.SVG_path(patch.get_path())
transform = patch.get_transform()
coordinates, vertices = self.process_transform(transform,
ax, vertices,
force_trans=force_trans)
linestyle = utils.get_path_style(patch, fill=patch.get_fill())
self.renderer.draw_path(data=vertices,
coordinates=coordinates,
pathcodes=pathcodes,
style=linestyle,
mplobj=patch)
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
"""Process a matplotlib collection and call renderer.draw_collection"""
(transform, transOffset,
offsets, paths) = collection._prepare_points()
offset_coords, offsets = self.process_transform(
transOffset, ax, offsets, force_trans=force_offsettrans)
path_coords = self.process_transform(
transform, ax, force_trans=force_pathtrans)
processed_paths = [utils.SVG_path(path) for path in paths]
processed_paths = [(self.process_transform(
transform, ax, path[0], force_trans=force_pathtrans)[1], path[1])
for path in processed_paths]
path_transforms = collection.get_transforms()
try:
# matplotlib 1.3: path_transforms are transform objects.
# Convert them to numpy arrays.
path_transforms = [t.get_matrix() for t in path_transforms]
except AttributeError:
# matplotlib 1.4: path transforms are already numpy arrays.
pass
styles = {'linewidth': collection.get_linewidths(),
'facecolor': collection.get_facecolors(),
'edgecolor': collection.get_edgecolors(),
'alpha': collection._alpha,
'zorder': collection.get_zorder()}
offset_dict = {"data": "before",
"screen": "after"}
offset_order = offset_dict[collection.get_offset_position()]
self.renderer.draw_path_collection(paths=processed_paths,
path_coordinates=path_coords,
path_transforms=path_transforms,
offsets=offsets,
offset_coordinates=offset_coords,
offset_order=offset_order,
styles=styles,
mplobj=collection)
def draw_image(self, ax, image):
"""Process a matplotlib image object and call renderer.draw_image"""
self.renderer.draw_image(imdata=utils.image_to_base64(image),
extent=image.get_extent(),
coordinates="data",
style={"alpha": image.get_alpha(),
"zorder": image.get_zorder()},
mplobj=image)
| bsd-3-clause |
zuotingbing/spark | python/pyspark/sql/pandas/typehints.py | 8 | 6323 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql.pandas.utils import require_minimum_pandas_version
def infer_eval_type(sig):
"""
Infers the evaluation type in :class:`pyspark.rdd.PythonEvalType` from
:class:`inspect.Signature` instance.
"""
from pyspark.sql.pandas.functions import PandasUDFType
require_minimum_pandas_version()
import pandas as pd
annotations = {}
for param in sig.parameters.values():
if param.annotation is not param.empty:
annotations[param.name] = param.annotation
# Check if all arguments have type hints
parameters_sig = [annotations[parameter] for parameter
in sig.parameters if parameter in annotations]
if len(parameters_sig) != len(sig.parameters):
raise ValueError(
"Type hints for all parameters should be specified; however, got %s" % sig)
# Check if the return has a type hint
return_annotation = sig.return_annotation
if sig.empty is return_annotation:
raise ValueError(
"Type hint for the return type should be specified; however, got %s" % sig)
# Series, Frame or Union[DataFrame, Series], ... -> Series or Frame
is_series_or_frame = (
all(a == pd.Series or # Series
a == pd.DataFrame or # DataFrame
check_union_annotation( # Union[DataFrame, Series]
a,
parameter_check_func=lambda na: na == pd.Series or na == pd.DataFrame)
for a in parameters_sig) and
(return_annotation == pd.Series or return_annotation == pd.DataFrame))
# Iterator[Tuple[Series, Frame or Union[DataFrame, Series], ...] -> Iterator[Series or Frame]
is_iterator_tuple_series_or_frame = (
len(parameters_sig) == 1 and
check_iterator_annotation( # Iterator
parameters_sig[0],
parameter_check_func=lambda a: check_tuple_annotation( # Tuple
a,
parameter_check_func=lambda ta: (
ta == Ellipsis or # ...
ta == pd.Series or # Series
ta == pd.DataFrame or # DataFrame
check_union_annotation( # Union[DataFrame, Series]
ta,
parameter_check_func=lambda na: (
na == pd.Series or na == pd.DataFrame))))) and
check_iterator_annotation(
return_annotation,
parameter_check_func=lambda a: a == pd.DataFrame or a == pd.Series))
# Iterator[Series, Frame or Union[DataFrame, Series]] -> Iterator[Series or Frame]
is_iterator_series_or_frame = (
len(parameters_sig) == 1 and
check_iterator_annotation(
parameters_sig[0],
parameter_check_func=lambda a: (
a == pd.Series or # Series
a == pd.DataFrame or # DataFrame
check_union_annotation( # Union[DataFrame, Series]
a,
parameter_check_func=lambda ua: ua == pd.Series or ua == pd.DataFrame))) and
check_iterator_annotation(
return_annotation,
parameter_check_func=lambda a: a == pd.DataFrame or a == pd.Series))
# Series, Frame or Union[DataFrame, Series], ... -> Any
is_series_or_frame_agg = (
all(a == pd.Series or # Series
a == pd.DataFrame or # DataFrame
check_union_annotation( # Union[DataFrame, Series]
a,
parameter_check_func=lambda ua: ua == pd.Series or ua == pd.DataFrame)
for a in parameters_sig) and (
# It's tricky to whitelist which types pd.Series constructor can take.
# Simply blacklist common types used here for now (which becomes object
# types Spark can't recognize).
return_annotation != pd.Series and
return_annotation != pd.DataFrame and
not check_iterator_annotation(return_annotation) and
not check_tuple_annotation(return_annotation)
))
if is_series_or_frame:
return PandasUDFType.SCALAR
elif is_iterator_tuple_series_or_frame or is_iterator_series_or_frame:
return PandasUDFType.SCALAR_ITER
elif is_series_or_frame_agg:
return PandasUDFType.GROUPED_AGG
else:
raise NotImplementedError("Unsupported signature: %s." % sig)
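# Illustrative signatures and the eval type inferred above (pd is pandas and
# Iterator/Tuple come from typing; the examples are not exhaustive):
#   (s: pd.Series) -> pd.Series                                   => SCALAR
#   (it: Iterator[Tuple[pd.Series, ...]]) -> Iterator[pd.Series]  => SCALAR_ITER
#   (s: pd.Series) -> float                                       => GROUPED_AGG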
def check_tuple_annotation(annotation, parameter_check_func=None):
# Python 3.6 has `__name__`. Python 3.7 and 3.8 have `_name`.
# Check if the name is Tuple first. After that, check the generic types.
name = getattr(annotation, "_name", getattr(annotation, "__name__", None))
return name == "Tuple" and (
parameter_check_func is None or all(map(parameter_check_func, annotation.__args__)))
def check_iterator_annotation(annotation, parameter_check_func=None):
name = getattr(annotation, "_name", getattr(annotation, "__name__", None))
return name == "Iterator" and (
parameter_check_func is None or all(map(parameter_check_func, annotation.__args__)))
def check_union_annotation(annotation, parameter_check_func=None):
import typing
# Note that we cannot rely on '__origin__' in other type hints as it has changed from version
# to version. For example, it's abc.Iterator in Python 3.7 but typing.Iterator in Python 3.6.
origin = getattr(annotation, "__origin__", None)
return origin == typing.Union and (
parameter_check_func is None or all(map(parameter_check_func, annotation.__args__)))
| apache-2.0 |
harisbal/pandas | pandas/_version.py | 2 | 16219 | # This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
from pandas.compat import PY3
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig(object):
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "pandas-"
cfg.versionfile_source = "pandas/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
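# e.g. the @register_vcs_handler("git", "get_keywords") decoration below results
# in HANDLERS["git"]["get_keywords"] = git_get_keywords (illustrative of how the
# handler registry is populated).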
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run {dispcmd}".format(dispcmd=dispcmd))
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if PY3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run {dispcmd} (error)".format(dispcmd=dispcmd))
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '{root}', but '{dirname}' "
"doesn't start with prefix '{parentdir_prefix}'".format(
root=root, dirname=dirname,
parentdir_prefix=parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r'\d', r)}
if verbose:
print("discarding '{}', no digits".format(",".join(refs - tags)))
if verbose:
print("likely tags: {}".format(",".join(sorted(tags))))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking {r}".format(r=r))
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in {root}".format(root=root))
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
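# e.g. "v0.25.0-12-g0123abc-dirty", or just "0123abc" when no tag is reachable
# (illustrative values)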
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: "
"'{describe_out}'".format(
describe_out=describe_out))
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '{full_tag}' doesn't start with prefix " \
"'{tag_prefix}'"
print(fmt.format(full_tag=full_tag, tag_prefix=tag_prefix))
pieces["error"] = ("tag '{full_tag}' doesn't start with "
"prefix '{tag_prefix}'".format(
full_tag=full_tag, tag_prefix=tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "{:d}.g{}".format(pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.{:d}.g{}".format(pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
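# Illustrative renderings (hypothetical tag/commit values):
#   exactly on tag v0.25.0           -> "0.25.0"
#   12 commits past the tag          -> "0.25.0+12.g0123abc"
#   ... plus uncommitted changes     -> "0.25.0+12.g0123abc.dirty"
#   no tags reachable                -> "0+untagged.12.g0123abc"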
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post{:d}".format(pieces["distance"])
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g{}".format(pieces["short"])
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g{}".format(pieces["short"])
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-{:d}-g{}".format(pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always --long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-{:d}-g{}".format(pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '{style}'".format(style=style))
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
| bsd-3-clause |
yaodi833/shorttext | document_classification_20newsgroups.py | 1 | 9740 | # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
print("done in %fs" % (time() - t0))
print()
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = np.asarray(vectorizer.get_feature_names())
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.f1_score(y_test, pred)
print("f1-score: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
class L1LinearSVC(LinearSVC):
def fit(self, X, y):
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
self.transformer_ = LinearSVC(penalty="l1",
dual=False, tol=1e-3)
X = self.transformer_.fit_transform(X, y)
return LinearSVC.fit(self, X, y)
def predict(self, X):
X = self.transformer_.transform(X)
return LinearSVC.predict(self, X)
print('=' * 80)
print("LinearSVC with L1-based feature selection")
results.append(benchmark(L1LinearSVC()))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show() | unlicense |
sniemi/SamPy | sandbox/deStripeACS.py | 1 | 2864 | import matplotlib
matplotlib.use('PDF')
from itertools import izip, count
import pyfits as pf
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.ticker import NullFormatter
input = 'flt.fits'
sigma = 5
#function begins
inp = input.replace('.fits', '')
fh = pf.open(input)
data = fh[1].data
org = data.copy()
dqarr = fh[3].data
medians = []
for i, l, dq in izip(count(), data, dqarr):
msk = ~(dq > 0)
d = l[msk]
#mask additionally everything above x sigma
sig = np.median(d) + sigma*np.std(d)
msk2 = d < sig
median = np.median(d[msk2])
print i, median
if ~np.isnan(median):
data[i] -= median
medians.append(median)
else:
print 'Will not remove nan median on line %i' % i
medians = np.asarray(medians)
#medmed1 = np.median(medians)
medmed2 = np.median(org[~(dqarr > 0)])
data += medmed2
#print medmed1, medmed2
fh.writeto(inp+'destriped.fits')
fh.close()
plt.figure()
plt.title(inp)
ims = plt.imshow(data / org, origin='lower', vmin=0.98, vmax=1.02)
cb = plt.colorbar(ims)
cb.set_label('Destriped / Original')
plt.savefig(inp+'ratio.pdf')
plt.close()
nullfmt = NullFormatter()
#KDE1
#est1 = []
#vals = medians-medmed1
#kde = scipy.stats.gaussian_kde(vals)
#for x in np.arange(np.int(np.min(vals)), np.int(np.max(vals)), 0.1):
# y = kde.evaluate(x)[0]
# est1.append([x, y])
#est1 = np.asarray(est1)
#KDE2
est2 = []
vals = medians-medmed2
kde = scipy.stats.gaussian_kde(vals)
for x in np.arange(np.int(np.min(vals)), np.int(np.max(vals)), 0.1):
y = kde.evaluate(x)[0]
est2.append([x, y])
est2 = np.asarray(est2)
#plt.figure()
#gs = gridspec.GridSpec(2, 1, height_ratios=[4,1])
#gs.update(wspace=0.0, hspace=0.0, top=0.96, bottom=0.07)
#axScatter = plt.subplot(gs[0])
#axHist = plt.subplot(gs[1])
#axScatter.set_title(inp)
#axScatter.plot(medians-medmed1, np.arange(len(medians)), 'bo')
#axScatter.xaxis.set_major_formatter(nullfmt)
#n, bins, patches = axHist.hist(medians-medmed1, bins=35, normed=True)
#axHist.plot(est1[:,0], est1[:,1], 'r-', label='Gaussian KDE')
#axHist.set_xlabel('Medians')
#axScatter.set_ylabel('Row')
#axScatter.set_ylim(-2, 2046)
#axHist.legend()
#plt.savefig(inp+'dist1.pdf')
#plt.close()
plt.figure()
gs = gridspec.GridSpec(2, 1, height_ratios=[4,1])
gs.update(wspace=0.0, hspace=0.0, top=0.96, bottom=0.07)
axScatter = plt.subplot(gs[0])
axHist = plt.subplot(gs[1])
axScatter.set_title(inp)
axScatter.plot(medians-medmed2, np.arange(len(medians)), 'bo')
axScatter.xaxis.set_major_formatter(nullfmt)
n, bins, patches = axHist.hist(medians-medmed2, bins=35, normed=True)
axHist.plot(est2[:,0], est2[:,1], 'r-', label='Gaussian KDE')
axHist.set_xlabel('Medians')
axScatter.set_ylabel('Row')
axScatter.set_ylim(-1, 2046)
axHist.legend()
plt.savefig(inp+'dist.pdf')
plt.close()
| bsd-2-clause |
bassio/omicexperiment | omicexperiment/transforms/diversity.py | 1 | 5663 | import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist, cdist, squareform
from skbio.diversity import alpha_diversity
from skbio.diversity import beta_diversity
from omicexperiment.transforms.transform import Transform
from omicexperiment.transforms.general import RarefactionFunction
class AlphaDiversity(Transform):
def __init__(self, distance_metric, **kwargs):
self.distance_metric = distance_metric
self.kwargs = kwargs
def __dapply__(self, experiment):
otu_ids = experiment.data_df.index
sample_ids = experiment.data_df.columns
matrix = experiment.data_df.T.as_matrix()
try:
alpha = alpha_diversity(self.distance_metric, counts=matrix, ids=sample_ids, **self.kwargs)
except ValueError as e:
otu_ids_err_msg = "``otu_ids`` is required for phylogenetic diversity metrics."
if str(e) == otu_ids_err_msg:
alpha = alpha_diversity(self.distance_metric, counts=matrix,
ids=sample_ids, otu_ids=otu_ids,
**self.kwargs)
else:
raise(e)
return alpha.to_frame(name=self.distance_metric).transpose()
def __eapply__(self, experiment):
distance_matrix_df = self.__dapply__(experiment)
return experiment.with_data_df(distance_matrix_df)
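# Illustrative use (metric names follow scikit-bio's alpha_diversity, e.g.
# 'shannon'; `exp` stands for an experiment object from this package):
#   shannon_df = AlphaDiversity('shannon').__dapply__(exp)
#   # -> a one-row DataFrame indexed by 'shannon' with one column per sample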
class BetaDiversity(Transform):
def __init__(self, distance_metric, **kwargs):
self.distance_metric = distance_metric
self.kwargs = kwargs
def __dapply__(self, experiment):
otu_ids = experiment.data_df.index
df = experiment.data_df.transpose()
try:
dm = beta_diversity(self.distance_metric, counts=df.as_matrix(), otu_ids=otu_ids, **self.kwargs)
except TypeError as e:
if 'takes no keyword arguments' in str(e):
dm = beta_diversity(self.distance_metric, counts=df.as_matrix(), **self.kwargs)
else:
raise(e)
distance_matrix_df = pd.DataFrame(dm.data, index=df.index, columns=df.index)
return distance_matrix_df
def __eapply__(self, experiment):
distance_matrix_df = self.__dapply__(experiment)
new_exp = experiment.with_data_df(distance_matrix_df)
new_exp.metadata['distance_metric'] = self.distance_metric
return new_exp  # return the experiment that carries the distance_metric metadata
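# Illustrative use ('braycurtis' is one of scikit-bio's beta diversity metrics;
# `exp` stands for an experiment object from this package):
#   dm_exp = BetaDiversity('braycurtis').__eapply__(exp)
#   # dm_exp.data_df is a square sample-by-sample distance matrix and
#   # dm_exp.metadata['distance_metric'] == 'braycurtis'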
class GroupwiseDistances(Transform):
def __init__(self, grouping_col, include_between_dists=True, include_within_dists=False, **kwargs):
self.grouping_col = grouping_col
self.include_between_dists = include_between_dists
self.include_within_dists = include_within_dists
self.kwargs = kwargs
def __dapply__(self, experiment):
from collections import OrderedDict
from itertools import combinations
grouping_col = self.grouping_col
distance_metric = experiment.metadata['distance_metric']
distances_df = pd.DataFrame({distance_metric: squareform(experiment.data_df)},
index=pd.MultiIndex.from_tuples(tuple(combinations(experiment.data_df.index, 2)), names=['sample_1', 'sample_2'])).reset_index()
group_1 = experiment.mapping_df[[grouping_col]].reindex(distances_df['sample_1'])
group_2 = experiment.mapping_df[[grouping_col]].reindex(distances_df['sample_2'])
distances_df['group_1'] = group_1.reset_index()[grouping_col]
distances_df['group_2'] = group_2.reset_index()[grouping_col]
all_within = distances_df[distances_df['group_1'] == distances_df['group_2']]
all_within_with_label = all_within.copy()
all_within_with_label['label'] = 'all_within'
all_within_with_label = all_within_with_label.set_index('label')[[distance_metric]]
all_between = distances_df[distances_df['group_1'] != distances_df['group_2']]
all_between_with_label = all_between.copy()
all_between_with_label['label'] = 'all_between'
all_between_with_label = all_between_with_label.set_index('label')[[distance_metric]]
dist_dataframes = OrderedDict()
dist_dataframes['all_within'] = all_within_with_label
dist_dataframes['all_between'] = all_between_with_label
if self.include_between_dists:
for grp1, grp2 in combinations(experiment.mapping_df[grouping_col].unique(), 2):
lbl = "{}_vs_{}".format(grp1, grp2)
dist_df = distances_df[(distances_df['group_1'] == grp1) & (distances_df['group_2'] == grp2)]
dist_df_w_label = dist_df.copy()
dist_df_w_label['label'] = lbl
dist_df_w_label = dist_df_w_label.set_index('label')[[distance_metric]]
dist_dataframes[lbl] = dist_df_w_label
if self.include_within_dists:
for grp in experiment.mapping_df[grouping_col].unique():
lbl = "within_{}".format(grp)
dist_df = distances_df[(distances_df['group_1'] == grp) & (distances_df['group_2'] == grp)]
dist_df_w_label = dist_df.copy()
dist_df_w_label['label'] = lbl
dist_df_w_label = dist_df_w_label.set_index('label')[[distance_metric]]
dist_dataframes[lbl] = dist_df_w_label
return pd.concat([v for k,v in dist_dataframes.items()])
def __eapply__(self, experiment):
groupwise_distances_df = self.__dapply__(experiment)
return experiment.with_data_df(groupwise_distances_df)
| bsd-3-clause |
soxofaan/luigi | test/contrib/pai_test.py | 8 | 3501 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Open Targets
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Tests for OpenPAI wrapper for Luigi.
Written and maintained by Liu, Dongqing (@liudongqing).
"""
from helpers import unittest
import responses
import time
import luigi
import logging
from luigi.contrib.pai import PaiTask
from luigi.contrib.pai import TaskRole
logging.basicConfig(level=logging.DEBUG)
"""
The following configurations are required to run the test
[OpenPai]
pai_url:http://host:port/
username:admin
password:admin-password
expiration:3600
"""
class SklearnJob(PaiTask):
image = "openpai/pai.example.sklearn"
name = "test_job_sk_{0}".format(time.time())
command = 'cd scikit-learn/benchmarks && python bench_mnist.py'
virtual_cluster = 'spark'
tasks = [TaskRole('test', 'cd scikit-learn/benchmarks && python bench_mnist.py', memoryMB=4096)]
class TestPaiTask(unittest.TestCase):
@responses.activate
def test_success(self):
"""
Here using the responses lib to mock the PAI rest api call, the following specify the response of the call.
"""
responses.add(responses.POST, 'http://127.0.0.1:9186/api/v1/token',
json={"token": "test", "user": "admin", "admin": True}, status=200)
sk_task = SklearnJob()
responses.add(responses.POST, 'http://127.0.0.1:9186/api/v1/jobs',
json={"message": "update job {0} successfully".format(sk_task.name)}, status=202)
responses.add(responses.GET, 'http://127.0.0.1:9186/api/v1/jobs/{0}'.format(sk_task.name),
json={}, status=404)
responses.add(responses.GET, 'http://127.0.0.1:9186/api/v1/jobs/{0}'.format(sk_task.name),
body='{"jobStatus": {"state":"SUCCEED"}}', status=200)
success = luigi.build([sk_task], local_scheduler=True)
self.assertTrue(success)
self.assertTrue(sk_task.complete())
@responses.activate
def test_fail(self):
"""
Here using the responses lib to mock the PAI rest api call, the following specify the response of the call.
"""
responses.add(responses.POST, 'http://127.0.0.1:9186/api/v1/token',
json={"token": "test", "user": "admin", "admin": True}, status=200)
fail_task = SklearnJob()
responses.add(responses.POST, 'http://127.0.0.1:9186/api/v1/jobs',
json={"message": "update job {0} successfully".format(fail_task.name)}, status=202)
responses.add(responses.GET, 'http://127.0.0.1:9186/api/v1/jobs/{0}'.format(fail_task.name),
json={}, status=404)
responses.add(responses.GET, 'http://127.0.0.1:9186/api/v1/jobs/{0}'.format(fail_task.name),
body='{"jobStatus": {"state":"FAILED"}}', status=200)
success = luigi.build([fail_task], local_scheduler=True)
self.assertFalse(success)
self.assertFalse(fail_task.complete())
| apache-2.0 |
quantumjot/PyFolding | pyfolding/models.py | 1 | 40806 | #!/usr/bin/env python
"""
Python implementation of common model fitting operations to
analyse protein folding data. Simply automates some fitting
and value calculation. Will be extended to include phi-value
analysis and other common calculations.
Allows for quick model evaluation and plotting.
Also tried to make this somewhat abstract and modular to
enable more interesting calculations, such as Ising models
and such.
Requirements (recommended python 2.7+):
- numpy
- scipy
- matplotlib
Lowe, A.R. 2015
"""
import sys
import inspect
import numpy as np
import scipy as sp
from . import core
from . import constants
__author__ = "Alan R. Lowe"
__email__ = "a.lowe@ucl.ac.uk"
def list_models():
""" List the kinetic of equilibrium models defined in this module.
Returns a list of the names of the models, whose parent class is
FitModel.
"""
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
verif = lambda cls: 'Verified: {0}'.format(cls[1]().verified)
fit_models = [ (cls[0], verif(cls)) for cls in clsmembers if cls[1].__bases__[0] == core.FitModel ]
return fit_models
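# Illustrative usage (import path is an assumption; adjust to the local install):
# >>> from pyfolding import models
# >>> for name, status in models.list_models():
# ...     print name, status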
class TemplateModel(core.FitModel):
""" A template model for expansion
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([])
def fit_func(self, x):
raise NotImplementedError
@property
def equation(self):
return r'F=f(x)'
# F = \frac{\exp( m(x-d_{50})) / RT} { 1+\exp(m(x-d_{50}))/RT}
"""
==========================================================
EQUILIBRIUM FOLDING models
==========================================================
"""
class TwoStateEquilibrium(core.FitModel):
""" Two state equilibrium denaturation curve - No sloping baseline.
Folding Scheme:
N <-> D
Params:
F = Fraction unfolded
m = m-value
x = denaturant concentration (M)
d50 = denaturant midpoint (M)
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
Clarke and Fersht. Engineered disulfide bonds as probes of
the folding pathway of barnase: Increasing the stability
of proteins against the rate of denaturation.
Biochemistry (1993) vol. 32 (16) pp. 4322-4329
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([1.5, 5.])
self.verified = True
def fit_func(self, x, m, d50):
F = ( np.exp((m*(x-d50))/core.temperature.RT)) / (1.+np.exp((m*(x-d50))/core.temperature.RT))
return F
@property
def equation(self):
return r'F = \frac{\exp( m(x-d_{50})) / RT} { 1+\exp(m(x-d_{50}))/RT}'
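# Illustrative sketch (not part of the PyFolding API): the same two-state expression evaluated
# directly with numpy, assuming RT ~ 0.59 kcal.mol-1 (298 K) and nominal m / d50 values:
# >>> import numpy as np
# >>> x = np.linspace(0., 9., 10)   # denaturant concentration (M)
# >>> m, d50, RT = 1.5, 5.0, 0.59
# >>> F = np.exp(m*(x - d50)/RT) / (1. + np.exp(m*(x - d50)/RT))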
class TwoStateEquilibriumSloping(core.FitModel):
""" Two state equilibrium denaturation curve - Sloping baseline.
Folding Scheme:
N <-> D
Params:
F = Fraction unfolded
alpha_f = intercept of the native baseline at low denaturation concentrations
beta_f = slope/gradient of the native baseline at low denaturation concentrations
alpha_u = intercept of the denatured baseline at high denaturation concentrations
beta_u = slope/gradient of the denatured baseline at high denaturation concentrations
m = m-value
x = denaturant concentration (M)
d50 = denaturant midpoint (M)
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
Clarke and Fersht. Engineered disulfide bonds as probes of
the folding pathway of barnase: Increasing the stability
of proteins against the rate of denaturation.
Biochemistry (1993) vol. 32 (16) pp. 4322-4329
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([1., 0.1, 0.0, 0.1, 1.5, 5.])
self.verified = True
def fit_func(self, x, alpha_f, beta_f, alpha_u, beta_u, m, d50):
F = (alpha_f+beta_f*x) + (alpha_u+beta_u*x) * (\
( np.exp((m*(x-d50))/core.temperature.RT)) / (1.+np.exp((m*(x-d50))/core.temperature.RT)))
return F
@property
def equation(self):
return r'F = (\alpha_f+\beta_f x) + (\alpha_u+\beta_u x) \cdot \frac{\exp( m(x-d_{50})) / RT} { 1+\exp(m(x-d_{50}))/RT}'
# NOTE (ergm) added on 30/8/2017 and corrected incorrect asscii for running on PC 8/9/2017
class ThreeStateEquilibrium (core.FitModel):
""" Three state equilbrium denaturation curve.
Folding Scheme:
N <-> I <-> D
Params:
Y_obs = The spectroscopic signal maximum as a function of denaturant concentration
Y_N = spectroscopic signals of the native state
Y_D = spectroscopic signals of the denatured state
F_D = fraction denatured
F_N = fraction native
F_I = fraction intermediate
Kni = equilibrium constant of unfolding native to intermediate state
Kid = equilibrium constant of unfolding intermediate to denatured state
DGni = stability of native state relative to intermediate state
m_ni = m-value of native to intermediate transition
DGid = stability of intermediate state relative to denatured state
m_id = m-value of intermediate to denatured transition
x = denaturant concentration (M)
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
Hecky J, Muller K.M. Structural Perturbation and Compensation by Directed
Evolution at Physiological Temperature Leads to Thermostabilization of
beta-Lactamase. (2005) Biochemistry 44. pp. 12640-12654
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([1., 0.5, 0.0, 5., 1.5, 5., 1])
# NOTE (ergm) added on 3/11/2017
self.verified = True
def fit_func(self, x, Y_N, Y_I, Y_D, DGni, m_ni, DGid, m_id):
F = (Y_N + Y_I*np.exp((-DGni + m_ni*x)/core.temperature.RT) + Y_D*np.exp((-DGni + m_ni*x)/core.temperature.RT) * np.exp((-DGid + m_id*x)/core.temperature.RT)) \
/ (1 + np.exp((-DGni + m_ni*x)/core.temperature.RT) + np.exp((-DGni + m_ni*x)/core.temperature.RT) * np.exp((-DGid + m_id*x)/core.temperature.RT))
return F
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& \Upsilon_{obs} = \Upsilon_N F_N + \Upsilon_I F_I + \Upsilon_D F_D \ \\ \
\text{where:} \\ \
& F_N = \frac{1} {1 + K_{NI} + K_{NI} K_{ID}}\\ \
& F_I = \frac{K_{NI}} {1 + K_{NI} + K_{NI} K_{ID}}\\ \
& F_D = \frac{K_{NI} K_{ID}} {1 + K_{NI} + K_{NI} K_{ID}}\\ \
\text{and:} \\ \
& K_{NI} = \exp \frac{\Delta G_{NI}^{H_2O} + m_{NI} x} {RT}\\ \
& K_{ID} = \exp \frac{\Delta G_{ID}^{H_2O} + m_{ID} x} {RT}\\ \
\\ \
\text{thus:} \\ \
& \Upsilon_{obs} = \frac{ \Upsilon_N + \Upsilon_I \exp \frac {\Delta G_{NI}^{H_2O} + m_{NI} x} {RT} + \
\Upsilon_D \exp \frac{\Delta G_{NI}^{H_2O} + m_{NI} x} {RT} \cdot \exp \frac{\Delta G_{ID}^{H_2O} + m_{ID} x} {RT}} {1 + \exp \
\frac{\Delta G_{NI}^{H_2O} + m_{NI} x} {RT} + \exp \frac{\Delta G_{NI}^{H_2O} + m_{NI} x} {RT} \cdot \
\exp \frac{\Delta G_{ID}^{H_2O} + m_{ID} x} {RT}}\
\end{aligned}\
\end{equation}'
# NOTE (ergm) added on 1/8/2017
class TwoStateDimerEquilibrium(core.FitModel):
""" Two State model for a dimer denaturation Equilibrium - No Intermediate.
Folding Scheme:
N2 <-> 2D
Params:
Y_obs = spectroscopic signal at a given concentration of urea
Y_N = spectroscopic signal for native monomeric subunits at a concentration of Pt
Y_D = spectroscopic signal for denatured monomeric subunits at a concentration of Pt
alpha_N = intercept of the native baseline at low denaturation concentrations
beta_N = slope/gradient of the native baseline at low denaturation concentrations
alpha_D = intercept of the denatured baseline at high denaturation concentrations
beta_D = slope/gradient of the denatured baseline at high denaturation concentrations
F_D = fraction of unfolded monomers
K_U = Equilibrium Constant for Unfolding of dimer.
Pt = total protein concentration. This variable needs to be set per denaturation curve.
m = m-value
x = denaturant concentration (M)
d50 = denaturant midpoint (M)
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
Mallam and Jackson. Folding studies on a knotted protein.
Journal of Molecular Biology (2005) vol. 346 (5) pp. 1409-1421
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([1., 0.1, 0.0, 0.1, 1.5, 5., 1e-6])
self.constants = (('Pt',1e-6),)
# NOTE (ergm) added on 3/11/2017
self.verified = True
# NOTE (ergm) added on 25/8/2017
def fit_func(self, x, alpha_N, beta_N, alpha_D, beta_D, m, d50, Pt):
K_U = np.exp(((core.temperature.RT * np.log(Pt))-m*(d50-x)) / core.temperature.RT)
F_D = (np.sqrt((np.square(K_U) + (8 * K_U * Pt))) - K_U) / (4*Pt)
Y_0 = ((alpha_N + beta_N*x)*(1-F_D)) + ((alpha_D + beta_D*x)*(F_D))
return Y_0
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& \Upsilon_{obs} = \Upsilon_N \cdot (1-F_D) + \Upsilon_D \cdot F_D \\ \
\text{where} \\ \
& \Upsilon_N = \alpha_N+\beta_N x \\ \
& \Upsilon_D = \alpha_D+\beta_D x \\ \
& F_D = \frac{\sqrt{((K_U^2 + (8 K_U Pt)) - K_U}} {4 Pt} \\ \
& K_U = \exp \frac{(RT \ln(Pt - m(d_{50} - x))} {RT}\
\end{aligned}\
\end{equation}'
# NOTE (ergm) added on 1/8/2017
# NOTE (ergm) updated Folding Scheme - was wrong 7/9/2017
class ThreeStateMonoIEquilibrium(core.FitModel):
""" Three State model for a dimer denaturation Equilibrium - Monomeric intermediate.
Folding Scheme:
N2 <-> 2I <-> 2D
Params:
Y_rel = spectroscopic signal at a given concentration of urea
Y_N = spectroscopic signal for native state
Y_D = spectroscopic signal for denatured state
Y_I = spectroscopic signal for intermediate state
F_D = fraction denatured monomers
F_N = fraction native dimers
F_I = fraction intermediate monomers
Pt = total protein concentration. This variable needs to be set per denaturation curve.
K1 = equilibrium constant of unfolding for native to intermediate state
K2 = equilibrium constant of unfolding for intermediate to denatured state
DG1 = stability of native state relative to intermediate state
m1 = m-value of native to intermediate transition
DG2 = stability of intermediate state relative to denatured state
m2 = m-value of intermediate to denatured transition
x = denaturant concentration (M)
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
Mallam and Jackson. Folding studies on a knotted protein.
Journal of Molecular Biology (2005) vol. 346 (5) pp. 1409-1421
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([1., 0.1, 1.0, 0.1, 1.5, 5., 3., 1e-6])
self.constants = (('Pt',1e-6),)
# NOTE (ergm) added on 3/11/2017
self.verified = True
def fit_func(self, x, DG1, m1, DG2, m2, Y_N, Y_I, Y_D, Pt):
K1 = np.exp((-DG1 + (m1*x)) / core.temperature.RT)
K2 = np.exp((-DG2 + (m2*x)) / core.temperature.RT)
F_I = (-(K1*(1+K2)) + np.sqrt(np.square(K1) * np.square(1+K2) + (8*Pt*K1))) / (4*Pt)
Y_rel = (Y_N * ((2 * Pt * np.square(F_I))/K1)) + (Y_I * F_I) + (Y_D * (K2*F_I))
return Y_rel
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& \Upsilon_{rel} = \Upsilon_N F_N + \Upsilon_I F_I + \Upsilon_D F_D \\ \
\text{expanded:} \\ \
& \Upsilon_{rel} = \Upsilon_N \cdot \frac{2PtF_I^2} {K_1} + \Upsilon_I F_I + \Upsilon_D * K_2F_I \\ \
\\ \
\text{where:} \\ \
& F_I = \frac {- K_1 (1+K_2) + \sqrt{(K_1^2 (1+K_2)^2 + (8 Pt K_1))}} {4Pt} \\ \
& K_1 = \exp \frac{-\Delta G_{H_20}^1 + m_1 x} {RT} \\ \
& K_2 = \exp \frac{-\Delta G_{H_20}^2 + m_2 x} {RT}\
\end{aligned}\
\end{equation}'
# NOTE (ergm) added on 1/8/2017
# NOTE (ergm) updated Folding Scheme - was wrong 7/9/2017
class ThreeStateDimericIEquilibrium(core.FitModel):
""" Three State model for a dimer denaturation Equilibrium - Dimeric Intermediate.
Folding Scheme:
N2 <-> I2 <-> 2D
Params:
Y_rel = spectroscopic signal at a given concentration of urea
Y_N = spectroscopic signal for native state
Y_D = spectroscopic signal for denatured state
Y_I = spectroscopic signal for intermediate state
F_D = fraction denatured monomers
F_N = fraction native dimers
F_I = fraction intermediate dimers
Pt = total protein concentration. This variable needs to be set per denaturation curve.
K1 = equilibrium constant of unfolding native to intermediate state
K2 = equilibrium constant of unfolding intermediate to denatured state
DG1 = stability of native state relative to intermediate state
m1 = m-value of native to intermediate transition
DG2 = stability of intermediate state relative to denatured state
m2 = m-value of intermediate to denatured transition
x = denaturant concentration (M)
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
Mallam and Jackson. Folding studies on a knotted protein.
Journal of Molecular Biology (2005) vol. 346 (5) pp. 1409-1421
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([1., 0.1, 0.0, 0.1, 1.5, 5., 2., 1e-6])
self.constants = (('Pt',1e-6),)
# NOTE (ergm) added on 3/11/2017
self.verified = True
def fit_func(self, x, DG1, m1, DG2, m2, Y_N, Y_I, Y_D, Pt):
K1 = np.exp((-DG1 + (m1*x)) / core.temperature.RT)
K2 = np.exp((-DG2 + (m2*x)) / core.temperature.RT)
F_D = (-(K1*K2) + np.sqrt(np.square(K1*K2) + 8*(1+K1)*(K1*K2)*Pt)) / (4*Pt*(1+K1))
Y_rel = (Y_N * ((2 * Pt * np.square(F_D))/(K1*K2))) + (Y_I * ((2 * Pt * np.square(F_D))/K2)) + (Y_D * F_D)
return Y_rel
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& \Upsilon_{rel} = \Upsilon_N F_N + \Upsilon_I F_I + \Upsilon_D F_D \\ \
\text{expanded:} \\ \
& \Upsilon_{rel} = \Upsilon_N \cdot \frac{2PtF_D^2} {K_1 K_2} + \Upsilon_I \frac{2PtF_D^2} {K_2} + \Upsilon_D * (F_D) \\ \
\\ \
\text{where:} \\ \
& F_D = \frac {- K_1 K_2 + \sqrt{((K_1 K_2)^2 + 8(1+K_1)(K_1 K_2)Pt)}} {4Pt (1 + K_1)} \\ \
& K_1 = \exp \frac{-\Delta G_{H_20}^1 + m_1 x} {RT} \\ \
& K_2 = \exp \frac{-\Delta G_{H_20}^2 + m_2 x} {RT}\
\end{aligned}\
\end{equation}'
class HomozipperIsingEquilibrium(core.FitModel):
""" Homopolymer Zipper Ising model
Params:
q = partition function
f = fraction of folded protein
Kappa = equilibrium constant of folding for a given repeating unit
Tau = equilibrium constant of association between 2 repeating units
n = number of repeating units
x = denaturant concentration (M)
Gi = intrinsic stability (folding energy) of a repeating unit i
mi = denaturant sensitivity of the intrinsic stability of a repeating unit i
Gi,i+1 = interface interaction energy between 2 repeating units
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
Aksel and Barrick. Analysis of repeat-protein folding using
nearest-neighbor statistical mechanical models.
Methods in enzymology (2009) vol. 455 pp. 95-125
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([7, 0.1, -.53, -4.6])
self.constants = (('n',7),)
self.verified = True
def fit_func(self, x, n, DG_intrinsic, m_intrinsic, DG_interface):
# # clamp to prevent instability
# if DG_intrinsic<0. or DG_interface>0.:
# return core.FIT_ERROR(x)
k = np.exp(-(DG_intrinsic - m_intrinsic*x) / core.temperature.RT )
#t = np.exp(-(DG_interface - m_interface*x) / core.temperature.RT )
t = np.exp(-(DG_interface) / core.temperature.RT )
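# k and t are the per-repeat folding (kappa) and interfacial (tau) equilibrium constants;
# the terms below assemble the closed-form fraction-folded expression for n identical repeats,
# and the complement (1 - theta) is what fit_func returns.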
pre_factor = (k/(n*(k*t-1)))
numerator = n*(k*t)**(n+2) - (n+2)*(k*t)**(n+1) + (n+2)*k*t-n
denominator = (k*t-1)**2 + k*((k*t)**(n+1) - (n+1)*k*t+n )
theta = pre_factor * (numerator / denominator)
return 1.-theta
# NOTE (ergm) changed on 4/9/2017
@property
def equation(self):
return r'\text{the partition function } (q) \text{ and thus fraction of folded protein } (f) \text{ of n arrayed repeats are given by:}\\ \
\begin{equation} \\ \
\begin{aligned} \
& q = 1 + \frac{\kappa([\kappa \tau]^{n+1} - [n+1]\kappa \tau - n)} {(\kappa \tau + 1)^2} \\ \
\\ \
& f = \frac{1} {n} \sum^{n}_{i=0}i\frac{(n-i+1)\kappa^i\tau^{i-1}} {q} \\ \
\\ \
\text{where:} \\ \
& \kappa (x) = \exp\frac{-G_i} {RT} = \exp\frac{-G_{i,H_20} + m_i x} {RT} \\ \
\\ \
& \tau (x) = \exp\frac{-G_{i,i+1}} {RT} \
\end{aligned}\
\end{equation}'
class HeteropolymerIsingEquilibrium(core.FitModel):
""" Heteropolymer Ising model
Params:
q = partition function
f = fraction of folded protein
Kappa = equilibrium constant of folding for a given repeating unit
Tau = equilibrium constant of association between 2 repeating units
n = number of repeating units
x = denaturant concentration (M)
DG_intrinsic = intrinsic stability (folding energy) of a repeating unit i
m_intrinsic = denaturant sensitivity of the intrinsic stability of a repeating unit i
DG_interface = interface interaction energy between 2 repeating units
R = Universal Gas Constant (kcal.mol-1.K-1)
T = Temperature (Kelvin)
Reference:
Aksel and Barrick. Analysis of repeat-protein folding using
nearest-neighbor statistical mechanical models.
Methods in enzymology (2009) vol. 455 pp. 95-125
"""
def __init__(self):
core.FitModel.__init__(self)
def fit_func(self, x):
raise NotImplementedError('This is a dummy model.')
# NOTE (ergm) changed on 4/9/2017
@property
def equation(self):
return r'\text{the partition function } (q) \text{ and thus fraction of folded protein } (f) \text{ of n arrayed repeats are given by:} \\ \
\begin{equation} \\ \
\begin{aligned} \\ \
\kappa(x) &= \exp(-(\Delta G_{intrinsic} - m_{intrinsic}x) / RT) \\ \
\tau(x) &= \exp(-\Delta G_{interface}) / RT) \\ \
q(i) &= \
\begin{bmatrix} 0 & 1\end{bmatrix} \
\begin{bmatrix} \kappa_1\tau_{-1} & 1\\ \kappa & 1 \end{bmatrix} \
\ldots \
\begin{bmatrix} \kappa_n\tau_{n-1} & 1\\ \kappa & 1 \end{bmatrix} \
\begin{bmatrix} 1 \\ 1 \end{bmatrix} \\ \
\theta &= \frac{1}{nq(n)} \sum_{i=0}^{n}{q(i)} \
\end{aligned} \
\end{equation}'
"""
==========================================================
KINETIC FOLDING models
==========================================================
"""
class TwoStateChevron(core.FitModel):
""" Two state chevron plot.
Folding Scheme:
N <-> D
Params:
k obs = rate constant of unfolding or refolding at a particular denaturant concentration
kf = rate constant of refolding at a particular denaturant concentration
mf = the gradient of refolding arm of the chevron
ku = rate constant of unfolding at a a particular denaturant concentration
mu = the gradient of unfolding arm of the chevron
x = denaturant concentration (M)
Reference:
Jackson SE and Fersht AR. Folding of chymotrypsin inhibitor 2.
1. Evidence for a two-state transition.
Biochemistry (1991) 30(43):10428-10435.
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([100., 1.3480, 5e-4, 1.])
#self.constants = (('mf',1.76408),('mu',1.13725))
self.verified = True
def fit_func(self, x, kf, mf, ku, mu):
k_obs = kf*np.exp(-mf*x) + ku*np.exp(mu*x)
return k_obs
def error_func(self, y):
return np.log(y)
# NOTE (ergm) added on 24/8/2017
# def components(self, x, kf, mf, ku, mu):
# k_f = kf*np.exp(-mf*x)
# k_u = ku*np.exp(mu*x)
# k_obs = k_f + k_u
# return {'k_f':k_f, 'k_u':k_u}
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& k_{obs} = k_f + k_u \\ \
\\ \
\text{where:} \\ \
& k_f = k_f^{H_2O}\exp(-m_{kf}x)\\ \
& k_u = k_u^{H_2O}\exp(m_{ku}x) \\ \
\text{thus:} \\ \
& k_{obs} = k_f^{H_2O}\exp(-m_{kf}x) + k_u^{H_2O}\exp(m_{ku}x)\\ \
\end{aligned} \
\end{equation}'
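# Illustrative sketch (not part of the PyFolding API): the observed two-state chevron rate
# evaluated directly with numpy, using the default parameters above as nominal inputs:
# >>> import numpy as np
# >>> x = np.linspace(0., 9., 19)   # denaturant concentration (M)
# >>> kf, mf, ku, mu = 100., 1.348, 5e-4, 1.
# >>> k_obs = kf*np.exp(-mf*x) + ku*np.exp(mu*x)   # fitted in log space, cf. error_func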
class ThreeStateChevron(core.FitModel):
""" Three state chevron with single intermediate.
Folding Scheme:
N <-> I <-> D
Params:
k obs = rate constant of unfolding or refolding at a particular denaturant concentration
kfi = microscopic rate constant for the conversion of folded to intermediate
kif = microscopic rate constant for the conversion of intermediate to folded
i.e. k_if = kif(H20) * exp((mi - mif)*x)
Kiu = equilibrium constant for the rapid equilibration between intermediate & unfolded
i.e. Kiu = Kiu(H2O) * exp((mu-mi)*x)
mif = m-value associated with the kinetic transition between intermediate & folded
mi = m-value associated with the equilibrium transition between intermediate & folded
mu = m-value associated with the equilibrium transition between unfolded & folded
x = denaturant concentration (M)
Reference:
Parker et al. An integrated kinetic analysis of
intermediates and transition states in protein folding reactions.
Journal of molecular biology (1995) vol. 253 (5) pp. 771-86
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([4.5e-4, -9.5e-1, 1.3e9, -6.9, 1.4e-8, -1.6])
#self.constants = (('mif',-0.97996),('mi',-6.00355),('mu',-1.66154))
self.verified = True
def fit_func(self, x, kfi, mif, kif, mi, Kiu, mu):
k_fi = kfi*np.exp(-mif*x)
k_if = kif*np.exp((mi - mif)*x)
K_iu = Kiu*np.exp((mu - mi)*x)
k_obs = k_fi + k_if / (1.+1./K_iu)
return k_obs
def error_func(self, y):
return np.log(y)
def components(self, x, kfi, mif, kif, mi, Kiu, mu):
k_fi = kfi*np.exp(-mif*x)
k_if = kif*np.exp((mi - mif)*x)
k_obs_I = k_fi + k_if
return {'kobs_I':k_obs_I}
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& k_{obs} = \frac{k_{fi} + k_{if}} {(1+1/K_{iu})} \\ \
\\ \
\text{where:} \\ \
& k_{fi} = k_{fi}^{H_2O}\exp(-m_{fi}x)\\ \
& k_{if} = k_{if}^{H_2O}\exp((m_i - m_{if})x)\\ \
& K_{iu} = K_{iu}^{H_2O}\exp((m_u - m_i)x)\\ \
\text{thus:} \\ \
& k_{obs} = k_{fi}^{H_2O}\exp(-m_{if}x) + k_{if}^{H_2O}\exp((m_i - m_{if})x) /(1 + 1 / (K_{iu}^{H_2O}\exp((m_u-m_i)x)))\\ \
\end{aligned} \
\end{equation}'
class ThreeStateFastPhaseChevron(core.FitModel):
""" Three state chevron with single intermediate.
Folding Scheme: N <-> I <-> D
Params:
k obs = rate constant of unfolding or refolding at a particular denaturant concentration
kfi = microscopic rate constant for the conversion of folded to intermediate
kif = microscopic rate constant for the conversion of intermediate to folded
kiu = microscopic rate constant for the conversion of intermediate to unfolded
kui = microscopic rate constant for the conversion of unfolded to intermediate
Kiu = equilibrium constant for the rapid equilibration between intermediate & unfolded
mfi = m-value associated with the kinetic transition between folded & intermediate
mif = m-value associated with the kinetic transition between intermediate & folded
miu = m-value associated with the kinetic transition between intermediate & unfolded
mui = m-value associated with the kinetic transition between unfolded & intermediate
x = denaturant concentration (M)
Reference:
Parker et al. An integrated kinetic analysis of
intermediates and transition states in protein folding reactions.
Journal of molecular biology (1995) vol. 253 (5) pp. 771-86
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([172., 1.42, .445, .641, 1e4, 2.71313, 1.83e-3, 1.06])
#self.constants = (('kui',172.), ('mui',1.42), ('kiu',.445), ('miu',.641), ('mif',-2.71313),('mfi',1.06534))
self.verified = True
def fit_func(self, x, kui, mui, kiu, miu, kif, mif, kfi, mfi):
k_iu = kiu*np.exp(miu*x)
k_ui = kui*np.exp(-mui*x)
k_if = kif*np.exp(-mif*x)
k_fi = kfi*np.exp(mfi*x)
K_iu = k_iu / (k_iu+k_ui)
k_obs = k_fi + k_if / (1.+1./K_iu)
return k_obs
def error_func(self, y):
return np.log(y)
def components(self, x, kui, mui, kiu, miu, kif, mif, kfi, mfi):
k_iu = kiu*np.exp(miu*x)
k_ui = kui*np.exp(-mui*x)
k_if = kif*np.exp(-mif*x)
k_fi = kfi*np.exp(mfi*x)
k_obs_I = k_iu + k_ui
k_obs_N = k_fi + k_if
return {'kobs_I':k_obs_I} #, 'kobs_N':k_obs_N}
# NOTE (ergm) added on 23/8/2017
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& k_{obs} = \frac{k_{fi} + k_{if}} {(1+1/K_{iu})} \\ \
\\ \
\text{where:} \\ \
& k_{fi} = k_{fi}^{H_2O}\exp(m_{fi}x)\\ \
& k_{if} = k_{if}^{H_2O}\exp(-m_{if}x)\\ \
& k_{iu} = k_{iu}^{H_2O}\exp(m_{iu}x)\\ \
& k_{ui} = k_{ui}^{H_2O}\exp(-m_{ui}x)\\ \
& K_{iu} = \frac{k_{iu}} {k_{iu} + k_{ui}}\\ \
\end{aligned} \
\end{equation}'
class ThreeStateSequentialChevron(core.FitModel):
""" Three state metastable intermediate chevron plot.
Folding Scheme: N <-> I <-> D
Params:
k obs = rate constant of unfolding or refolding at a particular denaturant concentration
kfi = microscopic rate constant for the conversion of folded to intermediate
kif = microscopic rate constant for the conversion of intermediate to folded
kiu = microscopic rate constant for the conversion of intermediate to unfolded
kui = microscopic rate constant for the conversion of unfolded to intermediate
mfi = m-value associated with the kinetic transition between folded & intermediate
mif = m-value associated with the kinetic transition between intermediate & folded
miu = m-value associated with the kinetic transition between intermediate & unfolded
mui = m-value associated with the kinetic transition between unfolded & intermediate
x = denaturant concentration (M)
Reference:
Bachmann and Kiefhaber. Apparent two-state tendamistat
folding is a sequential process along a defined route.
J Mol Biol (2001) vol. 306 (2) pp. 375-386
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([2e4, 0.3480, 1e4, 0, 20.163, 1.327, 0.3033, 0.2431])
# NOTE (ergm) changed constants on 3/10/2017
self.constants = (('kiu', 1.e4),('miu',0.))
self.verified = True
def fit_func(self, x, kui, mui, kiu, miu, kif, mif, kfi, mfi):
k_ui = kui*np.exp(-mui*x)
k_iu = kiu*np.exp(miu*x)
k_if = kif*np.exp(-mif*x)
k_fi = kfi*np.exp(mfi*x)
lam_1 = -(k_ui + k_iu + k_if + k_fi)
lam_2 = k_ui * (k_if+k_fi) + k_iu*k_fi
k_obs = 0.5 * (-lam_1 - np.sqrt(lam_1**2 - 4*lam_2))
return k_obs
def error_func(self, y):
return np.log(y)
def components(self, x, kui, mui, kiu, miu, kif, mif, kfi, mfi):
k_ui = kui*np.exp(-mui*x)
k_iu = kiu*np.exp(miu*x)
k_if = kif*np.exp(-mif*x)
k_fi = kfi*np.exp(mfi*x)
k_TS1 = k_ui + (k_fi/kif)*k_iu
k_TS2 = (k_ui/k_iu)*k_if + k_fi
return {'kTS1':k_TS1, 'kTS2':k_TS2}
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& k_{obs} = 0.5(-A_2 \pm \sqrt{A_2^2 - 4A_1}) \\ \
\\ \
\text{where:}\\ \
& A_1 = -(k_{ui} + k_{iu} + k_{if} + k_{fi}) \\ \
& A_2 = k_{ui}(k_{if} + k_{fi}) + k_{iu}k_{if} \\ \
\text{and:} \\ \
& k_{fi} = k_{fi}^{H_2O}\exp(m_{fi}x)\\ \
& k_{if} = k_{if}^{H_2O}\exp(-m_{if}x)\\ \
& k_{iu} = k_{iu}^{H_2O}\exp(m_{iu}x)\\ \
& k_{ui} = k_{ui}^{H_2O}\exp(-m_{ui}x)\\ \
\end{aligned} \
\end{equation}'
class ParallelTwoStateChevron(core.FitModel):
""" Parallel Two state chevron plot.
Folding Scheme:
N <-> D
^ ^
|_____|
Params:
k obs = rate constant of unfolding or refolding at a particular denaturant concentration
k_obs_A = rate constant of unfolding or refolding of pathway A at a particular denaturant concentration
k_obs_B = rate constant of unfolding or refolding of pathway B at a particular denaturant concentration
mf_A = the gradient of refolding arm of pathway A
mf_B = the gradient of refolding arm of pathway B
mu_A = the gradient of unfolding arm of pathway A
mu_B = the gradient of unfolding arm of pathway B
x = denaturant concentration (M)
Reference:
Lowe & Itzhaki. Rational redesign of the folding pathway of a modular protein.
PNAS (2007) vol. 104 (8) pp. 2679-2684
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([50., 1.3480, 5e-4, 1., 150., 3.5])
def fit_func(self, x, kf_A, mf_A, ku_A, mu_A, kf_B, mf_B):
if mf_A < 0. or mf_B < 0. or mu_A < 0.:
return core.FIT_ERROR(x)
if kf_A <0. or ku_A <0. or kf_B < 0.:
return core.FIT_ERROR(x)
deltaG_A = kf_A / ku_A
ku_B = kf_B / deltaG_A
mu_B = np.abs(mf_A + mu_A) - np.abs(mf_B)
k_obs_A = kf_A*np.exp(-mf_A*x) + ku_A*np.exp(mu_A*x)
k_obs_B = kf_B*np.exp(-mf_B*x) + ku_B*np.exp(mu_B*x)
k_obs = k_obs_A + k_obs_B
return k_obs
def error_func(self, y):
return np.log(y)
def components(self, x, kf_A, mf_A, ku_A, mu_A, kf_B, mf_B):
deltaG_A = kf_A / ku_A
ku_B = kf_B / deltaG_A
mu_B = np.abs(mf_A + mu_A) - np.abs(mf_B)
k_obs_A = kf_A*np.exp(-mf_A*x) + ku_A*np.exp(mu_A*x)
k_obs_B = kf_B*np.exp(-mf_B*x) + ku_B*np.exp(mu_B*x)
k_obs = k_obs_A + k_obs_B
return {'kobs_A':k_obs_A, 'kobs_B':k_obs_B}
# NOTE (ergm) added on 23/8/2017
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& k_{obs} = k_{obs}^A + k_{obs}^B \\ \
\\ \
\text{where:}\\ \
& \Delta G^A = k_f^A / k_u^A \\ \
& k_u^B = k_f^B / \Delta G^A \\ \
& m_u^B = (m_f^A + m_u^A) - (m_f^B) \\ \
& k_{obs}^A = k_f^A exp(-m_f^A x) + k_u^A exp(m_u^A x) \\ \
& k_{obs}^B = k_f^B exp(-m_f^B x) + k_u^B exp(m_u^B x) \\ \
\end{aligned} \
\end{equation}'
class ParallelTwoStateUnfoldingChevron(core.FitModel):
""" Parallel Two state unfolding chevron plot.
Folding Scheme:
N -> D
| ^
|____|
Params:
k obs = rate constant of unfolding at a particular denaturant concentration
k_obs_A = rate constant of unfolding of pathway A at a particular denaturant concentration
k_obs_B = rate constant of unfolding of pathway B at a particular denaturant concentration
mu_A = the gradient of unfolding arm of pathway A
mu_B = the gradient of unfolding arm of pathway B
x = denaturant concentration (M)
Reference:
Hutton et al. Mapping the Topography of a Protein Energy Landscape.
JACS (2015) vol. 137 (46) pp. 14610-14625
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([5e-4, 1., 1e-5, 1.5])
def fit_func(self, x, ku_A, mu_A, ku_B, mu_B):
if mu_A < 0. or mu_B < 0.:
return core.FIT_ERROR(x)
k_obs_A = ku_A*np.exp(mu_A*x)
k_obs_B = ku_B*np.exp(mu_B*x)
k_obs = k_obs_A + k_obs_B
return k_obs
def error_func(self, y):
return np.log(y)
def components(self, x, ku_A, mu_A, ku_B, mu_B):
k_obs_A = ku_A*np.exp(mu_A*x)
k_obs_B = ku_B*np.exp(mu_B*x)
k_obs = k_obs_A + k_obs_B
return {'kobs_A':k_obs_A, 'kobs_B':k_obs_B}
# NOTE (ergm) added on 23/8/2017
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& k_{obs} = k_obs^A + k_obs^B \\ \
\\ \
\text{where:}\\ \
& k_obs^A = k_u^A exp(m_u^A x) \\ \
& k_obs^B = k_u^B exp(m_u^B x) \\ \
\end{aligned} \
\end{equation}'
class TwoStateChevronMovingTransition(core.FitModel):
""" Two state chevron with moving transition state.
Folding Scheme:
N <-> D
Params:
k obs = rate of unfolding or refolding at a particular denaturant concentration
kf = rate constant of refolding at a particular denaturant concentration
mf = refolding coefficient for the first order [D] term.
ku = rate constant of unfolding at a particular denaturant concentration
mu = unfolding coefficient for the first order [D] term.
m' = coefficient for the second-order [D] term (both unfolding and refolding).
x = denaturant concentration (M)
Reference:
Ternstrom et al. From snapshot to movie: phi analysis
of protein folding transition states taken one step
further. PNAS (1999) vol. 96 (26) pp. 14854-9
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
# NOTE (ergm) changed on 23/8/2017
self.default_params = np.array([5e-5, 0.2, 10., 0.2, -1.])
# NOTE (ergm) added on 3/11/2017
self.verified = True
# NOTE (ergm) changed on 23/8/2017
def fit_func(self, x, ku, mu, kf, mf, m_prime):
k_obs = ku*(np.exp(mu*x))*(np.exp(m_prime*x*x)) + kf*(np.exp(mf*x))*(np.exp(m_prime*x*x))
return k_obs
def error_func(self, y):
return np.log(y)
# NOTE (ergm) added on 23/8/2017
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& k_{obs} = k_u + k_f \\ \
\\ \
\text{where:}\\ \
& k_u = k_u^{H_2O} \cdot \exp(m_{u} x) \cdot \exp(m^{*} x^2) \\ \
& k_f = k_f^{H_2O} \cdot \exp(m_{f} x) \cdot \exp(m^{*} x^2) \\ \
\end{aligned} \
\end{equation}'
# NOTE (ergm) added on 24/8/2017 & modified on 7/11/2017
class ChevronPolynomialFit(core.FitModel):
""" Chevron fit with 2 different second order polynomials for kf & ku.
Folding Scheme:
N <-> D
Params:
k obs = rate of unfolding or refolding at a particular denaturant concentration
kf = rate constant of refolding at a particular denaturant concentration
mf & mf* = are the refolding coefficients for the first and second-order [D] terms, respectively.
ku = rate constant of unfolding at a particular denaturant concentration
mu & mu* = are the unfolding coefficients for the first and second-order [D] terms, respectively.
x = denaturant concentration (M)
Reference:
Modified version of equation found in:
Ternstrom et al. From snapshot to movie: phi analysis
of protein folding transition states taken one step
further. PNAS (1999) vol. 96 (26) pp. 14854-9
"""
def __init__(self):
core.FitModel.__init__(self)
fit_args = self.fit_func_args
self.params = tuple( [(fit_args[i],i) for i in range(len(fit_args))] )
self.default_params = np.array([5e-5, 1., -0.5, 100., 1., -0.5])
# NOTE (ergm) changed on 3/11/2017
self.verified = True
def fit_func(self, x, ku, mu, mu_prime, kf, mf, mf_prime):
k_obs = ku*(np.exp(mu*x))*(np.exp(mu_prime*x*x)) + kf*(np.exp(mf*x))*(np.exp(mf_prime*x*x))
return k_obs
def error_func(self, y):
return np.log(y)
@property
def equation(self):
return r'\begin{equation} \
\begin{aligned} \
& k_{obs} = k_u + k_f\\ \
\\ \
\text{where:}\\ \
& k_u = k_u^{H_2O} \cdot \exp(m_{u} x) \cdot \exp(m_{u}^{*} x^2) \\ \
& k_f = k_f^{H_2O} \cdot \exp(m_{f} x) \cdot \exp(m_{f}^{*} x^2) \\ \
\end{aligned} \
\end{equation}'
if __name__ == "__main__":
list_models()
| mit |
manashmndl/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 253 | 4158 | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
asurunis/CrisisMappingToolkit | cmt/radar/martinis.py | 1 | 16581 | # -----------------------------------------------------------------------------
# Copyright (c) 2014, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The Crisis Mapping Toolkit (CMT) v1 platform is licensed under the Apache
# License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# -----------------------------------------------------------------------------
import os
import ee
import math
import numpy
import scipy
import scipy.special
import scipy.optimize
import histogram
import matplotlib
#matplotlib.use('tkagg')
import matplotlib.pyplot as plt
from cmt.mapclient_qt import addToMap
#------------------------------------------------------------------------
''' sar_martinis radar algorithm (find threshold by histogram splits on selected subregions)
Algorithm from paper:
"Towards operational near real-time flood detection using a split-based
automatic thresholding procedure on high resolution TerraSAR-X data"
by S. Martinis, A. Twele, and S. Voigt, Nat. Hazards Earth Syst. Sci., 9, 303-314, 2009
This algorithm seems extremely sensitive to multiple threshold and
scale parameters. So far it has not worked well on any data set.
'''
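# Typical call pattern (illustrative; `domain` is a CMT domain object constructed elsewhere):
# result = sar_martinis(domain)      # CV-based subregion selection (default)
# result = sar_martinis_cr(domain)   # CR-based selection (std / gray-value range)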
RED_PALETTE = '000000, FF0000'
BLUE_PALETTE = '000000, 0000FF'
TEAL_PALETTE = '000000, 00FFFF'
LBLUE_PALETTE = '000000, ADD8E6'
GREEN_PALETTE = '000000, 00FF00'
GRAY_PALETTE = '000000, FFFFFF'
def getBoundingBox(bounds):
'''Returns (minLon, minLat, maxLon, maxLat) from domain bounds'''
coordList = bounds['coordinates'][0]
minLat = 999
minLon = 999999
maxLat = -999
maxLon = -999999
for c in coordList:
if c[0] < minLon:
minLon = c[0]
if c[0] > maxLon:
maxLon = c[0]
if c[1] < minLat:
minLat = c[1]
if c[1] > maxLat:
maxLat = c[1]
return (minLon, minLat, maxLon, maxLat)
def divideUpBounds(bounds, boxSizeMeters, maxBoxesPerSide):
'''Divides up a single boundary into a grid based on a grid size in meters'''
# Get the four corners of the box and side widths in meters
(minLon, minLat, maxLon, maxLat) = getBoundingBox(bounds)
bottomLeft = ee.Geometry.Point(minLon, minLat)
topLeft = ee.Geometry.Point(minLon, maxLat)
bottomRight = ee.Geometry.Point(maxLon, minLat)
topRight = ee.Geometry.Point(maxLon, maxLat)
height = float(bottomLeft.distance(topLeft).getInfo())
width = float(bottomLeft.distance(bottomRight).getInfo())
# Determine the number of boxes
numBoxesX = int(math.ceil(width / boxSizeMeters))
numBoxesY = int(math.ceil(height / boxSizeMeters))
if numBoxesX > maxBoxesPerSide:
numBoxesX = maxBoxesPerSide
if numBoxesY > maxBoxesPerSide:
numBoxesY = maxBoxesPerSide
boxSizeMeters = ((width/numBoxesX) + (height/numBoxesY)) / 2
print 'Using ' + str(numBoxesX*numBoxesY) + ' boxes of size ' + str(boxSizeMeters)
# Now compute the box boundaries in degrees
boxWidthLon = (maxLon - minLon) / numBoxesX
boxHeightLat = (maxLat - minLat) / numBoxesY
y = minLat
boxList = []
for r in range(0,numBoxesY):
y = y + boxHeightLat
x = minLon
for c in range(0,numBoxesX):
x = x + boxWidthLon
boxBounds = ee.Geometry.Rectangle(x, y, x+boxWidthLon, y+boxHeightLat)
#print boxBounds
boxList.append(boxBounds)
return boxList, boxSizeMeters
def getBoundsCenter(bounds):
'''Returns the center point of a boundary'''
coordList = bounds['coordinates'][0]
meanLat = 0
meanLon = 0
for c in coordList:
meanLat = meanLat + c[1]
meanLon = meanLon + c[0]
meanLat = meanLat / len(coordList)
meanLon = meanLon / len(coordList)
return (meanLat, meanLon)
#
#def __show_histogram(histogram, binCenters):
# '''Create a plot of a histogram'''
# plt.bar(binCenters, histogram)
#
# plt.show()
#def __show_histogram(histogram, params=None):
# '''Create a plot of a histogram'''
# #values = histogram['histogram']
# #start = histogram['bucketMin']
# #width = histogram['bucketWidth']
# ind = numpy.arange(start=start, stop=start + width * len(values), step=width)[:-1]
# plt.bar(ind, height=values[:-1], width=width, color='b')
# #if params != None:
# # m = domains.MINIMUM_VALUES[instrument]
# # if instrument == domains.UAVSAR:
# # m = math.log10(m)
# # mid = int((params[0] - start) / width)
# # cumulative = sum(values[:mid]) + values[mid] / 2
# # scale = cumulative / __cdf(params, m, params[0])
# # plt.bar(ind, map(lambda x : scale * (__cdf(params, m, x + width / 2) - __cdf(params, m, x - width
#
def applyCutlerLinearLogScale(grayImage, roi):
'''Translates the input SAR image into a hybrid linear-log scale as described in
"Robust automated thresholding of SAR imagery for open-water detection"
by Patrick J Cutler and Frederick W Koehler'''
TOP_SECTION_PERCENTILE = 99
TOP_SECTION_START = 221
topRange = 256 - TOP_SECTION_START
# Compute a histogram of the entire area
# - Do this at a lower resolution to reduce computation time
PERCENTILE_SCALE = 50 # Resolution in meters to compute the percentile at
percentiles = grayImage.reduceRegion(ee.Reducer.percentile([0, TOP_SECTION_PERCENTILE, 100], ['min', 'split', 'max']),
roi, PERCENTILE_SCALE).getInfo()
# Extracting the results is annoying because EE prepends the channel name
minVal = next(val for key, val in percentiles.items() if 'min' in key)
splitVal = next(val for key, val in percentiles.items() if 'split' in key)
maxVal = next(val for key, val in percentiles.items() if 'max' in key)
lowRange = splitVal - minVal
logMin = math.log10(splitVal)
logMax = math.log10(maxVal)
logRange = logMax - logMin
#addToMap(grayImage.select(['vh']), {}, 'vh', False)
# Intensities from 0 to 98th percent are mapped to 0 - 220 on a linear scale
# Intensities from 99 to 100th percent are mapped to 221 - 255 on a log scale
lowMask = grayImage.lt(splitVal )
highMask = grayImage.gte(splitVal)
#addToMap(lowMask, {'min': 0, 'max': 1, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'low range', False)
#addToMap(highMask, {'min': 0, 'max': 1, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'high range', False)
linearPortion = grayImage.subtract(minVal).divide(lowRange).multiply(TOP_SECTION_START-1).multiply(lowMask )#.uint8()
logPortion = grayImage.log10().subtract(logMin).divide(logRange).multiply(topRange).add(TOP_SECTION_START).multiply(highMask)
#addToMap(linearPortion, {'min': 0, 'max': 255, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'linear', False)
#addToMap(logPortion, {'min': 0, 'max': 255, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'log', False)
scaledImage = linearPortion.add(logPortion)
return scaledImage
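# Minimal numpy-only sketch of the same hybrid mapping (illustrative; the array and values
# below are stand-ins, not Earth Engine objects):
# >>> import numpy as np
# >>> img = np.random.gamma(2., 500., (64, 64))   # stand-in SAR backscatter values
# >>> lo, split, hi = np.percentile(img, [0, 99, 100])
# >>> linear = (img - lo) / (split - lo) * 220.
# >>> logpart = (np.log10(img) - np.log10(split)) / (np.log10(hi) - np.log10(split)) * 35. + 221.
# >>> out = np.where(img < split, linear, logpart)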
def sar_martinis_cr(domain):
'''Just calls sar_martinis with the CR option instead of the default CV option'''
return sar_martinis(domain, True)
def sar_martinis(domain, cr_method=False):
'''Compute a global threshold via histogram splitting on selected subregions'''
sensor = domain.get_radar()
radarImage = sensor.image
# Many papers recommend a median-type filter to remove speckle noise.
# 1: Divide up the image into a grid of tiles, X
# Divide up the region into a grid of subregions
MAX_BOXES_PER_SIDE = 12 # Cap the number of boxes at 144
DESIRED_BOX_SIZE_METERS = 3000
boxList, boxSizeMeters = divideUpBounds(domain.bounds, DESIRED_BOX_SIZE_METERS, MAX_BOXES_PER_SIDE)
# Extract the center point from each box
centersList = map(getBoundsCenter, boxList)
# SENTINEL = 12m/pixel
KERNEL_SIZE = 13 # Each box will be covered by a 13x13 pixel kernel
metersPerPixel = boxSizeMeters / KERNEL_SIZE
print 'Using metersPerPixel: ' + str(metersPerPixel)
avgKernel = ee.Kernel.square(KERNEL_SIZE, 'pixels', True); # <-- EE fails if this is in meters!
# Select the radar layer we want to work in
if 'water_detect_radar_channel' in domain.algorithm_params:
channelName = domain.algorithm_params['water_detect_radar_channel']
else: # Just use the first radar channel
channelName = sensor.band_names[0]
# Rescale the input data so the statistics are not dominated by very bright pixels
GRAY_MAX = 255
grayLayer = applyCutlerLinearLogScale(radarImage.select([channelName]), domain.bounds)
#addToMap(grayLayer, {'min': 0, 'max': GRAY_MAX, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'grayLayer', False)
# Compute the global mean, then make a constant image out of it.
globalMean = grayLayer.reduceRegion(ee.Reducer.mean(), domain.bounds, metersPerPixel)
globalMeanImage = ee.Image.constant(globalMean.getInfo()[channelName])
print 'global mean = ' + str(globalMean.getInfo()[channelName])
# Compute mean and standard deviation across the entire image
meanImage = grayLayer.convolve(avgKernel)
graysSquared = grayLayer.pow(ee.Image(2))
meansSquared = meanImage.pow(ee.Image(2))
meanOfSquaredImage = graysSquared.convolve(avgKernel)
meansDiff = meanOfSquaredImage.subtract(meansSquared)
stdImage = meansDiff.sqrt()
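# (local variance = E[X^2] - E[X]^2 under the same averaging kernel; the square root gives the local std dev)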
# Debug plots
#addToMap(meanImage, {'min': 3000, 'max': 70000, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'Mean', False)
#addToMap(stdImage, {'min': 3000, 'max': 200000, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'StdDev', False)
#addToMap(meanImage, {'min': 0, 'max': GRAY_MAX, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'Mean', False)
#addToMap(stdImage, {'min': 0, 'max': 40, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'StdDev', False)
# Compute these two statistics across the entire image
CV = meanImage.divide(stdImage).reproject( "EPSG:4326", None, metersPerPixel)
R = meanImage.divide(globalMeanImage).reproject("EPSG:4326", None, metersPerPixel)
# 2: Prune to a reduced set of tiles X'
# Parameters which control which sub-regions will have their histograms analyzed
# - These are strongly influenced by the smoothing kernel size!!!
MIN_CV = 0.7
MAX_CV = 1.3
MAX_R = 1.1
MIN_R = 0.5
# Debug plots
addToMap(CV, {'min': 0, 'max': 4.0, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'CV', False)
addToMap(R, {'min': 0, 'max': 4.0, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'R', False)
if cr_method:
MIN_CR = 0.10
# sar_griefeneder recommends replacing CV with CR = (std / gray value range), min value 0.05
imageMin = grayLayer.reduceRegion(ee.Reducer.min(), domain.bounds, metersPerPixel).getInfo()[channelName]
imageMax = grayLayer.reduceRegion(ee.Reducer.max(), domain.bounds, metersPerPixel).getInfo()[channelName]
grayRange = imageMax - imageMin
CR = stdImage.divide(grayRange)
#addToMap(CR, {'min': 0, 'max': 0.3, 'opacity': 1.0, 'palette': GRAY_PALETTE}, 'CR', False)
# Filter out pixels based on computed statistics
t1 = CV.gte(MIN_CV)
t2 = CV.lte(MAX_CV)
t3 = R.gte(MIN_R)
t4 = R.lte(MAX_R)
if cr_method:
temp = CR.gte(MIN_CR).And(t3).And(t4)
else:
temp = t1.And(t2).And(t3).And(t4)
X_prime = temp.reproject("EPSG:4326", None, metersPerPixel)
addToMap(X_prime.mask(X_prime), {'min': 0, 'max': 1, 'opacity': 1.0, 'palette': TEAL_PALETTE}, 'X_prime', False)
# 3: Prune again to a final set of tiles X''
# Further pruning happens here but for now we are skipping it and using
# everything that got by the filter. This would speed local computation.
# - This is equivalent to using a large number for N'' in the original paper
# (which does not suggest a value for N'')
X_doublePrime = X_prime
# 4: For each tile, compute the optimal threshold
# Assemble all local gray values at each point ?
localPixelLists = grayLayer.neighborhoodToBands(avgKernel)
maskWrapper = ee.ImageCollection([X_doublePrime]);
collection = ee.ImageCollection([localPixelLists]);
# Extract the point data at from each sub-region!
localThresholdList = []
usedPointList = []
rejectedPointList = []
for loc in centersList:
try:
thisLoc = ee.Geometry.Point(loc[1], loc[0])
# If the mask for this location is invalid, skip this location
maskValue = maskWrapper.getRegion(thisLoc, metersPerPixel);
maskValue = maskValue.getInfo()[1][4] # TODO: Not the best way to grab the value!
if not maskValue:
rejectedPointList.append(thisLoc)
continue
# Otherwise pull down all the pixel values surrounding this center point
pointData = collection.getRegion(thisLoc, metersPerPixel)
pixelVals = pointData.getInfo()[1][4:] # TODO: Not the best way to grab the value!
# TODO: Can EE handle making a histogram around this region or do we need to do this ourselves?
#pointData = localPixelLists.reduceRegion(thisRegion, ee.Reducer.histogram(), SAMPLING_SCALE);
#print pointData.getInfo()
#print pixelVals
#__show_histogram(pixelVals)
#plt.bar(range(len(pixelVals)), pixelVals)
# Compute a histogram from the pixels (TODO: Do this with EE!)
NUM_BINS = 256
hist, binEdges = numpy.histogram(pixelVals, NUM_BINS)
binCenters = numpy.divide(numpy.add(binEdges[:NUM_BINS], binEdges[1:]), 2.0)
# Compute a split on the histogram
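# (splitHistogramKittlerIllingworth, from the local histogram module, implements Kittler &
# Illingworth minimum-error thresholding: the histogram is modelled as a mixture of two
# Gaussian populations and the gray value minimising the criterion is returned)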
splitVal = histogram.splitHistogramKittlerIllingworth(hist, binCenters)
print "Computed local threshold = " + str(splitVal)
localThresholdList.append(splitVal)
usedPointList.append(thisLoc)
#plt.bar(binCenters, hist)
#plt.show()
except Exception,e:
print 'Failed to compute a location:'
print str(e)
numUsedPoints = len(usedPointList)
numUnusedPoints = len(rejectedPointList)
if (numUsedPoints > 0):
usedPointListEE = ee.FeatureCollection(ee.Feature(usedPointList[0]))
for i in range(1,numUsedPoints):
temp = ee.FeatureCollection(ee.Feature(usedPointList[i]))
usedPointListEE = usedPointListEE.merge(temp)
usedPointsDraw = usedPointListEE.draw('00FF00', 8)
addToMap(usedPointsDraw, {}, 'Used PTs', False)
if (numUnusedPoints > 0):
unusedPointListEE = ee.FeatureCollection(ee.Feature(rejectedPointList[0]))
for i in range(1,numUnusedPoints):
temp = ee.FeatureCollection(ee.Feature(rejectedPointList[i]))
unusedPointListEE = unusedPointListEE.merge(temp)
unusedPointsDraw = unusedPointListEE.draw('FF0000', 8)
addToMap(unusedPointsDraw, {}, 'Unused PTs', False)
# 5: Use the individual thresholds to compute a global threshold
computedThreshold = numpy.median(localThresholdList) # Nothing fancy going on here!
print 'Computed global threshold = ' + str(computedThreshold)
finalWaterClass = grayLayer.lte(computedThreshold)
#addToMap(finalWaterClass.mask(finalWaterClass), {'min': 0, 'max': 1, 'opacity': 0.6, 'palette': RED_PALETTE}, 'martinis class', False)
# Rename the channel to what the evaluation function requires
finalWaterClass = finalWaterClass.select([channelName], ['b1'])
return finalWaterClass
| apache-2.0 |
mac389/scientific-consensus | Graphics.py | 1 | 3073 | import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from matplotlib import rcParams
from scipy.stats import percentileofscore
from awesome_print import ap
rcParams['text.usetex'] = True
format = lambda text: r'\Large \textbf{\textsc{%s}}'%text
def save_graph(g,filename='topology'):
labels = {i:'%.02f'%g.node[i].estimate_of_pi for i in xrange(len(g))}
pos = nx.spring_layout(g,k=.25)
n = nx.draw_networkx(g,pos=pos,labels=labels, node_size=800,linewidth=None)
plt.axis('off')
plt.tight_layout()
plt.savefig('%s.png'%filename)
plt.close()
def my_boxplot(data,filename,ylabel=None,xlabel=None,xticklabels=None):
fig = plt.figure()
ax = fig.add_subplot(111)
bp = ax.boxplot(data,patch_artist=True)
adjust_spines(ax)
## change outline color, fill color and linewidth of the boxes
for box in bp['boxes']:
# change outline color
box.set( color='#7570b3', linewidth=2)
# change fill color
box.set( facecolor = '#1b9e77' )
## change color and linewidth of the whiskers
for whisker in bp['whiskers']:
whisker.set(color='#7570b3', linewidth=2)
## change color and linewidth of the caps
for cap in bp['caps']:
cap.set(color='#7570b3', linewidth=2)
## change color and linewidth of the medians
for median in bp['medians']:
median.set(color='#b2df8a', linewidth=2)
## change the style of fliers and their fill
for flier in bp['fliers']:
flier.set(marker='o', color='#e7298a', alpha=0.5)
if ylabel is not None:
ax.set_ylabel(ylabel)
if xlabel is not None:
ax.set_xlabel(xlabel)
if xticklabels is not None:
ax.set_xticklabels(xticklabels)
#plt.legend(frameon=False)
plt.savefig('%s.tiff'%filename)
plt.close()
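# Illustrative call (data values and labels are placeholders; keyword arguments are optional):
# >>> my_boxplot([np.random.randn(50), np.random.randn(50) + 1], 'example-boxplot',
# ...            ylabel=format('Value'), xticklabels=[format('A'), format('B')])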
def compare_degree_sequences(one,two): #TODO: Expand to take arbitrary list
fig = plt.figure()
ax = fig.add_subplot(111)
rank_one = np.array([percentileofscore(one,num) for num in one[::-1]])
rank_two = np.array([percentileofscore(two,num) for num in two[::-1]])
ax.loglog(rank_one,one,'k--',label=format('Before Interactions'))
plt.hold(True)
ax.loglog(rank_two,two,'r.-',label=format('After Interactions'))
adjust_spines(ax)
ax.set_ylabel(format('Degree'))
ax.set_xlabel(format('Rank'))
plt.legend(frameon=False)
plt.tight_layout()
plt.savefig('degree-rank.tiff')
def adjust_spines(ax,spines=['left','bottom']):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward',10)) # outward by 10 points
spine.set_smart_bounds(True)
else:
spine.set_color('none') # don't draw spine
# turn off ticks where there is no spine
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
# no yaxis ticks
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
# no xaxis ticks
ax.xaxis.set_ticks([])
| mit |
gLENTNER/Gaia-1.0 | Examples/NGC1300/ds9-regions.py | 2 | 11451 | #
# Import data from NGC1300 fits image and mask foreground stars,
# crop image and save as appropriate file format for Gaia
#
import numpy as np
from astropy.io import fits
from AstroPython import Display
from matplotlib import pyplot as plot
from matplotlib import cm
import matplotlib as mpl
mpl.rcParams['figure.facecolor'] = 'w'
plot.ion()
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import interpolate
data = {
# retrieve data from FITS files
'ir' : fits.getdata("Raw/ngc1300-dss2-ir.fits"),
'red' : fits.getdata("Raw/ngc1300-dss2-red.fits"),
'blue': fits.getdata("Raw/ngc1300-dss2-blue.fits")
}
# model background level for all filters for each aperture
background = {
'ir': [
# mean, std dev
[4499.30, 175.587],
[4549.20, 195.074],
[4816.91, 203.928],
[4906.38, 207.398],
[5039.52, 201.842],
[4778.03, 156.374],
[4670.17, 182.195],
[4801.97, 200.862],
[6584.68, 267.167],
[4682.94, 200.903],
[4459.27, 199.213],
[5019.66, 194.805],
[4499.09, 166.825],
[8001.50, 585.436]
],
'red': [
[5178.63, 222.264],
[5376.69, 191.723],
[6178.94, 282.316],
[6466.31, 144.493],
[6746.27, 302.845],
[6052.45, 195.014],
[5893.26, 185.470],
[6637.61, 204.494],
[9470.95, 269.433],
[5909.86, 162.993],
[5483.68, 175.466],
[6379.69, 311.760],
[5232.94, 192.232],
[13992.8, 899.225]
],
'blue': [
[4073.02, 203.639],
[4150.83, 226.415],
[4531.65, 356.344],
[5150.93, 222.480],
[5168.56, 220.488],
[4557.21, 249.786],
[4422.69, 226.622],
[4482.67, 243.598],
[7854.97, 493.657],
[4647.30, 219.719],
[4268.31, 239.204],
[4432.22, 250.525],
[4098.84, 185.002],
[9332.95, 2984.58]
]
}
regions = {
'ir': [
# x center, y center, radius
# ----------------------------
[640.24773, 584.59215, 7.92246],#
[653.06269, 512.42372, 7.92246],#
[603.82628, 534.68127, 10.6165],#
[551.21752, 525.23867, 6.81093],#
[543.12387, 512.42372, 9.20780],#
[342.80589, 349.20166, 9.20780],#
[557.96224, 337.73565, 8.92244],#
[388.66986, 345.83264, 6.84294],#
[415.64871, 471.95877, 6.84294],#
[290.19705, 486.79714, 6.84294],#
[253.10113, 493.88738, 6.84294],#
[430.48708, 590.33678, 8.46956],#
[603.15173, 638.89871, 11.2354],#
[517.49388, 503.32998, 4.78753]
],
'red': [
# x center, y center, radius
# ----------------------------
[640.24773, 584.59215, 7.92246],#
[653.06269, 512.42372, 7.92246],#
[603.82628, 534.68127, 10.6165],#
[551.21752, 525.23867, 6.81093],#
[543.12387, 512.42372, 9.20780],#
[342.80589, 349.20166, 9.20780],#
[557.96224, 337.73565, 8.92244],#
[388.66986, 345.83264, 6.84294],#
[415.64871, 471.95877, 6.84294],#
[290.19705, 486.79714, 6.84294],#
[253.10113, 493.88738, 6.84294],#
[430.48708, 590.33678, 8.46956],#
[603.15173, 638.89871, 11.2354],#
[517.49388, 503.32998, 4.78753]
],
'blue': [
# x center, y center, radius
# ----------------------------
[380.54816, 347.93060, 4.7041517],#
[388.20763, 305.17151, 4.7041517],#
[358.96165, 318.32767, 6.3037932],#
[327.72501, 312.68918, 4.0441544],#
[322.93634, 305.08948, 5.4673530],#
[204.11658, 208.22245, 5.4673530],#
[331.87979, 201.58613, 5.2979116],#
[231.34754, 206.25982, 4.0631584],#
[247.26867, 281.01793, 4.0631584],#
[172.77320, 289.71700, 4.0631584],#
[150.73721, 293.89279, 4.0631584],#
[255.99006, 351.18030, 5.0290022],#
[358.48154, 380.08577, 6.6712821],
[307.72436, 299.68562, 2.8426906]
]
}
cropped = {
'ir': [
# parameters for `window`
456.48938, # center x pixel
456.79009, # center y pixel
434.95953, # width in x
355.24026 # width in y
],
'red': [
# parameters for `window`
456.48938, # center x pixel
456.79009, # center y pixel
434.95953, # width in x
355.24026 # width in y
],
'blue': [
# parameters for `window`
271.53182, # center x pixel
272.06184, # center y pixel
258.26772, # width in x
210.51011 # width in y
]
}
# create an index map of the pixels
ilength, jlength = np.shape(data['red'])
imap = np.arange(ilength)
jmap = np.arange(jlength)
# display monitor to view progress
display = Display.Monitor()
print('\n Applying masks to `ngc1300-dss2-red` ...')
# replace ds9 regions with appropriate background levels
for i in imap:
for j in jmap:
# display progress
display.progress(jlength*i + j, ilength*jlength)
for a, aperture in enumerate(regions['red']):
x0, y0, r = aperture
if ( j - x0 )**2 + ( i - y0 )**2 < r**2 :
mu, sigma = background['red'][a]
data['red'][i,j] = np.random.normal(mu, sigma)
display.complete()
# create an index map of the pixels
ilength, jlength = np.shape(data['ir'])
imap = np.arange(ilength)
jmap = np.arange(jlength)
print('\n Applying masks to `ngc1300-dss2-ir` ...')
# replace ds9 regions with appropriate background levels
for i in imap:
for j in jmap:
# display progress
display.progress(jlength*i + j, ilength*jlength)
for a, aperture in enumerate(regions['ir']):
x0, y0, r = aperture
if ( j - x0 )**2 + ( i - y0 )**2 < r**2 :
mu, sigma = background['ir'][a]
data['ir'][i,j] = np.random.normal(mu, sigma)
display.complete()
# create an index map of the pixels
ilength, jlength = np.shape(data['blue'])
imap = np.arange(ilength)
jmap = np.arange(jlength)
print('\n Applying masks to `ngc1300-dss2-blue` ...')
# replace ds9 regions with appropriate background levels
for i in imap:
for j in jmap:
# display progress
display.progress(jlength*i + j, ilength*jlength)
for a, aperture in enumerate(regions['blue']):
x0, y0, r = aperture
if ( j - x0 )**2 + ( i - y0 )**2 < r**2 :
mu, sigma = background['blue'][a]
data['blue'][i,j] = np.random.normal(mu, sigma)
display.complete()
# define edges of cropped image
xmin = {
'ir' : cropped['ir'][0] - cropped['ir'][2] / 2,
'red' : cropped['red'][0] - cropped['red'][2] / 2,
'blue': cropped['blue'][0] - cropped['blue'][2] / 2
}
xmax = {
'ir' : cropped['ir'][0] + cropped['ir'][2] / 2,
'red' : cropped['red'][0] + cropped['red'][2] / 2,
'blue': cropped['blue'][0] + cropped['blue'][2] / 2
}
ymin = {
'ir' : cropped['ir'][1] - cropped['ir'][3] / 2,
'red' : cropped['red'][1] - cropped['red'][3] / 2,
'blue': cropped['blue'][1] - cropped['blue'][3] / 2
}
ymax = {
'ir' : cropped['ir'][1] + cropped['ir'][3] / 2,
'red' : cropped['red'][1] + cropped['red'][3] / 2,
'blue': cropped['blue'][1] + cropped['blue'][3] / 2
}
# crop the images
# slice indices must be integers, so cast the floored bounds with int()
image = {
'ir': data['ir'][
int(np.floor(xmin['ir'])):int(np.floor(xmax['ir'])),
int(np.floor(ymin['ir'])):int(np.floor(ymax['ir']))
],
'red': data['red'][
int(np.floor(xmin['red'])):int(np.floor(xmax['red'])),
int(np.floor(ymin['red'])):int(np.floor(ymax['red']))
],
'blue': data['blue'][
int(np.floor(xmin['blue'])):int(np.floor(xmax['blue'])),
int(np.floor(ymin['blue'])):int(np.floor(ymax['blue']))
]
}
# new index vectors
x = {
'ir' : np.arange( np.shape(image['ir'])[1] ),
'red' : np.arange( np.shape(image['red'])[1] ),
'blue' : np.arange( np.shape(image['blue'])[1] )
}
y = {
'ir' : np.arange( np.shape(image['ir'])[0] ),
'red' : np.arange( np.shape(image['red'])[0] ),
'blue' : np.arange( np.shape(image['blue'])[0] )
}
# resample (higher density pixels)
newx, newy, xx, yy, zz, f = {}, {}, {}, {}, {}, {}
print('\n Resampling models to higher density ... `ir`, ', end='')
f['ir'] = interpolate.interp2d(x['ir'], y['ir'], image['ir'])
newx['ir'] = np.linspace(0, len(x['ir']) - 1, 1000)
newy['ir'] = np.linspace(0, len(y['ir']) - 1, 1000)
#xx['ir'], yy['ir'] = np.meshgrid(newx['ir'], newy['ir'])
zz['ir'] = f['ir'](newx['ir'], newy['ir'])
print('`red`, ', end='')
f['red'] = interpolate.interp2d(x['red'], y['red'], image['red'])
newx['red'] = np.linspace(0, len(x['red']) - 1, 1000)
newy['red'] = np.linspace(0, len(y['red']) - 1, 1000)
#xx['red'], yy['red'] = np.meshgrid(newx['red'], newy['red'])
zz['red'] = f['red'](newx['red'], newy['red'])
print('`blue`, ', end='')
f['blue'] = interpolate.interp2d(x['blue'], y['blue'], image['blue'])
newx['blue'] = np.linspace(0, len(x['blue']) - 1, 1000)
newy['blue'] = np.linspace(0, len(y['blue']) - 1, 1000)
#xx['blue'], yy['blue'] = np.meshgrid(newx['blue'], newy['blue'])
zz['blue'] = f['blue'](newx['blue'], newy['blue'])
print('done')
# physical coordinates
d = 33726 #pcs
x = {
'ir' : np.linspace( -d/2, d/2, len(newx['ir'])),
'red' : np.linspace( -d/2, d/2, len(newx['red'])),
'blue' : np.linspace( -d/2, d/2, len(newx['blue']))
}
y = {
'ir' : np.linspace( -d/2, d/2, len(newy['ir'])),
'red' : np.linspace( -d/2, d/2, len(newy['red'])),
'blue' : np.linspace( -d/2, d/2, len(newy['blue']))
}
xx['ir'], yy['ir'] = np.meshgrid( x['ir'], y['ir'] )
xx['red'], yy['red'] = np.meshgrid( x['red'], y['red'] )
xx['blue'], yy['blue'] = np.meshgrid( x['blue'], y['blue'] )
# normalize/enhance data
zz["ir"][ np.where( zz["ir"] < np.median(zz["ir"]) ) ] = 0.0
zz["ir"] = zz["ir"] ** 3
zz["ir"] /= 2 * zz["ir"].max()
zz["red"][ np.where( zz["red"] < np.median(zz["red"]) ) ] = 0.0
zz["red"] = zz["red"] ** 3
zz["red"] /= 2 * zz["red"].max()
zz["blue"][ np.where( zz["blue"] < np.median(zz["blue"]) ) ] = 0.0
zz["blue"] = zz["blue"] ** 3
zz["blue"] /= 2 * zz["blue"].max()
print('\n Saving `ir` data to csv file `ncg1300-ir.csv` ...')
output = list(zz['ir'])
output = [ [ str(value) for value in row ] for row in output ]
output = [ ', '.join(row) for row in output ]
output = [ row + '\n' for row in output ]
with open('ngc1300-ir.csv', 'w') as outfile:
outfile.writelines(output)
print('\n Saving `red` data to csv file `ncg1300-red.csv` ...')
output = list(zz['red'])
output = [ [ str(value) for value in row ] for row in output ]
output = [ ', '.join(row) for row in output ]
output = [ row + '\n' for row in output ]
with open('ngc1300-red.csv', 'w') as outfile:
outfile.writelines(output)
print('\n Saving `blue` data to csv file `ncg1300-blue.csv` ...')
output = list(zz['blue'])
output = [ [ str(value) for value in row ] for row in output ]
output = [ ', '.join(row) for row in output ]
output = [ row + '\n' for row in output ]
with open('ngc1300-blue.csv', 'w') as outfile:
outfile.writelines(output)
| gpl-3.0 |
chrisburr/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 272 | 6972 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of the calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
CDSFinance/zipline | zipline/data/ffc/synthetic.py | 5 | 8343 | """
Synthetic data loaders for testing.
"""
from bcolz import ctable
from numpy import (
arange,
array,
float64,
full,
iinfo,
uint32,
)
from pandas import (
DataFrame,
Timestamp,
)
from sqlite3 import connect as sqlite3_connect
from six import iteritems
from zipline.data.ffc.base import FFCLoader
from zipline.data.ffc.frame import DataFrameFFCLoader
from zipline.data.ffc.loaders.us_equity_pricing import (
BcolzDailyBarWriter,
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
US_EQUITY_PRICING_BCOLZ_COLUMNS,
)
UINT_32_MAX = iinfo(uint32).max
def nanos_to_seconds(nanos):
return nanos / (1000 * 1000 * 1000)
class MultiColumnLoader(FFCLoader):
"""
FFCLoader that can delegate to sub-loaders.
Parameters
----------
loaders : dict
Dictionary mapping columns -> loader
"""
def __init__(self, loaders):
self._loaders = loaders
def load_adjusted_array(self, columns, dates, assets, mask):
"""
Load by delegating to sub-loaders.
"""
out = []
for col in columns:
try:
loader = self._loaders[col]
except KeyError:
raise ValueError("Couldn't find loader for %s" % col)
out.extend(loader.load_adjusted_array([col], dates, assets, mask))
return out
class ConstantLoader(MultiColumnLoader):
"""
Synthetic FFCLoader that returns a constant value for each column.
Parameters
----------
constants : dict
Map from column to value(s) to use for that column.
Values can be anything that can be passed as the first positional
argument to a DataFrame of the same shape as `mask`.
dates : pandas.DatetimeIndex
Dates for which to produce baseline values.
assets : iterable
Assets for which to produce baseline values.
Notes
-----
Adjustments are unsupported with ConstantLoader.
"""
def __init__(self, constants, dates, assets):
loaders = {}
for column, const in iteritems(constants):
frame = DataFrame(
const,
index=dates,
columns=assets,
dtype=column.dtype,
)
loaders[column] = DataFrameFFCLoader(
column=column,
baseline=frame,
adjustments=None,
)
super(ConstantLoader, self).__init__(loaders)
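# Hedged usage sketch (not part of the original module): for a hypothetical
# FFC column ``SomeColumn`` with a float dtype, ``ConstantLoader({SomeColumn:
# 3.0}, dates, assets)`` builds a dates-by-assets baseline frame filled with
# 3.0 and serves it through a DataFrameFFCLoader, with no adjustments.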
class SyntheticDailyBarWriter(BcolzDailyBarWriter):
"""
Bcolz writer that creates synthetic data based on asset lifetime metadata.
For a given asset/date/column combination, we generate a corresponding raw
value using the following formula for OHLCV columns:
data(asset, date, column) = (100,000 * asset_id)
+ (10,000 * column_num)
+ (date - Jan 1 2000).days # ~6000 for 2015
where:
column_num('open') = 0
column_num('high') = 1
column_num('low') = 2
column_num('close') = 3
column_num('volume') = 4
We use days since Jan 1, 2000 to guarantee that there are no collisions
while also keeping the produced values smaller than UINT32_MAX / 1000.
For 'day' and 'id', we use the standard format expected by the base class.
Parameters
----------
asset_info : DataFrame
DataFrame with asset_id as index and 'start_date'/'end_date' columns.
calendar : DatetimeIndex
Calendar to use for constructing asset lifetimes.
"""
OHLCV = ('open', 'high', 'low', 'close', 'volume')
OHLC = ('open', 'high', 'low', 'close')
PSEUDO_EPOCH = Timestamp('2000-01-01', tz='UTC')
def __init__(self, asset_info, calendar):
super(SyntheticDailyBarWriter, self).__init__()
assert (
# Using .value here to avoid having to care about UTC-aware dates.
self.PSEUDO_EPOCH.value <
calendar.min().value <=
asset_info['start_date'].min().value
)
assert (asset_info['start_date'] < asset_info['end_date']).all()
self._asset_info = asset_info
self._calendar = calendar
def _raw_data_for_asset(self, asset_id):
"""
Generate 'raw' data that encodes information about the asset.
See class docstring for a description of the data format.
"""
# Get the dates for which this asset existed according to our asset
# info.
dates = self._calendar[
self._calendar.slice_indexer(
self.asset_start(asset_id), self.asset_end(asset_id)
)
]
data = full(
(len(dates), len(US_EQUITY_PRICING_BCOLZ_COLUMNS)),
asset_id * (100 * 1000),
dtype=uint32,
)
# Add 10,000 * column-index to OHLCV columns
data[:, :5] += arange(5) * (10 * 1000)
# Add days since Jan 1 2000 for OHLCV columns.
data[:, :5] += (dates - self.PSEUDO_EPOCH).days[:, None]
frame = DataFrame(
data,
index=dates,
columns=US_EQUITY_PRICING_BCOLZ_COLUMNS,
)
frame['day'] = nanos_to_seconds(dates.asi8)
frame['id'] = asset_id
return ctable.fromdataframe(frame)
def asset_start(self, asset):
ret = self._asset_info.loc[asset]['start_date']
if ret.tz is None:
ret = ret.tz_localize('UTC')
assert ret.tzname() == 'UTC', "Unexpected non-UTC timestamp"
return ret
def asset_end(self, asset):
ret = self._asset_info.loc[asset]['end_date']
if ret.tz is None:
ret = ret.tz_localize('UTC')
assert ret.tzname() == 'UTC', "Unexpected non-UTC timestamp"
return ret
@classmethod
def expected_value(cls, asset_id, date, colname):
"""
Check that the raw value for an asset/date/column triple is as
expected.
Used by tests to verify data written by a writer.
"""
from_asset = asset_id * 100 * 1000
from_colname = cls.OHLCV.index(colname) * (10 * 1000)
from_date = (date - cls.PSEUDO_EPOCH).days
return from_asset + from_colname + from_date
def expected_values_2d(self, dates, assets, colname):
"""
Return an 2D array containing cls.expected_value(asset_id, date,
colname) for each date/asset pair in the inputs.
Values before/after an asset's lifetime are filled with 0 for volume and
NaN for price columns.
"""
if colname == 'volume':
dtype = uint32
missing = 0
else:
dtype = float64
missing = float('nan')
data = full((len(dates), len(assets)), missing, dtype=dtype)
for j, asset in enumerate(assets):
start, end = self.asset_start(asset), self.asset_end(asset)
for i, date in enumerate(dates):
# No value expected for dates outside the asset's start/end
# date.
if not (start <= date <= end):
continue
data[i, j] = self.expected_value(asset, date, colname)
return data
# BEGIN SUPERCLASS INTERFACE
def gen_tables(self, assets):
for asset in assets:
yield asset, self._raw_data_for_asset(asset)
def to_uint32(self, array, colname):
if colname in {'open', 'high', 'low', 'close'}:
# Data is stored as 1000 * raw value.
assert array.max() < (UINT_32_MAX / 1000), "Test data overflow!"
return array * 1000
else:
assert colname in ('volume', 'day'), "Unknown column: %s" % colname
return array
# END SUPERCLASS INTERFACE
class NullAdjustmentReader(SQLiteAdjustmentReader):
"""
A SQLiteAdjustmentReader that stores no adjustments and uses in-memory
SQLite.
"""
def __init__(self):
conn = sqlite3_connect(':memory:')
writer = SQLiteAdjustmentWriter(conn)
empty = DataFrame({
'sid': array([], dtype=uint32),
'effective_date': array([], dtype=uint32),
'ratio': array([], dtype=float),
})
writer.write(splits=empty, mergers=empty, dividends=empty)
super(NullAdjustmentReader, self).__init__(conn)
| apache-2.0 |
ominux/scikit-learn | sklearn/metrics/metrics.py | 1 | 22059 | """Utilities to evaluate the predictive performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better
Functions named as *_loss return a scalar value to minimize: the lower the
better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD Style.
import numpy as np
from ..utils import check_arrays
def unique_labels(*list_of_labels):
"""Extract an ordered integer array of unique labels
This implementation ignores any occurrence of NaNs.
"""
list_of_labels = [np.unique(labels[np.isfinite(labels)].ravel())
for labels in list_of_labels]
list_of_labels = np.concatenate(list_of_labels)
return np.unique(list_of_labels)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix cm is such that cm[i, j] is equal
to the number of observations known to be in group i but predicted
to be in group j
Parameters
----------
y_true : array, shape = [n_samples]
true targets
y_pred : array, shape = [n_samples]
estimated targets
Returns
-------
CM : array, shape = [n_classes, n_classes]
confusion matrix
References
----------
http://en.wikipedia.org/wiki/Confusion_matrix
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels, dtype=np.int)
n_labels = labels.size
CM = np.empty((n_labels, n_labels), dtype=np.long)
for i, label_i in enumerate(labels):
for j, label_j in enumerate(labels):
CM[i, j] = np.sum(
np.logical_and(y_true == label_i, y_pred == label_j))
return CM
def roc_curve(y_true, y_score):
"""compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Parameters
----------
y_true : array, shape = [n_samples]
true binary labels
y_score : array, shape = [n_samples]
target scores, can either be probability estimates of
the positive class, confidence values, or binary decisions.
Returns
-------
fpr : array, shape = [>2]
False Positive Rates
tpr : array, shape = [>2]
True Positive Rates
thresholds : array, shape = [>2]
Thresholds on proba_ used to compute fpr and tpr
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
References
----------
http://en.wikipedia.org/wiki/Receiver_operating_characteristic
"""
y_true = y_true.ravel()
classes = np.unique(y_true)
# ROC only for binary classification
if classes.shape[0] != 2:
raise ValueError("ROC is defined for binary classification only")
y_score = y_score.ravel()
n_pos = float(np.sum(y_true == classes[1])) # nb of true positive
n_neg = float(np.sum(y_true == classes[0])) # nb of true negative
thresholds = np.unique(y_score)
neg_value, pos_value = classes[0], classes[1]
tpr = np.empty(thresholds.size, dtype=np.float) # True positive rate
fpr = np.empty(thresholds.size, dtype=np.float) # False positive rate
# Build tpr/fpr vector
current_pos_count = current_neg_count = sum_pos = sum_neg = idx = 0
signal = np.c_[y_score, y_true]
sorted_signal = signal[signal[:, 0].argsort(), :][::-1]
last_score = sorted_signal[0][0]
for score, value in sorted_signal:
if score == last_score:
if value == pos_value:
current_pos_count += 1
else:
current_neg_count += 1
else:
tpr[idx] = (sum_pos + current_pos_count) / n_pos
fpr[idx] = (sum_neg + current_neg_count) / n_neg
sum_pos += current_pos_count
sum_neg += current_neg_count
current_pos_count = 1 if value == pos_value else 0
current_neg_count = 1 if value == neg_value else 0
idx += 1
last_score = score
else:
tpr[-1] = (sum_pos + current_pos_count) / n_pos
fpr[-1] = (sum_neg + current_neg_count) / n_neg
# hard decisions, add (0,0)
if fpr.shape[0] == 2:
fpr = np.array([0.0, fpr[0], fpr[1]])
tpr = np.array([0.0, tpr[0], tpr[1]])
# trivial decisions, add (0,0) and (1,1)
elif fpr.shape[0] == 1:
fpr = np.array([0.0, fpr[0], 1.0])
tpr = np.array([0.0, tpr[0], 1.0])
return fpr, tpr, thresholds
def auc(x, y):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
Parameters
----------
x : array, shape = [n]
x coordinates
y : array, shape = [n]
y coordinates
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred)
>>> metrics.auc(fpr, tpr)
0.75
"""
x, y = check_arrays(x, y)
assert x.shape[0] == y.shape[0]
assert x.shape[0] >= 3
# reorder the data points according to the x axis
order = np.argsort(x)
x = x[order]
y = y[order]
h = np.diff(x)
area = np.sum(h * (y[1:] + y[:-1])) / 2.0
return area
def precision_score(y_true, y_pred, pos_label=1):
"""Compute the precision
The precision is the ratio :math:`tp / (tp + fp)` where tp is the
number of true positives and fp the number of false positives. The
precision is intuitively the ability of the classifier not to
label as positive a sample that is negative.
The best value is 1 and the worst value is 0.
Parameters
----------
y_true : array, shape = [n_samples]
true targets
y_pred : array, shape = [n_samples]
predicted targets
pos_label : int
in the binary classification case, give the label of the
positive class (default is 1). Everything else but 'pos_label'
is considered to belong to the negative class.
Not used in the case of multiclass classification.
Returns
-------
precision : float
precision of the positive class in binary classification or
weighted average of the precision of each class for the
multiclass task
"""
p, _, _, s = precision_recall_fscore_support(y_true, y_pred)
if p.shape[0] == 2:
return p[pos_label]
else:
return np.average(p, weights=s)
def recall_score(y_true, y_pred, pos_label=1):
"""Compute the recall
The recall is the ratio :math:`tp / (tp + fn)` where tp is the number of
true positives and fn the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Parameters
----------
y_true : array, shape = [n_samples]
true targets
y_pred : array, shape = [n_samples]
predicted targets
pos_label : int
in the binary classification case, give the label of the positive
class (default is 1). Everything else but 'pos_label'
is considered to belong to the negative class.
Not used in the case of multiclass classification.
Returns
-------
recall : float
recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
"""
_, r, _, s = precision_recall_fscore_support(y_true, y_pred)
if r.shape[0] == 2:
return r[pos_label]
else:
return np.average(r, weights=s)
def fbeta_score(y_true, y_pred, beta, pos_label=1):
"""Compute fbeta score
The F_beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The beta parameter determines the weight of precision in the combined
score. beta < 1 lends more weight to precision, while beta > 1 favors
recall (beta == 0 considers only precision, beta == inf only recall).
Parameters
----------
y_true : array, shape = [n_samples]
true targets
y_pred : array, shape = [n_samples]
predicted targets
beta: float
pos_label : int
in the binary classification case, give the label of the positive
class (default is 1). Everything else but 'pos_label'
is considered to belong to the negative class.
Not used in the case of multiclass classification.
Returns
-------
fbeta_score : float
fbeta_score of the positive class in binary classification or weighted
average of the fbeta_score of each class for the multiclass task.
References
----------
R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern Information Retrieval.
Addison Wesley, pp. 327-328.
http://en.wikipedia.org/wiki/F1_score
"""
_, _, f, s = precision_recall_fscore_support(y_true, y_pred, beta=beta)
if f.shape[0] == 2:
return f[pos_label]
else:
return np.average(f, weights=s)
def f1_score(y_true, y_pred, pos_label=1):
"""Compute f1 score
The F1 score can be interpreted as a weighted average of the precision
and recall, where an F1 score reaches its best value at 1 and worst
score at 0. The relative contribution of precision and recall to the f1
score are equal.
F_1 = 2 * (precision * recall) / (precision + recall)
See: http://en.wikipedia.org/wiki/F1_score
In the multi-class case, this is the weighted average of the f1-score of
each class.
Parameters
----------
y_true : array, shape = [n_samples]
true targets
y_pred : array, shape = [n_samples]
predicted targets
pos_label : int
in the binary classification case, give the label of the positive class
(default is 1). Everything else but 'pos_label'
is considered to belong to the negative class.
Not used in the case of multiclass classification.
Returns
-------
f1_score : float
f1_score of the positive class in binary classification or weighted
average of the f1_scores of each class for the multiclass task
References
----------
http://en.wikipedia.org/wiki/F1_score
"""
return fbeta_score(y_true, y_pred, 1, pos_label=pos_label)
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None):
"""Compute precisions, recalls, f-measures and support for each class
The precision is the ratio :math:`tp / (tp + fp)` where tp is the number of
true positives and fp the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio :math:`tp / (tp + fn)` where tp is the number of
true positives and fn the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F_beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F_beta score reaches its best
value at 1 and worst score at 0.
The F_beta score weights recall beta times as much as precision. beta = 1.0 means
recall and precision are equally important.
The support is the number of occurrences of each class in y_true.
Parameters
----------
y_true : array, shape = [n_samples]
true targets
y_pred : array, shape = [n_samples]
predicted targets
beta : float, 1.0 by default
the strength of recall versus precision in the f-score
Returns
-------
precision: array, shape = [n_unique_labels], dtype = np.double
recall: array, shape = [n_unique_labels], dtype = np.double
f1_score: array, shape = [n_unique_labels], dtype = np.double
support: array, shape = [n_unique_labels], dtype = np.long
References
----------
http://en.wikipedia.org/wiki/Precision_and_recall
"""
y_true, y_pred = check_arrays(y_true, y_pred)
assert(beta > 0)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels, dtype=np.int)
n_labels = labels.size
true_pos = np.zeros(n_labels, dtype=np.double)
false_pos = np.zeros(n_labels, dtype=np.double)
false_neg = np.zeros(n_labels, dtype=np.double)
support = np.zeros(n_labels, dtype=np.long)
for i, label_i in enumerate(labels):
true_pos[i] = np.sum(y_pred[y_true == label_i] == label_i)
false_pos[i] = np.sum(y_pred[y_true != label_i] == label_i)
false_neg[i] = np.sum(y_pred[y_true == label_i] != label_i)
support[i] = np.sum(y_true == label_i)
try:
# oddly, we may get an "invalid" rather than a "divide" error here
old_err_settings = np.seterr(divide='ignore', invalid='ignore')
# precision and recall
precision = true_pos / (true_pos + false_pos)
recall = true_pos / (true_pos + false_neg)
# handle division by 0.0 in precision and recall
precision[(true_pos + false_pos) == 0.0] = 0.0
recall[(true_pos + false_neg) == 0.0] = 0.0
# fbeta score
beta2 = beta ** 2
fscore = (1 + beta2) * (precision * recall) / (
beta2 * precision + recall)
# handle division by 0.0 in fscore
fscore[(precision + recall) == 0.0] = 0.0
finally:
np.seterr(**old_err_settings)
return precision, recall, fscore, support
def classification_report(y_true, y_pred, labels=None, target_names=None):
"""Build a text report showing the main classification metrics
Parameters
----------
y_true : array, shape = [n_samples]
true targets
y_pred : array, shape = [n_samples]
estimated targets
labels : array, shape = [n_labels]
optional list of label indices to include in the report
target_names : list of strings
optional display names matching the labels (same order)
Returns
-------
report : string
Text summary of the precision, recall, f1-score for each class
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels, dtype=np.int)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%d' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading))
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["%0.2f" % float(v)]
values += ["%d" % int(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["%0.2f" % float(v)]
values += ['%d' % np.sum(s)]
report += fmt % tuple(values)
return report
def precision_recall_curve(y_true, probas_pred):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio :math:`tp / (tp + fp)` where tp is the number of
true positives and fp the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio :math:`tp / (tp + fn)` where tp is the number of
true positives and fn the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
Parameters
----------
y_true : array, shape = [n_samples]
true targets of binary classification in range {-1, 1} or {0, 1}
probas_pred : array, shape = [n_samples]
estimated probabilities
Returns
-------
precision : array, shape = [n]
Precision values
recall : array, shape = [n]
Recall values
thresholds : array, shape = [n]
Thresholds on proba_ used to compute precision and recall
"""
y_true = y_true.ravel()
labels = np.unique(y_true)
if np.all(labels == np.array([-1, 1])):
# convert {-1, 1} to boolean {0, 1} repr
y_true[y_true == -1] = 0
labels = np.array([0, 1])
if not np.all(labels == np.array([0, 1])):
raise ValueError("y_true contains non binary labels: %r" % labels)
probas_pred = probas_pred.ravel()
thresholds = np.sort(np.unique(probas_pred))
n_thresholds = thresholds.size + 1
precision = np.empty(n_thresholds)
recall = np.empty(n_thresholds)
for i, t in enumerate(thresholds):
y_pred = np.ones(len(y_true))
y_pred[probas_pred < t] = 0
p, r, _, _ = precision_recall_fscore_support(y_true, y_pred)
precision[i] = p[1]
recall[i] = r[1]
precision[-1] = 1.0
recall[-1] = 0.0
return precision, recall, thresholds
def explained_variance_score(y_true, y_pred):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Note: the explained variance is not a symmetric function.
return the explained variance
Parameters
----------
y_true : array-like
y_pred : array-like
"""
y_true, y_pred = check_arrays(y_true, y_pred)
numerator = np.var(y_true - y_pred)
denominator = np.var(y_true)
if denominator == 0.0:
if numerator == 0.0:
return 1.0
else:
# arbitrarily set to zero to avoid -inf scores; having a constant
# y_true is not interesting for scoring a regression anyway
return 0.0
return 1 - numerator / denominator
def r2_score(y_true, y_pred):
"""R^2 (coefficient of determination) regression score function
Best possible score is 1.0, lower values are worse.
Note: not a symmetric function.
return the R^2 score
Parameters
----------
y_true : array-like
y_pred : array-like
"""
y_true, y_pred = check_arrays(y_true, y_pred)
numerator = ((y_true - y_pred) ** 2).sum()
denominator = ((y_true - y_true.mean()) ** 2).sum()
if denominator == 0.0:
if numerator == 0.0:
return 1.0
else:
# arbitrarily set to zero to avoid -inf scores; having a constant
# y_true is not interesting for scoring a regression anyway
return 0.0
return 1 - numerator / denominator
def zero_one_score(y_true, y_pred):
"""Zero-One classification score
Positive integer (number of good classifications).
The best performance is 1.
Return the percentage of good predictions.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
score : integer
"""
y_true, y_pred = check_arrays(y_true, y_pred)
return np.mean(y_pred == y_true)
###############################################################################
# Loss functions
def zero_one(y_true, y_pred):
"""Zero-One classification loss
Positive integer (number of misclassifications). The best performance
is 0.
Return the number of errors
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
loss : integer
"""
y_true, y_pred = check_arrays(y_true, y_pred)
return np.sum(y_pred != y_true)
def mean_square_error(y_true, y_pred):
"""Mean square error regression loss
Positive floating point value: the best value is 0.0.
return the mean square error
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
loss : float
"""
y_true, y_pred = check_arrays(y_true, y_pred)
return np.linalg.norm(y_pred - y_true) ** 2
def hinge_loss(y_true, pred_decision, pos_label=1, neg_label=-1):
"""
Cumulated hinge loss (non-regularized).
Assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, margin = y_true * pred_decision
is always negative (since the signs disagree), therefore 1 - margin
is always greater than 1. The cumulated hinge loss therefore
upperbounds the number of mistakes made by the classifier.
Parameters
----------
y_true : array, shape = [n_samples]
True target (integers)
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats)
"""
# TODO: multi-class hinge-loss
if pos_label != 1 or neg_label != -1:
# the rest of the code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
y_true = y_true.copy()
y_true[y_true == pos_label] = 1
y_true[y_true == neg_label] = -1
margin = y_true * pred_decision
losses = 1 - margin
# The hinge doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.mean(losses)
| bsd-3-clause |
uba/of | scripts/python/flo-visualizer.py | 1 | 1865 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Optical Flow Visualizer
__author__ = "Douglas Uba"
import matplotlib.image as mpimg
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
# This function reads Optical Flow Middlebury files (.flo)
def readFLO(path):
f = open(path, 'rb')
# Read magic number ("PIEH" in ASCII = float 202021.25)
magic = np.fromfile(f, np.float32, count=1)
if magic != 202021.25:
raise Exception('Invalid .flo file')
# Read width
f.seek(4)
w = np.fromfile(f, np.int32, count=1)
# Read height
f.seek(8)
h = np.fromfile(f, np.int32, count=1)
# Read (u,v) coordinates
f.seek(12)
data = np.fromfile(f, np.float32, count=w*h*2)
# Close file (.flo)
f.close()
# Reshape data into 3D array (columns, rows, bands)
dataM = np.resize(data, (h, w, 2))
# Extract u and v coordinates
u = dataM[:,:,0]
v = dataM[:,:,1]
return w,h,u,v
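# Note (added for clarity, not part of the original script): a .flo file is
# laid out as a 4-byte magic float (202021.25), an int32 width, an int32
# height, and then width*height interleaved (u, v) float32 pairs, which is
# why readFLO seeks to byte offsets 4, 8 and 12 before each read.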
# Read vectors file
w,h,u,v = readFLO('../../data/lkc2f/uv.flo')
# Create grid
x, y = np.meshgrid(np.arange(0, w, 1), np.arange(0, h, 1))
# Create figure
fig = plt.figure()
# Read images
imgs=[]
imgs.append(mpimg.imread('../../data/input/satellitea.jpg'))
imgs.append(mpimg.imread('../../data/input/satelliteb.jpg'))
# Plot first image
im = plt.imshow(imgs[0], cmap='Greys')
# Reduce factor for vectors
n=9
x = x[::n,::n]
y = y[::n,::n]
u = u[::n,::n]
v = v[::n,::n]
# Plot vectors
plt.quiver(x, y, u, v, pivot='mid', color='dodgerblue', \
angles='xy', headlength=4, antialiased=True)
# Animation loop
i = 0
def updatefig(*args):
global i
im.set_array(imgs[i])
i += 1
if i == len(imgs):
i = 0
return im
anim = animation.FuncAnimation(fig, updatefig, interval=800, blit=False)
plt.show()
| gpl-3.0 |
aflaxman/scikit-learn | examples/manifold/plot_compare_methods.py | 52 | 3878 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space; unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
xingularity/OpenLCDFDM | testComponents/testMainClasses/plot.py | 3 | 2618 | #########################################################################
## Copyright (C) 2015 Zong-han, Xie <icbm0926@gmail.com>.
## All rights reserved.
##
## You may use this file under the terms of the BSD license as follows:
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of OpenLCDFDM nor the names of its contributors
## may be used to endorse or promote products derived from this
## software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
#########################################################################
#!/usr/bin/python3
import numpy as np
import matplotlib.pyplot as plt
import sys
myfile=sys.argv[1]
answerfile=sys.argv[2]
print(myfile)
print("vs.")
print(answerfile)
myfiledata = np.loadtxt(myfile,delimiter=',')
answerfiledata = np.loadtxt(answerfile,delimiter=',')
plotphidegree = float(sys.argv[3])
plotxdata = np.array(range(0,81))
myfileplotdata = np.zeros(81)
index = 0
for i in myfiledata:
if (abs(i[1] - plotphidegree) < 1.0e-10):
myfileplotdata[index] = i[2]
index = index + 1
index = 0
answerfileplotdata = np.zeros(81)
for i in answerfiledata:
if (abs(i[1] - plotphidegree) < 1.0e-10):
answerfileplotdata[index] = i[2]
index = index + 1
plt.plot(plotxdata, myfileplotdata, 'b')
plt.plot(plotxdata, answerfileplotdata, 'r')
plt.show()
| bsd-3-clause |
nguy/artview | reprint_colormaps.py | 2 | 1111 | import numpy as np
import matplotlib.pyplot as plt
import pyart
cmap_list = ["pyart_" + m for m in pyart.graph.cm.datad
if not m.endswith("_r")]
nrows = len(cmap_list)
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
# borrows from colormaps_reference matplotlib example
fig, axes = plt.subplots(nrows=nrows, figsize=(8, 8))
fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99)
axes[0].set_title('Py-ART colormaps', fontsize=14)
axl = []
for ax, name in zip(axes, cmap_list):
ax.imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))
pos = list(ax.get_position().bounds)
x_text = pos[0] - 0.01
y_text = pos[1] + pos[3]/2.
fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)
axl.append((ax, name))
# Turn off *all* ticks & spines, not just the ones with colormaps.
for ax in axes:
ax.set_axis_off()
for ax, name in axl:
extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
fig.savefig(
'artview/icons/colormaps/%s.png' % name, dpi=20, bbox_inches=extent)
fig.show()
| bsd-3-clause |
UndistinguishedFellows/RealitatAumentadaPractiques | Practica_1/MatchingImages.py | 1 | 1447 | # Aliex Cardona and Josep Casanovas
# Realitat aumentada practica 1
import cv2
import numpy as np
from matplotlib import pyplot as plt
from PyFiles.convolutionKernel import getMatchingMap
IMAGES_PATH = "../Images/"
#imageName = IMAGES_PATH + input("Source image: ")
#targetName = IMAGES_PATH + input("Target to search: ")
#detectionThreshold = input("Detection threshold: ")
imageName = IMAGES_PATH+'img1.png'
targetName = IMAGES_PATH+'t1-img1.png'
img = cv2.imread(imageName, cv2.IMREAD_GRAYSCALE)
template = cv2.imread(targetName, cv2.IMREAD_GRAYSCALE)
res = cv2.matchTemplate(img,template,0)
matching_map = getMatchingMap(img, template)
min_value_X = 0
min_value_Y = 0
min_value = 255
for i in range(matching_map.shape[0]):
for j in range(matching_map.shape[1]):
if matching_map[i][j] < min_value:
min_value = matching_map[i][j]
min_value_X = j
min_value_Y = i
cv2.rectangle(img,(min_value_X - 6, min_value_Y - 6), (min_value_X + 6, min_value_Y + 6), 0, 2)
print img.shape
print template.shape
print res.shape
print matching_map.shape
plt.subplot(1,3,1), plt.imshow(res, cmap = 'gray')
plt.title('Matching map'), plt.xticks([]), plt.yticks([])
plt.subplot(1,3,2), plt.imshow(matching_map, cmap = 'gray')
plt.title('Matching map'), plt.xticks([]), plt.yticks([])
plt.subplot(1,3,3), plt.imshow(img, cmap = 'gray')
plt.title('Matching map'), plt.xticks([]), plt.yticks([])
plt.show() | mit |
kezilu/pextant | pextant/analysis/gps_import.py | 2 | 1127 | from pextant.lib.geoshapely import GeoPolygon, LAT_LONG
import matplotlib.pyplot as plt
from osgeo import gdal
import pandas as pd
import numpy as np
pd.options.display.max_rows = 5
def get_gps_data(filename, traversal_id):
"""
Gets GPS time series gathered from a traversal
:param filename: <String> csv file from GPS team in format |date|time|name|latitude|longitude|heading
:return: <GeoPolygon> traversal track as (latitude, longitude) points
"""
delimiter = r"\s+" # some of the columns are separated by a space, others by tabs, use regex to include both
header_row = 0 # the first row has all the header names
df = pd.read_csv(filename, sep=delimiter, header=header_row,
parse_dates=[['date', 'time']]) # replace date and time columns with date_time variable
time_lat_long = df[df['name'] == traversal_id][['date_time', 'latitude', 'longitude']]
gp = GeoPolygon(LAT_LONG, *time_lat_long[['latitude', 'longitude']].as_matrix().transpose())
return gp
if __name__ == '__main__':
gps = get_gps_data('../../data/waypoints/cotm_16_tracks.csv', '20160629A_EV1') | mit |
waddell/urbansim | setup.py | 3 | 1295 | # Install setuptools if not installed.
try:
import setuptools
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
# read README as the long description
with open('README.rst', 'r') as f:
long_description = f.read()
setup(
name='urbansim',
version='1.4dev',
description='Tool for modeling metropolitan real estate markets',
long_description=long_description,
author='Autodesk',
author_email='udst@autodesk.com',
license='BSD',
url='https://github.com/udst/urbansim',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: BSD License'
],
package_data={
'': ['*.html'],
},
packages=find_packages(exclude=['*.tests']),
install_requires=[
'bottle>=0.12',
'matplotlib>=1.3.1',
'numpy>=1.8.0',
'orca>=1.1',
'pandas>=0.13.1',
'patsy>=0.2.1',
'prettytable>=0.7.2',
'pyyaml>=3.10',
'scipy>=0.13.3',
'simplejson>=3.3',
'statsmodels>=0.5.0',
'tables>=3.1.0',
'toolz>=0.7.0',
'zbox>=1.2'
],
extras_require={
'pandana': ['pandana>=0.1']
}
)
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/tests/scalar/timedelta/test_timedelta.py | 1 | 27098 | """ test the scalar Timedelta """
from datetime import timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import NaT, iNaT
import pandas.compat as compat
import pandas as pd
from pandas import (
Series, Timedelta, TimedeltaIndex, timedelta_range, to_timedelta)
import pandas.util.testing as tm
class TestTimedeltaArithmetic(object):
def test_arithmetic_overflow(self):
with pytest.raises(OverflowError):
pd.Timestamp('1700-01-01') + pd.Timedelta(13 * 19999, unit='D')
with pytest.raises(OverflowError):
pd.Timestamp('1700-01-01') + timedelta(days=13 * 19999)
def test_array_timedelta_floordiv(self):
# https://github.com/pandas-dev/pandas/issues/19761
ints = pd.date_range('2012-10-08', periods=4, freq='D').view('i8')
msg = r"Use 'array // timedelta.value'"
with tm.assert_produces_warning(FutureWarning) as m:
result = ints // pd.Timedelta(1, unit='s')
assert msg in str(m[0].message)
expected = np.array([1349654400, 1349740800, 1349827200, 1349913600],
dtype='i8')
tm.assert_numpy_array_equal(result, expected)
def test_ops_error_str(self):
# GH 13624
td = Timedelta('1 day')
for left, right in [(td, 'a'), ('a', td)]:
with pytest.raises(TypeError):
left + right
with pytest.raises(TypeError):
left > right
assert not left == right
assert left != right
def test_ops_notimplemented(self):
class Other(object):
pass
other = Other()
td = Timedelta('1 day')
assert td.__add__(other) is NotImplemented
assert td.__sub__(other) is NotImplemented
assert td.__truediv__(other) is NotImplemented
assert td.__mul__(other) is NotImplemented
assert td.__floordiv__(other) is NotImplemented
def test_unary_ops(self):
td = Timedelta(10, unit='d')
# __neg__, __pos__
assert -td == Timedelta(-10, unit='d')
assert -td == Timedelta('-10d')
assert +td == Timedelta(10, unit='d')
# __abs__, __abs__(__neg__)
assert abs(td) == td
assert abs(-td) == td
assert abs(-td) == Timedelta('10d')
class TestTimedeltaComparison(object):
def test_compare_tick(self, tick_classes):
cls = tick_classes
off = cls(4)
td = off.delta
assert isinstance(td, Timedelta)
assert td == off
assert not td != off
assert td <= off
assert td >= off
assert not td < off
assert not td > off
assert not td == 2 * off
assert td != 2 * off
assert td <= 2 * off
assert td < 2 * off
assert not td >= 2 * off
assert not td > 2 * off
def test_comparison_object_array(self):
# analogous to GH#15183
td = Timedelta('2 days')
other = Timedelta('3 hours')
arr = np.array([other, td], dtype=object)
res = arr == td
expected = np.array([False, True], dtype=bool)
assert (res == expected).all()
# 2D case
arr = np.array([[other, td],
[td, other]],
dtype=object)
res = arr != td
expected = np.array([[True, False], [False, True]], dtype=bool)
assert res.shape == expected.shape
assert (res == expected).all()
def test_compare_timedelta_ndarray(self):
# GH11835
periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
arr = np.array(periods)
result = arr[0] > arr
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.skip(reason="GH#20829 is reverted until after 0.24.0")
def test_compare_custom_object(self):
"""
        Make sure unsupported operations on Timedelta return NotImplemented
        and yield to the other operand (GH#20829).
"""
class CustomClass(object):
def __init__(self, cmp_result=None):
self.cmp_result = cmp_result
def generic_result(self):
if self.cmp_result is None:
return NotImplemented
else:
return self.cmp_result
def __eq__(self, other):
return self.generic_result()
def __gt__(self, other):
return self.generic_result()
t = Timedelta('1s')
assert not (t == "string")
assert not (t == 1)
assert not (t == CustomClass())
assert not (t == CustomClass(cmp_result=False))
assert t < CustomClass(cmp_result=True)
assert not (t < CustomClass(cmp_result=False))
assert t == CustomClass(cmp_result=True)
@pytest.mark.parametrize("val", ["string", 1])
def test_compare_unknown_type(self, val):
# GH20829
t = Timedelta('1s')
with pytest.raises(TypeError):
t >= val
with pytest.raises(TypeError):
t > val
with pytest.raises(TypeError):
t <= val
with pytest.raises(TypeError):
t < val
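# Hedged sketch (added for illustration, not part of the original test module):
# comparing a Timedelta scalar against an object-dtype array broadcasts
# elementwise and returns a boolean ndarray, as exercised in
# test_comparison_object_array above.
def _timedelta_object_array_comparison_demo():
    import numpy as np
    import pandas as pd
    td = pd.Timedelta('2 days')
    arr = np.array([pd.Timedelta('3 hours'), td], dtype=object)
    return arr == td  # -> array([False,  True])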
class TestTimedeltas(object):
@pytest.mark.parametrize("unit, value, expected", [
('us', 9.999, 9999), ('ms', 9.999999, 9999999),
('s', 9.999999999, 9999999999)])
def test_rounding_on_int_unit_construction(self, unit, value, expected):
# GH 12690
result = Timedelta(value, unit=unit)
assert result.value == expected
result = Timedelta(str(value) + unit)
assert result.value == expected
def test_total_seconds_scalar(self):
# see gh-10939
rng = Timedelta('1 days, 10:11:12.100123456')
expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9
tm.assert_almost_equal(rng.total_seconds(), expt)
rng = Timedelta(np.nan)
assert np.isnan(rng.total_seconds())
def test_conversion(self):
for td in [Timedelta(10, unit='d'),
Timedelta('1 days, 10:11:12.012345')]:
pydt = td.to_pytimedelta()
assert td == Timedelta(pydt)
assert td == pydt
assert (isinstance(pydt, timedelta) and not isinstance(
pydt, Timedelta))
assert td == np.timedelta64(td.value, 'ns')
td64 = td.to_timedelta64()
assert td64 == np.timedelta64(td.value, 'ns')
assert td == td64
assert isinstance(td64, np.timedelta64)
        # this is NOT equal and cannot be round-tripped (because of the nanos)
td = Timedelta('1 days, 10:11:12.012345678')
assert td != td.to_pytimedelta()
def test_freq_conversion(self):
# truediv
td = Timedelta('1 days 2 hours 3 ns')
result = td / np.timedelta64(1, 'D')
assert result == td.value / float(86400 * 1e9)
result = td / np.timedelta64(1, 's')
assert result == td.value / float(1e9)
result = td / np.timedelta64(1, 'ns')
assert result == td.value
# floordiv
td = Timedelta('1 days 2 hours 3 ns')
result = td // np.timedelta64(1, 'D')
assert result == 1
result = td // np.timedelta64(1, 's')
assert result == 93600
result = td // np.timedelta64(1, 'ns')
assert result == td.value
def test_fields(self):
def check(value):
            # check that the value is int/long-like
assert isinstance(value, (int, compat.long))
# compat to datetime.timedelta
rng = to_timedelta('1 days, 10:11:12')
assert rng.days == 1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 0
assert rng.nanoseconds == 0
pytest.raises(AttributeError, lambda: rng.hours)
pytest.raises(AttributeError, lambda: rng.minutes)
pytest.raises(AttributeError, lambda: rng.milliseconds)
# GH 10050
check(rng.days)
check(rng.seconds)
check(rng.microseconds)
check(rng.nanoseconds)
td = Timedelta('-1 days, 10:11:12')
assert abs(td) == Timedelta('13:48:48')
assert str(td) == "-1 days +10:11:12"
assert -td == Timedelta('0 days 13:48:48')
assert -Timedelta('-1 days, 10:11:12').value == 49728000000000
assert Timedelta('-1 days, 10:11:12').value == -49728000000000
rng = to_timedelta('-1 days, 10:11:12.100123456')
assert rng.days == -1
assert rng.seconds == 10 * 3600 + 11 * 60 + 12
assert rng.microseconds == 100 * 1000 + 123
assert rng.nanoseconds == 456
pytest.raises(AttributeError, lambda: rng.hours)
pytest.raises(AttributeError, lambda: rng.minutes)
pytest.raises(AttributeError, lambda: rng.milliseconds)
# components
tup = pd.to_timedelta(-1, 'us').components
assert tup.days == -1
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
# GH 10050
check(tup.days)
check(tup.hours)
check(tup.minutes)
check(tup.seconds)
check(tup.milliseconds)
check(tup.microseconds)
check(tup.nanoseconds)
tup = Timedelta('-1 days 1 us').components
assert tup.days == -2
assert tup.hours == 23
assert tup.minutes == 59
assert tup.seconds == 59
assert tup.milliseconds == 999
assert tup.microseconds == 999
assert tup.nanoseconds == 0
def test_iso_conversion(self):
# GH #21877
expected = Timedelta(1, unit='s')
assert to_timedelta('P0DT0H0M1S') == expected
def test_nat_converters(self):
result = to_timedelta('nat', box=False)
assert result.dtype.kind == 'm'
assert result.astype('int64') == iNaT
result = to_timedelta('nan', box=False)
assert result.dtype.kind == 'm'
assert result.astype('int64') == iNaT
@pytest.mark.parametrize('units, np_unit',
[(['Y', 'y'], 'Y'),
(['M'], 'M'),
(['W', 'w'], 'W'),
(['D', 'd', 'days', 'day', 'Days', 'Day'], 'D'),
(['m', 'minute', 'min', 'minutes', 't',
'Minute', 'Min', 'Minutes', 'T'], 'm'),
(['s', 'seconds', 'sec', 'second',
'S', 'Seconds', 'Sec', 'Second'], 's'),
(['ms', 'milliseconds', 'millisecond', 'milli',
'millis', 'l', 'MS', 'Milliseconds',
'Millisecond', 'Milli', 'Millis', 'L'], 'ms'),
(['us', 'microseconds', 'microsecond', 'micro',
'micros', 'u', 'US', 'Microseconds',
'Microsecond', 'Micro', 'Micros', 'U'], 'us'),
(['ns', 'nanoseconds', 'nanosecond', 'nano',
'nanos', 'n', 'NS', 'Nanoseconds',
'Nanosecond', 'Nano', 'Nanos', 'N'], 'ns')])
@pytest.mark.parametrize('wrapper', [np.array, list, pd.Index])
def test_unit_parser(self, units, np_unit, wrapper):
# validate all units, GH 6855, GH 21762
for unit in units:
# array-likes
expected = TimedeltaIndex([np.timedelta64(i, np_unit)
for i in np.arange(5).tolist()])
result = to_timedelta(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(range(5)), unit=unit)
tm.assert_index_equal(result, expected)
if unit == 'M':
# M is treated as minutes in string repr
expected = TimedeltaIndex([np.timedelta64(i, 'm')
for i in np.arange(5).tolist()])
str_repr = ['{}{}'.format(x, unit) for x in np.arange(5)]
result = to_timedelta(wrapper(str_repr))
tm.assert_index_equal(result, expected)
result = TimedeltaIndex(wrapper(str_repr))
tm.assert_index_equal(result, expected)
# scalar
expected = Timedelta(np.timedelta64(2, np_unit).astype(
'timedelta64[ns]'))
result = to_timedelta(2, unit=unit)
assert result == expected
result = Timedelta(2, unit=unit)
assert result == expected
if unit == 'M':
expected = Timedelta(np.timedelta64(2, 'm').astype(
'timedelta64[ns]'))
result = to_timedelta('2{}'.format(unit))
assert result == expected
result = Timedelta('2{}'.format(unit))
assert result == expected
def test_numeric_conversions(self):
assert Timedelta(0) == np.timedelta64(0, 'ns')
assert Timedelta(10) == np.timedelta64(10, 'ns')
assert Timedelta(10, unit='ns') == np.timedelta64(10, 'ns')
assert Timedelta(10, unit='us') == np.timedelta64(10, 'us')
assert Timedelta(10, unit='ms') == np.timedelta64(10, 'ms')
assert Timedelta(10, unit='s') == np.timedelta64(10, 's')
assert Timedelta(10, unit='d') == np.timedelta64(10, 'D')
def test_timedelta_conversions(self):
assert (Timedelta(timedelta(seconds=1)) ==
np.timedelta64(1, 's').astype('m8[ns]'))
assert (Timedelta(timedelta(microseconds=1)) ==
np.timedelta64(1, 'us').astype('m8[ns]'))
assert (Timedelta(timedelta(days=1)) ==
np.timedelta64(1, 'D').astype('m8[ns]'))
def test_round(self):
t1 = Timedelta('1 days 02:34:56.789123456')
t2 = Timedelta('-1 days 02:34:56.789123456')
for (freq, s1, s2) in [('N', t1, t2),
('U', Timedelta('1 days 02:34:56.789123000'),
Timedelta('-1 days 02:34:56.789123000')),
('L', Timedelta('1 days 02:34:56.789000000'),
Timedelta('-1 days 02:34:56.789000000')),
('S', Timedelta('1 days 02:34:57'),
Timedelta('-1 days 02:34:57')),
('2S', Timedelta('1 days 02:34:56'),
Timedelta('-1 days 02:34:56')),
('5S', Timedelta('1 days 02:34:55'),
Timedelta('-1 days 02:34:55')),
('T', Timedelta('1 days 02:35:00'),
Timedelta('-1 days 02:35:00')),
('12T', Timedelta('1 days 02:36:00'),
Timedelta('-1 days 02:36:00')),
('H', Timedelta('1 days 03:00:00'),
Timedelta('-1 days 03:00:00')),
('d', Timedelta('1 days'),
Timedelta('-1 days'))]:
r1 = t1.round(freq)
assert r1 == s1
r2 = t2.round(freq)
assert r2 == s2
# invalid
for freq in ['Y', 'M', 'foobar']:
pytest.raises(ValueError, lambda: t1.round(freq))
t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')
t2 = -1 * t1
t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')
t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')
        # note that negative times round DOWN, so the expected values are not whole numbers
for (freq, s1, s2) in [('N', t1, t2),
('U', t1, t2),
('L', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('S', t1a,
TimedeltaIndex(['-1 days +00:00:00',
'-2 days +23:58:58',
'-2 days +23:57:56'],
dtype='timedelta64[ns]',
freq=None)
),
('12T', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('H', t1c,
TimedeltaIndex(['-1 days',
'-1 days',
'-1 days'],
dtype='timedelta64[ns]',
freq=None)
),
('d', t1c,
pd.TimedeltaIndex([-1, -1, -1], unit='D')
)]:
r1 = t1.round(freq)
tm.assert_index_equal(r1, s1)
r2 = t2.round(freq)
tm.assert_index_equal(r2, s2)
# invalid
for freq in ['Y', 'M', 'foobar']:
pytest.raises(ValueError, lambda: t1.round(freq))
def test_contains(self):
# Checking for any NaT-like objects
# GH 13603
td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
for v in [pd.NaT, None, float('nan'), np.nan]:
assert not (v in td)
td = to_timedelta([pd.NaT])
for v in [pd.NaT, None, float('nan'), np.nan]:
assert (v in td)
def test_identity(self):
td = Timedelta(10, unit='d')
assert isinstance(td, Timedelta)
assert isinstance(td, timedelta)
def test_short_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
assert Timedelta('10') == np.timedelta64(10, 'ns')
assert Timedelta('10ns') == np.timedelta64(10, 'ns')
assert Timedelta('100') == np.timedelta64(100, 'ns')
assert Timedelta('100ns') == np.timedelta64(100, 'ns')
assert Timedelta('1000') == np.timedelta64(1000, 'ns')
assert Timedelta('1000ns') == np.timedelta64(1000, 'ns')
assert Timedelta('1000NS') == np.timedelta64(1000, 'ns')
assert Timedelta('10us') == np.timedelta64(10000, 'ns')
assert Timedelta('100us') == np.timedelta64(100000, 'ns')
assert Timedelta('1000us') == np.timedelta64(1000000, 'ns')
assert Timedelta('1000Us') == np.timedelta64(1000000, 'ns')
assert Timedelta('1000uS') == np.timedelta64(1000000, 'ns')
assert Timedelta('1ms') == np.timedelta64(1000000, 'ns')
assert Timedelta('10ms') == np.timedelta64(10000000, 'ns')
assert Timedelta('100ms') == np.timedelta64(100000000, 'ns')
assert Timedelta('1000ms') == np.timedelta64(1000000000, 'ns')
assert Timedelta('-1s') == -np.timedelta64(1000000000, 'ns')
assert Timedelta('1s') == np.timedelta64(1000000000, 'ns')
assert Timedelta('10s') == np.timedelta64(10000000000, 'ns')
assert Timedelta('100s') == np.timedelta64(100000000000, 'ns')
assert Timedelta('1000s') == np.timedelta64(1000000000000, 'ns')
assert Timedelta('1d') == conv(np.timedelta64(1, 'D'))
assert Timedelta('-1d') == -conv(np.timedelta64(1, 'D'))
assert Timedelta('1D') == conv(np.timedelta64(1, 'D'))
assert Timedelta('10D') == conv(np.timedelta64(10, 'D'))
assert Timedelta('100D') == conv(np.timedelta64(100, 'D'))
assert Timedelta('1000D') == conv(np.timedelta64(1000, 'D'))
assert Timedelta('10000D') == conv(np.timedelta64(10000, 'D'))
# space
assert Timedelta(' 10000D ') == conv(np.timedelta64(10000, 'D'))
assert Timedelta(' - 10000D ') == -conv(np.timedelta64(10000, 'D'))
# invalid
with pytest.raises(ValueError):
Timedelta('1foo')
with pytest.raises(ValueError):
Timedelta('foo')
def test_full_format_converters(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
assert Timedelta('1days') == conv(d1)
assert Timedelta('1days,') == conv(d1)
assert Timedelta('- 1days,') == -conv(d1)
assert Timedelta('00:00:01') == conv(np.timedelta64(1, 's'))
assert Timedelta('06:00:01') == conv(np.timedelta64(6 * 3600 + 1, 's'))
assert Timedelta('06:00:01.0') == conv(
np.timedelta64(6 * 3600 + 1, 's'))
assert Timedelta('06:00:01.01') == conv(np.timedelta64(
1000 * (6 * 3600 + 1) + 10, 'ms'))
assert (Timedelta('- 1days, 00:00:01') ==
conv(-d1 + np.timedelta64(1, 's')))
assert (Timedelta('1days, 06:00:01') ==
conv(d1 + np.timedelta64(6 * 3600 + 1, 's')))
assert (Timedelta('1days, 06:00:01.01') ==
conv(d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))
# invalid
with pytest.raises(ValueError):
Timedelta('- 1days, 00')
def test_overflow(self):
# GH 9442
s = Series(pd.date_range('20130101', periods=100000, freq='H'))
s[0] += pd.Timedelta('1s 1ms')
# mean
result = (s - s.min()).mean()
expected = pd.Timedelta((pd.TimedeltaIndex((s - s.min())).asi8 / len(s)
).sum())
        # the computation is done in floating point, so there
        # may be some loss of precision
assert np.allclose(result.value / 1000, expected.value / 1000)
# sum
pytest.raises(ValueError, lambda: (s - s.min()).sum())
s1 = s[0:10000]
pytest.raises(ValueError, lambda: (s1 - s1.min()).sum())
s2 = s[0:1000]
result = (s2 - s2.min()).sum()
def test_pickle(self):
v = Timedelta('1 days 10:11:12.0123456')
v_p = tm.round_trip_pickle(v)
assert v == v_p
def test_timedelta_hash_equality(self):
# GH 11129
v = Timedelta(1, 'D')
td = timedelta(days=1)
assert hash(v) == hash(td)
d = {td: 2}
assert d[v] == 2
tds = timedelta_range('1 second', periods=20)
assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds)
# python timedeltas drop ns resolution
ns_td = Timedelta(1, 'ns')
assert hash(ns_td) != hash(ns_td.to_pytimedelta())
def test_implementation_limits(self):
min_td = Timedelta(Timedelta.min)
max_td = Timedelta(Timedelta.max)
# GH 12727
# timedelta limits correspond to int64 boundaries
assert min_td.value == np.iinfo(np.int64).min + 1
assert max_td.value == np.iinfo(np.int64).max
        # One step below the lower limit gives NaT rather than an OverflowError
assert (min_td - Timedelta(1, 'ns')) is NaT
with pytest.raises(OverflowError):
min_td - Timedelta(2, 'ns')
with pytest.raises(OverflowError):
max_td + Timedelta(1, 'ns')
# Same tests using the internal nanosecond values
td = Timedelta(min_td.value - 1, 'ns')
assert td is NaT
with pytest.raises(OverflowError):
Timedelta(min_td.value - 2, 'ns')
with pytest.raises(OverflowError):
Timedelta(max_td.value + 1, 'ns')
def test_total_seconds_precision(self):
# GH 19458
assert Timedelta('30S').total_seconds() == 30.0
assert Timedelta('0').total_seconds() == 0.0
assert Timedelta('-2S').total_seconds() == -2.0
assert Timedelta('5.324S').total_seconds() == 5.324
assert (Timedelta('30S').total_seconds() - 30.0) < 1e-20
assert (30.0 - Timedelta('30S').total_seconds()) < 1e-20
def test_timedelta_arithmetic(self):
data = pd.Series(['nat', '32 days'], dtype='timedelta64[ns]')
deltas = [timedelta(days=1), Timedelta(1, unit='D')]
for delta in deltas:
result_method = data.add(delta)
result_operator = data + delta
expected = pd.Series(['nat', '33 days'], dtype='timedelta64[ns]')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
result_method = data.sub(delta)
result_operator = data - delta
expected = pd.Series(['nat', '31 days'], dtype='timedelta64[ns]')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
# GH 9396
result_method = data.div(delta)
result_operator = data / delta
expected = pd.Series([np.nan, 32.], dtype='float64')
tm.assert_series_equal(result_operator, expected)
tm.assert_series_equal(result_method, expected)
def test_apply_to_timedelta(self):
timedelta_NaT = pd.to_timedelta('NaT')
list_of_valid_strings = ['00:00:01', '00:00:02']
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ['00:00:01', np.nan, pd.NaT, timedelta_NaT]
# TODO: unused?
a = pd.to_timedelta(list_of_strings) # noqa
b = Series(list_of_strings).apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
def test_components(self):
rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')
rng.components
# with nat
s = Series(rng)
s[1] = np.nan
result = s.dt.components
assert not result.iloc[0].isna().all()
assert result.iloc[1].isna().all()
@pytest.mark.parametrize('value, expected', [
(Timedelta('10S'), True),
(Timedelta('-10S'), True),
(Timedelta(10, unit='ns'), True),
(Timedelta(0, unit='ns'), False),
(Timedelta(-10, unit='ns'), True),
(Timedelta(None), True),
(pd.NaT, True),
])
def test_truthiness(value, expected):
# https://github.com/pandas-dev/pandas/issues/21484
assert bool(value) is expected
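# Hedged sketch (added for illustration, not part of the original test module):
# a few of the Timedelta behaviours exercised above, shown directly.
def _timedelta_behaviour_demo():
    import pandas as pd
    rounded = pd.Timedelta('1 days 02:34:56.789123456').round('S')  # Timedelta('1 days 02:34:57')
    from_unit = pd.Timedelta(10, unit='d')                          # same as Timedelta('10D')
    total = pd.Timedelta('30S').total_seconds()                     # 30.0
    return rounded, from_unit, total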
| bsd-3-clause |
whn09/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimators_test.py | 37 | 5114 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom optimizer tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import estimator as estimator_lib
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.estimators._sklearn import train_test_split
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"transformed_x": constant_op.constant([9.])
}, {
"transformed_y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables.Variable([0.])
_ = labels
predictions = features["transformed_x"]
loss = constant_op.constant([2.])
return predictions, loss, control_flow_ops.no_op()
estimator = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator.fit(input_fn=input_fn, steps=1)
prediction = next(estimator.predict(input_fn=input_fn, as_iterable=True))
# predictions = transformed_x (9)
self.assertEqual(9., prediction)
metrics = estimator.evaluate(
input_fn=input_fn, steps=1,
metrics={"label":
metric_spec.MetricSpec(lambda predictions, labels: labels)})
# labels = transformed_y (99)
self.assertEqual(99., metrics["label"])
def testNoneFeatureEngineeringFn(self):
def input_fn():
return {
"x": constant_op.constant([1.])
}, {
"y": constant_op.constant([11.])
}
def feature_engineering_fn(features, labels):
_, _ = features, labels
return {
"x": constant_op.constant([9.])
}, {
"y": constant_op.constant([99.])
}
def model_fn(features, labels):
# dummy variable:
_ = variables.Variable([0.])
_ = labels
predictions = features["x"]
loss = constant_op.constant([2.])
return predictions, loss, control_flow_ops.no_op()
estimator_with_fe_fn = estimator_lib.Estimator(
model_fn=model_fn, feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=1)
estimator_without_fe_fn = estimator_lib.Estimator(model_fn=model_fn)
estimator_without_fe_fn.fit(input_fn=input_fn, steps=1)
# predictions = x
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(9., prediction_with_fe_fn)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(
input_fn=input_fn, as_iterable=True))
self.assertEqual(1., prediction_without_fe_fn)
class CustomOptimizer(test.TestCase):
"""Custom optimizer tests."""
def testIrisMomentum(self):
random.seed(42)
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
def custom_optimizer():
return momentum_lib.MomentumOptimizer(learning_rate=0.01, momentum=0.9)
classifier = learn.DNNClassifier(
hidden_units=[10, 20, 10],
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
n_classes=3,
optimizer=custom_optimizer,
config=learn.RunConfig(tf_random_seed=1))
classifier.fit(x_train, y_train, steps=400)
predictions = np.array(list(classifier.predict_classes(x_test)))
score = accuracy_score(y_test, predictions)
self.assertGreater(score, 0.65, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
atsao72/sympy | doc/ext/docscrape_sphinx.py | 52 | 7983 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
## Lines that are commented out are used to make the
## autosummary:: table. Since SymPy does not use the
## autosummary:: functionality, it is easiest to just comment it
## out.
#autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
#if not self._obj or hasattr(self._obj, param):
# autosum += [" %s%s" % (prefix, param)]
#else:
others.append((param, param_type, desc))
#if autosum:
# out += ['.. autosummary::', ' :toctree:', '']
# out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Other Parameters',
'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for s in self._other_keys:
out += self._str_section(s)
out += self._str_member_list('Attributes')
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
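# Hedged usage sketch (not part of the original module): rendering a numpydoc
# style docstring to Sphinx reST via get_doc_object. The sample function below
# is made up; the sketch only assumes the docscrape module imported above is
# importable.
def _render_docstring_example():
    def sample(x):
        """Return twice the input.

        Parameters
        ----------
        x : float
            Value to double.

        Returns
        -------
        float
            Twice ``x``.
        """
        return 2 * x
    doc = get_doc_object(sample)  # callable, so this dispatches to SphinxFunctionDoc
    return str(doc)               # reST with :Parameters:/:Returns: field lists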
| bsd-3-clause |
cpcloud/numpy | numpy/linalg/linalg.py | 35 | 67345 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains a high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % len(a.shape))
def _assertRankAtLeast2(*arrays):
for a in arrays:
if len(a.shape) < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % len(a.shape))
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _assertNoEmpty2d(*arrays):
for a in arrays:
if a.size == 0 and product(a.shape[-2:]) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv, einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
    The solutions are computed using LAPACK routine _gesv.
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t))
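# Hedged sketch (not part of the module): the broadcasting behaviour mentioned in
# the solve docstring Notes, solving a stack of independent 2x2 systems in one call.
def _stacked_solve_demo():
    import numpy as np
    a = np.array([[[3., 1.], [1., 2.]],
                  [[2., 0.], [0., 4.]]])  # shape (2, 2, 2): two coefficient matrices
    b = np.array([[9., 8.],
                  [2., 8.]])              # shape (2, 2): one right-hand side per system
    x = np.linalg.solve(a, b)             # shape (2, 2); np.dot(a[i], x[i]) == b[i]
    return x                              # -> [[2., 3.], [1., 2.]]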
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return wrap(gufunc(a, signature=signature, extobj=extobj).astype(result_t))
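# Hedged sketch (not part of the module): the solve-via-Cholesky pattern from the
# Notes above -- solve L y = b, then L.H x = y. np.linalg.solve is used for both
# steps only for simplicity; a dedicated triangular solver (for example
# scipy.linalg.solve_triangular) would normally be used to exploit the structure.
def _cholesky_solve_demo():
    import numpy as np
    A = np.array([[4., 2.], [2., 3.]])  # symmetric positive-definite
    b = np.array([6., 5.])
    L = np.linalg.cholesky(A)
    y = np.linalg.solve(L, b)           # first triangular system:  L y = b
    x = np.linalg.solve(L.conj().T, y)  # second triangular system: L.H x = y
    return x, np.allclose(np.dot(A, x), b)  # x == [1., 1.] and A x == b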
# QR decomposition
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
        The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
Numpy 1.8 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning)
mode = 'reduced'
elif mode in ('e', 'economic'):
            msg = "The 'economic' option is deprecated."
warnings.warn(msg, DeprecationWarning)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
        Specifies whether the calculation is done with the lower triangular
        part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M,) ndarray
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
    The eigenvalues are computed using LAPACK routines _ssyevd, _heevd.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288+0.j, 5.82842712+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t))
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be always be of complex type. When `a` is real
the resulting eigenvalues will be real (0 imaginary part) or
occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t)
return w.astype(result_t), wrap(vt)
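# Doctest-style sanity check of the defining relation quoted in the docstring
# above (illustrative sketch; any real square matrix works here):
#
#     >>> import numpy as np
#     >>> a = np.array([[1., 2.], [3., 4.]])
#     >>> w, v = np.linalg.eig(a)
#     >>> np.allclose(np.dot(a, v), v * w)   # column i of dot(a, v) is w[i] * v[:, i]
#     True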
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
    a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : (..., M) ndarray
The eigenvalues, not necessarily ordered.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _ssyevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t))
vt = vt.astype(result_t)
return w, wrap(vt)
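# Doctest-style illustration of the UPLO argument described above: only the
# selected triangle of `a` is referenced, so the opposite triangle may hold
# anything (illustrative sketch):
#
#     >>> import numpy as np
#     >>> lower = np.array([[2., 99.], [1., 2.]])   # upper triangle ignored with UPLO='L'
#     >>> full = np.array([[2., 1.], [1., 2.]])
#     >>> np.allclose(np.linalg.eigh(lower, UPLO='L')[0], np.linalg.eigh(full)[0])
#     True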
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t)
s = s.astype(_realType(result_t))
vt = vt.astype(result_t)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t))
return s
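# Doctest-style check of the relation stated in the Notes above: the squared
# singular values of `a` are the eigenvalues of ``a.H a`` (illustrative sketch):
#
#     >>> import numpy as np
#     >>> a = np.array([[1., 0.], [1., 1.], [0., 1.]])
#     >>> s = np.linalg.svd(a, compute_uv=False)
#     >>> np.allclose(np.sort(s ** 2), np.sort(np.linalg.eigvalsh(np.dot(a.T, a))))
#     True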
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[0]/s[-1]
else:
return norm(x, p)*norm(inv(x), p)
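# Doctest-style check that the SVD-based default (p=None) agrees with the
# explicit 2-norm path for an invertible matrix (illustrative sketch):
#
#     >>> import numpy as np
#     >>> x = np.array([[1., 0., -1.], [0., 1., 0.], [1., 0., 1.]])
#     >>> np.allclose(np.linalg.cond(x), np.linalg.cond(x, 2))
#     True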
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * max(M.shape) * finfo(S.dtype).eps
return sum(S > tol)
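# Doctest-style comparison of the default tolerance with the alternative
# "expected roundoff error" threshold discussed in the Notes above
# (illustrative sketch; both detect the same rank deficiency here):
#
#     >>> import numpy as np
#     >>> M = np.eye(4); M[-1, -1] = 0.
#     >>> S = np.linalg.svd(M, compute_uv=False)
#     >>> tol_default = S.max() * max(M.shape) * np.finfo(S.dtype).eps
#     >>> tol_nr = S.max() * np.finfo(M.dtype).eps / 2. * np.sqrt(M.shape[0] + M.shape[1] + 1.)
#     >>> int((S > tol_default).sum()), int((S > tol_nr).sum())
#     (3, 3)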
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.;
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
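# Doctest-style check of the SVD construction described in the Notes above:
# when every singular value is above the cutoff, the pseudo-inverse is
# ``V * diag(1/s) * U.T`` (illustrative sketch):
#
#     >>> import numpy as np
#     >>> a = np.array([[1., 2.], [3., 4.], [5., 6.]])
#     >>> u, s, vt = np.linalg.svd(a, full_matrices=False)
#     >>> B = np.dot(vt.T, np.dot(np.diag(1. / s), u.T))
#     >>> np.allclose(B, np.linalg.pinv(a))
#     True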
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
    If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 1.6.0.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
return sign.astype(result_t), logdet.astype(real_t)
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
    (3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
return _umath_linalg.det(a, signature=signature).astype(result_t)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
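    # The LAPACK calls below use the standard workspace-query convention:
    # each routine is first called with lwork = -1, which only writes the
    # optimal workspace size into work[0] (and rwork[0] on the complex path);
    # the work arrays are then allocated to that size and the routine is
    # called again to perform the actual solve.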
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute the extreme singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None):
"""
Matrix or vector norm.
This function is able to return one of seven different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([6, 6])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
# Check the default case first and handle it immediately.
if ord is None and axis is None:
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
return sqrt(sqnorm)
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis)
elif ord == -Inf:
return abs(x).min(axis=axis)
elif ord == 0:
# Zero norm
return (x != 0).sum(axis=axis)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis))
else:
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
if row_axis % nd == col_axis % nd:
raise ValueError('Duplicate axes given.')
if ord == 2:
return _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
return _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
return add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
return add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
return add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
return add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
return sqrt(add.reduce((x.conj() * x).real, axis=axis))
else:
raise ValueError("Invalid norm order for matrices.")
else:
raise ValueError("Improper number of dimensions to norm.")
| bsd-3-clause |
afgaron/rgz-analysis | python/rgz_sdss.py | 2 | 2584 | from astropy.io import fits
from astropy.cosmology import WMAP9
from astropy import units as u
from matplotlib import pyplot as plt
import numpy as np
from scipy import stats
plt.ion()
def get_rgz_data():
with fits.open('/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis/rgz_wise_75_sdss.fits') as f:
data = f[1].data
return data
def get_na10_data():
with fits.open('/Users/willettk/Astronomy/Research/GalaxyZoo/fits/na10.fits') as f:
data = f[1].data
return data
def plot_rgz_sdss(rgzdata,nadata):
# Plot comparison of the (g-r) CMD for both the RGZ 75% consensus sources (matched with WISE) and SDSS galaxies from NA10
fig = plt.figure(1)
fig.clf()
ax = fig.add_subplot(111)
gr_color = rgzdata['absmagG'] - rgzdata['absmagR']
absmagR = rgzdata['absmagR']
na_gr_color = nadata['g-r']
dm = WMAP9.distmod(nadata['z'])
na_absmagR = (nadata['r'] * u.mag - dm).value
h,xedges,yedges,im=plt.hist2d(absmagR,gr_color,bins=30,range=[[-24,-18],[0,1.5]],cmap=plt.cm.hot)
#h,xedges,yedges = np.histogram2d(absmagR,gr_color,bins=20,range=[[-24,-18],[0,1.5]])
xa,ya = np.array(xedges),np.array(yedges)
xcen = xa[:-1] + (xa[1]-xa[0])/2.
ycen = ya[:-1] + (ya[1]-ya[0])/2.
#cs = plt.contour(xcen,ycen,h.T,10,vmin=0.,colors='k',linewidths=1)
#ax.scatter(absmagR,gr_color,s=1,c='r')
hn,xedgesn,yedgesn = np.histogram2d(na_absmagR,na_gr_color,bins=30,range=[[-24,-18],[0,1.5]])
csn = plt.contour(xcen,ycen,hn.T,10,vmin=0.,colors='g',linewidths=2)
#ax.scatter(na_absmagR,na_gr_color,s=1,c='b')
#Create custom artists for legend
c = csn.collections[0]
art1 = plt.Line2D((0,1),(0,0), color='g')
legend = ax.legend([art1],['Nair & Abraham (2010)'],loc='upper left', shadow=True)
# Set final properties
ax.set_xlim(-18,-24)
ax.set_ylim(0.2,1.0)
ax.set_xlabel(r'$M_r$',fontsize=20)
ax.set_ylabel(r'$(g-r)$',fontsize=20)
plt.show()
# Try linear fits to the data, see if slope of the red sequence is same
ind1 = (gr_color > 0.6) & (gr_color < 1.0)
ind2 = (na_gr_color > 0.6) & (na_gr_color < 1.0)
x1,y1 = absmagR[ind1],gr_color[ind1]
x2,y2 = na_absmagR[ind2],na_gr_color[ind2]
slope1, intercept1, r_value1, p_value1, slope_std_error1 = stats.linregress(x1, y1)
slope2, intercept2, r_value2, p_value2, slope_std_error2 = stats.linregress(x2, y2)
print 'RGZ: slope = %.2f +- %.5f' % (slope1,slope_std_error1)
print 'NA10: slope = %.2f +- %.5f' % (slope2,slope_std_error2)
return None
| mit |
trungnt13/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The median absolute deviation to non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
title='Error: mean absolute deviation\n to non corrupt data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
kmike/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 4 | 6780 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012
# License: BSD-like
import warnings
from tempfile import mkdtemp
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.cluster import Ward, WardAgglomeration, ward_tree
from sklearn.cluster.hierarchical import _hc_cut
from sklearn.feature_extraction.image import grid_to_graph
def test_structured_ward_tree():
"""
Check that we obtain the correct solution for structured ward tree.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
children, n_components, n_leaves, parent = ward_tree(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError, ward_tree, X.T, np.ones((4, 4)))
def test_unstructured_ward_tree():
"""
Check that we obtain the correct solution for unstructured ward tree.
"""
rnd = np.random.RandomState(0)
X = rnd.randn(50, 100)
for this_X in (X, X[0]):
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always", UserWarning)
# With specified a number of clusters just for the sake of
# raising a warning and testing the warning code
children, n_nodes, n_leaves, parent = ward_tree(this_X.T,
n_clusters=10)
assert_equal(len(warning_list), 1)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_ward_tree():
"""
Check that the height of ward tree is sorted.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
children, n_nodes, n_leaves, parent = ward_tree(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_ward_clustering():
"""
Check that we obtain the correct number of clusters with Ward clustering.
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(100, 50)
connectivity = grid_to_graph(*mask.shape)
clustering = Ward(n_clusters=10, connectivity=connectivity)
clustering.fit(X)
# test caching
clustering = Ward(n_clusters=10, connectivity=connectivity,
memory=mkdtemp())
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
np.testing.assert_array_equal(clustering.labels_, labels)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
# Check that we raise a TypeError on dense matrices
clustering = Ward(n_clusters=10,
connectivity=connectivity.todense())
assert_raises(TypeError, clustering.fit, X)
clustering = Ward(n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.todense()[:10, :10]))
assert_raises(ValueError, clustering.fit, X)
def test_ward_agglomeration():
"""
Check that we obtain the correct solution in a simplistic case
"""
rnd = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rnd.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
ward = WardAgglomeration(n_clusters=5, connectivity=connectivity)
ward.fit(X)
assert_true(np.size(np.unique(ward.labels_)) == 5)
Xred = ward.transform(X)
assert_true(Xred.shape[1] == 5)
Xfull = ward.inverse_transform(Xred)
assert_true(np.unique(Xfull[0]).size == 5)
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
def test_scikit_vs_scipy():
"""Test scikit ward with full connectivity (i.e. unstructured) vs scipy
"""
from scipy.sparse import lil_matrix
n, p, k = 10, 5, 3
rnd = np.random.RandomState(0)
connectivity = lil_matrix(np.ones((n, n)))
for i in range(5):
X = .1 * rnd.normal(size=(n, p))
X -= 4 * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.ward(X)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = ward_tree(X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_popagation():
"""
Check that connectivity in the ward tree is propagated correctly during
merging.
"""
from sklearn.neighbors import NearestNeighbors
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144),
])
nn = NearestNeighbors(n_neighbors=10).fit(X)
connectivity = nn.kneighbors_graph(X)
ward = Ward(n_clusters=4, connectivity=connectivity)
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_connectivity_fixing_non_lil():
"""
    Check non-regression of a bug when a connectivity matrix that is not
    item-assignable is provided with more than one connected component.
"""
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = Ward(connectivity=c)
with warnings.catch_warnings(record=True):
w.fit(x)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
yoojioh/gamelanpy | gamelanpy/weighted_kmeans.py | 1 | 18622 | # Original Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
#
# Edited to cover weighted K-means algorithm by Ji Oh Yoo <jioh.yoo@gmail.com>
# License: BSD 3 clause
import numpy as np
from numpy import random
from sklearn.metrics.pairwise import euclidean_distances
import _weighted_kmeans
def weighted_kmeans(X, n_clusters, weights=None, n_init=10, max_iter=300, tol=1e-4):
"""Weighted K-means clustering algorithm.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
weights : array-like, shape(n_samples,), optional, default=None
Weights for the given data
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
tol : float, optional
The relative increment in the results before declaring convergence.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
"""
if len(X.shape) == 1:
X = X[:, np.newaxis]
if n_init <= 0:
raise ValueError("Number of iteration n_init=%d must be bigger than zero." % n_init)
if n_clusters < 0:
raise ValueError("Number of clusters n_clusters=%d must be at least 1." % n_clusters)
if n_clusters > X.shape[0]:
raise ValueError("Number of clusters n_clusters=%d is larger than number of samples %d"
% (n_clusters, X.shape[0]))
X = np.array(X, dtype=float)
tol = _tolerance(X, tol)
# subtract mean of X for more accurate distance computations
X_mean = X.mean(axis=0)
X -= X_mean
# precompute squared norms of data points
x_squared_norms = np.einsum('ij,ij->i', X, X)
best_labels, best_inertia, best_centers = None, None, None
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = _weighted_kmeans_single(
X, n_clusters, weights=weights, max_iter=max_iter, tol=tol,
x_squared_norms=x_squared_norms)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
# add mean of X to original distribution
best_centers += X_mean
return best_centers, best_labels, best_inertia
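# Doctest-style usage sketch for the helper above (assumes the compiled
# _weighted_kmeans extension is built; only shapes are shown, since the label
# numbering depends on the random initialization):
#
#     >>> X = np.array([[0.], [0.1], [5.], [5.1]])
#     >>> centers, labels, inertia = weighted_kmeans(X, 2, weights=np.ones(4))
#     >>> centers.shape, labels.shape
#     ((2, 1), (4,))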
def _weighted_kmeans_single(X, n_clusters, x_squared_norms, weights=None, max_iter=300, tol=1e-4):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X: array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters: int
The number of clusters to form as well as the number of
centroids to generate.
weights : array-like, shape(n_samples,), optional, default=None
Weights for the given data
max_iter: int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
tol: float, optional
The relative increment in the results before declaring convergence.
x_squared_norms: array
Precomputed x_squared_norms.
Returns
-------
centroid: float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label: integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia: float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _k_init(X, n_clusters, x_squared_norms=x_squared_norms)
# Allocate memory to store the distances for each sample to its
# closer center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=np.float64)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers, weights=weights,
distances=distances)
# computation of the means is also called the M-step of EM
centers = _weighted_kmeans._centers_dense(X, labels, n_clusters, distances, weights)
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
# break if diff is less than tol at this iteration
diff = centers_old - centers
if (diff * diff).sum() <= tol:
break
return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances, weights=None):
"""Compute labels and inertia using a full distance matrix.
This will overwrite the 'distances' array in-place.
Parameters
----------
X : numpy array, shape (n_sample, n_features)
Input data.
x_squared_norms : numpy array, shape (n_samples,)
Precomputed squared norms of X.
centers : numpy array, shape (n_clusters, n_features)
Cluster centers which data is assigned to.
distances : numpy array, shape (n_samples,)
Pre-allocated array in which distances are stored.
weights : (optional) numpy array, shape (n_samples,)
Weights for each point in X. Default is None
Returns
-------
labels : numpy array, dtype=np.int, shape (n_samples,)
Indices of clusters that samples are assigned to.
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
k = centers.shape[0]
all_distances = euclidean_distances(centers, X, x_squared_norms,
squared=True)
labels = np.empty(n_samples, dtype=np.int32)
labels.fill(-1)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(k):
dist = all_distances[center_id]
labels[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
if weights is None:
inertia = mindist.sum()
else:
inertia = (mindist * weights).sum()
return labels, inertia
def _labels_inertia(X, x_squared_norms, centers, weights=None, distances=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
This will compute the distances in-place.
Parameters
----------
X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms: array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers: float64 array, shape (k, n_features)
The cluster centers.
weights : array-like, shape(n_samples,), optional, default=None
Weights for the given data
distances: float64 array, shape (n_samples,)
Pre-allocated array to be filled in with each sample's distance
to the closest center.
Returns
-------
labels: int array of shape(n)
The resulting assignment
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
# set the default value of centers to -1 to be able to detect any anomaly
# easily
labels = -np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=np.float64)
# distances will be changed in-place
return _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances, weights=weights)
def _k_init(X, n_clusters, x_squared_norms, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
-----------
X: array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters: integer
The number of seeds to choose
x_squared_norms: array, shape (n_samples,)
Squared Euclidean norm of each data point.
n_local_trials: integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
Selects initial cluster centers for k-mean clustering in a smart way
to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features))
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random.randint(n_samples)
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0], X, Y_norm_squared=x_squared_norms, squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq,
distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
centers[c] = X[best_candidate]
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
variances = np.var(X, axis=0)
return np.mean(variances) * tol
class WeightedKMeans():
"""Weighted K-Means clustering
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Notes
------
The k-means problem is solved using Lloyd's algorithm.
The average complexity is given by O(k n T), were n is the number of
samples and T is the number of iteration.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
clustering algorithms available), but it falls in local minima. That's why
it can be useful to restart it several times.
"""
def __init__(self, n_clusters=8, n_init=10, max_iter=300, tol=1e-4):
self.n_clusters = n_clusters
self.max_iter = max_iter
self.tol = tol
self.n_init = n_init
def _check_fitted(self):
if not hasattr(self, "cluster_centers_"):
raise AttributeError("Model has not been trained yet.")
def fit(self, X, weights=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
self.cluster_centers_, self.labels_, self.inertia_ = \
weighted_kmeans(
X, n_clusters=self.n_clusters, weights=weights,
n_init=self.n_init, max_iter=self.max_iter, tol=self.tol)
return self
def fit_predict(self, X):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
        # The sklearn validation helpers (_check_fit_data / _check_test_data)
        # were not ported to this class, so fall back to a plain array
        # conversion to avoid an AttributeError at runtime.
        X = np.asarray(X, dtype=float)
        return self.fit(X)._transform(X)
def transform(self, X):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
self._check_fitted()
        X = np.asarray(X, dtype=float)  # _check_test_data was not ported to this class
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
self._check_fitted()
x_squared_norms = np.einsum('ij,ij->i', X, X)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
self._check_fitted()
x_squared_norms = np.einsum('ij,ij->i', X, X)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
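if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; assumes the compiled
    # _weighted_kmeans extension is available): cluster two synthetic blobs
    # where the second blob carries twice the per-point weight of the first.
    demo_rng = random.RandomState(0)
    demo_X = np.vstack([demo_rng.randn(50, 2), demo_rng.randn(50, 2) + 5.])
    demo_weights = np.concatenate([np.ones(50), 2. * np.ones(50)])
    demo_km = WeightedKMeans(n_clusters=2).fit(demo_X, weights=demo_weights)
    assert demo_km.cluster_centers_.shape == (2, 2)
    assert demo_km.labels_.shape == (100,)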
| mit |
mjudsp/Tsallis | sklearn/mixture/tests/test_gaussian_mixture.py | 4 | 35709 | import sys
import warnings
import numpy as np
from scipy import stats, linalg
from sklearn.covariance import EmpiricalCovariance
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.mixture.gaussian_mixture import GaussianMixture
from sklearn.mixture.gaussian_mixture import (
_estimate_gaussian_precisions_cholesky_full,
_estimate_gaussian_precisions_cholesky_tied,
_estimate_gaussian_precisions_cholesky_diag,
_estimate_gaussian_precisions_cholesky_spherical)
from sklearn.exceptions import ConvergenceWarning, NotFittedError
from sklearn.utils.extmath import fast_logdet
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
COVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical']
def generate_data(n_samples, n_features, weights, means, precisions,
covariance_type):
rng = np.random.RandomState(0)
X = []
if covariance_type == 'spherical':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['spherical'])):
X.append(rng.multivariate_normal(m, c * np.eye(n_features),
int(np.round(w * n_samples))))
if covariance_type == 'diag':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['diag'])):
X.append(rng.multivariate_normal(m, np.diag(c),
int(np.round(w * n_samples))))
if covariance_type == 'tied':
for _, (w, m) in enumerate(zip(weights, means)):
X.append(rng.multivariate_normal(m, precisions['tied'],
int(np.round(w * n_samples))))
if covariance_type == 'full':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['full'])):
X.append(rng.multivariate_normal(m, c,
int(np.round(w * n_samples))))
X = np.vstack(X)
return X
class RandomData(object):
def __init__(self, rng, n_samples=500, n_components=2, n_features=2,
scale=50):
self.n_samples = n_samples
self.n_components = n_components
self.n_features = n_features
self.weights = rng.rand(n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.rand(n_components, n_features) * scale
self.covariances = {
'spherical': .5 + rng.rand(n_components),
'diag': (.5 + rng.rand(n_components, n_features)) ** 2,
'tied': make_spd_matrix(n_features, random_state=rng),
'full': np.array([
make_spd_matrix(n_features, random_state=rng) * .5
for _ in range(n_components)])}
self.precisions = {
'spherical': 1. / self.covariances['spherical'],
'diag': 1. / self.covariances['diag'],
'tied': linalg.inv(self.covariances['tied']),
'full': np.array([linalg.inv(covariance)
for covariance in self.covariances['full']])}
self.X = dict(zip(COVARIANCE_TYPE, [generate_data(
n_samples, n_features, self.weights, self.means, self.covariances,
covar_type) for covar_type in COVARIANCE_TYPE]))
self.Y = np.hstack([k * np.ones(int(np.round(w * n_samples)))
for k, w in enumerate(self.weights)])
def test_gaussian_mixture_attributes():
# test bad parameters
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
n_components_bad = 0
gmm = GaussianMixture(n_components=n_components_bad)
assert_raise_message(ValueError,
"Invalid value for 'n_components': %d "
"Estimation requires at least one component"
% n_components_bad, gmm.fit, X)
# covariance_type should be in [spherical, diag, tied, full]
covariance_type_bad = 'bad_covariance_type'
gmm = GaussianMixture(covariance_type=covariance_type_bad)
assert_raise_message(ValueError,
"Invalid value for 'covariance_type': %s "
"'covariance_type' should be in "
"['spherical', 'tied', 'diag', 'full']"
% covariance_type_bad,
gmm.fit, X)
tol_bad = -1
gmm = GaussianMixture(tol=tol_bad)
assert_raise_message(ValueError,
"Invalid value for 'tol': %.5f "
"Tolerance used by the EM must be non-negative"
% tol_bad, gmm.fit, X)
reg_covar_bad = -1
gmm = GaussianMixture(reg_covar=reg_covar_bad)
assert_raise_message(ValueError,
"Invalid value for 'reg_covar': %.5f "
"regularization on covariance must be "
"non-negative" % reg_covar_bad, gmm.fit, X)
max_iter_bad = 0
gmm = GaussianMixture(max_iter=max_iter_bad)
assert_raise_message(ValueError,
"Invalid value for 'max_iter': %d "
"Estimation requires at least one iteration"
% max_iter_bad, gmm.fit, X)
n_init_bad = 0
gmm = GaussianMixture(n_init=n_init_bad)
assert_raise_message(ValueError,
"Invalid value for 'n_init': %d "
"Estimation requires at least one run"
% n_init_bad, gmm.fit, X)
init_params_bad = 'bad_method'
gmm = GaussianMixture(init_params=init_params_bad)
assert_raise_message(ValueError,
"Unimplemented initialization method '%s'"
% init_params_bad,
gmm.fit, X)
# test good parameters
n_components, tol, n_init, max_iter, reg_covar = 2, 1e-4, 3, 30, 1e-1
covariance_type, init_params = 'full', 'random'
gmm = GaussianMixture(n_components=n_components, tol=tol, n_init=n_init,
max_iter=max_iter, reg_covar=reg_covar,
covariance_type=covariance_type,
init_params=init_params).fit(X)
assert_equal(gmm.n_components, n_components)
assert_equal(gmm.covariance_type, covariance_type)
assert_equal(gmm.tol, tol)
assert_equal(gmm.reg_covar, reg_covar)
assert_equal(gmm.max_iter, max_iter)
assert_equal(gmm.n_init, n_init)
assert_equal(gmm.init_params, init_params)
def test_check_X():
from sklearn.mixture.base import _check_X
rng = np.random.RandomState(0)
n_samples, n_components, n_features = 10, 2, 2
X_bad_dim = rng.rand(n_components - 1, n_features)
assert_raise_message(ValueError,
'Expected n_samples >= n_components '
'but got n_components = %d, n_samples = %d'
% (n_components, X_bad_dim.shape[0]),
_check_X, X_bad_dim, n_components)
X_bad_dim = rng.rand(n_components, n_features + 1)
assert_raise_message(ValueError,
'Expected the input data X have %d features, '
'but got %d features'
% (n_features, X_bad_dim.shape[1]),
_check_X, X_bad_dim, n_components, n_features)
X = rng.rand(n_samples, n_features)
assert_array_equal(X, _check_X(X, n_components, n_features))
def test_check_weights():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
X = rand_data.X['full']
g = GaussianMixture(n_components=n_components)
# Check bad shape
weights_bad_shape = rng.rand(n_components, 1)
g.weights_init = weights_bad_shape
assert_raise_message(ValueError,
"The parameter 'weights' should have the shape of "
"(%d,), but got %s" %
(n_components, str(weights_bad_shape.shape)),
g.fit, X)
# Check bad range
weights_bad_range = rng.rand(n_components) + 1
g.weights_init = weights_bad_range
assert_raise_message(ValueError,
"The parameter 'weights' should be in the range "
"[0, 1], but got max value %.5f, min value %.5f"
% (np.min(weights_bad_range),
np.max(weights_bad_range)),
g.fit, X)
# Check bad normalization
weights_bad_norm = rng.rand(n_components)
weights_bad_norm = weights_bad_norm / (weights_bad_norm.sum() + 1)
g.weights_init = weights_bad_norm
assert_raise_message(ValueError,
"The parameter 'weights' should be normalized, "
"but got sum(weights) = %.5f"
% np.sum(weights_bad_norm),
g.fit, X)
# Check good weights matrix
weights = rand_data.weights
g = GaussianMixture(weights_init=weights, n_components=n_components)
g.fit(X)
assert_array_equal(weights, g.weights_init)
def test_check_means():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components, n_features = rand_data.n_components, rand_data.n_features
X = rand_data.X['full']
g = GaussianMixture(n_components=n_components)
# Check means bad shape
means_bad_shape = rng.rand(n_components + 1, n_features)
g.means_init = means_bad_shape
assert_raise_message(ValueError,
"The parameter 'means' should have the shape of ",
g.fit, X)
# Check good means matrix
means = rand_data.means
g.means_init = means
g.fit(X)
assert_array_equal(means, g.means_init)
def test_check_precisions():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components, n_features = rand_data.n_components, rand_data.n_features
# Define the bad precisions for each covariance_type
precisions_bad_shape = {
'full': np.ones((n_components + 1, n_features, n_features)),
'tied': np.ones((n_features + 1, n_features + 1)),
'diag': np.ones((n_components + 1, n_features)),
'spherical': np.ones((n_components + 1))}
# Define not positive-definite precisions
precisions_not_pos = np.ones((n_components, n_features, n_features))
precisions_not_pos[0] = np.eye(n_features)
precisions_not_pos[0, 0, 0] = -1.
precisions_not_positive = {
'full': precisions_not_pos,
'tied': precisions_not_pos[0],
'diag': -1. * np.ones((n_components, n_features)),
'spherical': -1. * np.ones(n_components)}
not_positive_errors = {
'full': 'symmetric, positive-definite',
'tied': 'symmetric, positive-definite',
'diag': 'positive',
'spherical': 'positive'}
for covar_type in COVARIANCE_TYPE:
X = RandomData(rng).X[covar_type]
g = GaussianMixture(n_components=n_components,
covariance_type=covar_type,
random_state=rng)
# Check precisions with bad shapes
g.precisions_init = precisions_bad_shape[covar_type]
assert_raise_message(ValueError,
"The parameter '%s precision' should have "
"the shape of" % covar_type,
g.fit, X)
# Check not positive precisions
g.precisions_init = precisions_not_positive[covar_type]
assert_raise_message(ValueError,
"'%s precision' should be %s"
% (covar_type, not_positive_errors[covar_type]),
g.fit, X)
# Check the correct init of precisions_init
g.precisions_init = rand_data.precisions[covar_type]
g.fit(X)
assert_array_equal(rand_data.precisions[covar_type], g.precisions_init)
def test_suffstat_sk_full():
# compare the precision matrix computed from the
# EmpiricalCovariance.covariance_ fitted on X*sqrt(resp)
# with _estimate_gaussian_precisions_cholesky_full, n_components=1
rng = np.random.RandomState(0)
n_samples, n_features = 500, 2
# special case 1, assuming data is "centered"
X = rng.rand(n_samples, n_features)
resp = rng.rand(n_samples, 1)
X_resp = np.sqrt(resp) * X
nk = np.array([n_samples])
xk = np.zeros((1, n_features))
precs_pred = _estimate_gaussian_precisions_cholesky_full(resp, X,
nk, xk, 0)
covars_pred = linalg.inv(np.dot(precs_pred[0], precs_pred[0].T))
ecov = EmpiricalCovariance(assume_centered=True)
ecov.fit(X_resp)
assert_almost_equal(ecov.error_norm(covars_pred, norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred, norm='spectral'), 0)
# special case 2, assuming resp are all ones
resp = np.ones((n_samples, 1))
nk = np.array([n_samples])
xk = X.mean(axis=0).reshape((1, -1))
precs_pred = _estimate_gaussian_precisions_cholesky_full(resp, X,
nk, xk, 0)
covars_pred = linalg.inv(np.dot(precs_pred[0], precs_pred[0].T))
ecov = EmpiricalCovariance(assume_centered=False)
ecov.fit(X)
assert_almost_equal(ecov.error_norm(covars_pred, norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred, norm='spectral'), 0)
def test_suffstat_sk_tied():
# use equation Nk * Sk / N = S_tied
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 2, 2
resp = rng.rand(n_samples, n_components)
resp = resp / resp.sum(axis=1)[:, np.newaxis]
X = rng.rand(n_samples, n_features)
nk = resp.sum(axis=0)
xk = np.dot(resp.T, X) / nk[:, np.newaxis]
precs_pred_full = _estimate_gaussian_precisions_cholesky_full(resp, X,
nk, xk, 0)
covars_pred_full = [linalg.inv(np.dot(precision_chol, precision_chol.T))
for precision_chol in precs_pred_full]
covars_pred_full = np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full,
0) / n_samples
precs_pred_tied = _estimate_gaussian_precisions_cholesky_tied(resp, X,
nk, xk, 0)
covars_pred_tied = linalg.inv(np.dot(precs_pred_tied, precs_pred_tied.T))
ecov = EmpiricalCovariance()
ecov.covariance_ = covars_pred_full
assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(covars_pred_tied, norm='spectral'), 0)
def test_suffstat_sk_diag():
# test against 'full' case
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 2, 2
resp = rng.rand(n_samples, n_components)
resp = resp / resp.sum(axis=1)[:, np.newaxis]
X = rng.rand(n_samples, n_features)
nk = resp.sum(axis=0)
xk = np.dot(resp.T, X) / nk[:, np.newaxis]
precs_pred_full = _estimate_gaussian_precisions_cholesky_full(resp, X,
nk, xk, 0)
covars_pred_full = [linalg.inv(np.dot(precision_chol, precision_chol.T))
for precision_chol in precs_pred_full]
precs_pred_diag = _estimate_gaussian_precisions_cholesky_diag(resp, X,
nk, xk, 0)
covars_pred_diag = np.array([np.diag(1. / d) ** 2
for d in precs_pred_diag])
ecov = EmpiricalCovariance()
for (cov_full, cov_diag) in zip(covars_pred_full, covars_pred_diag):
ecov.covariance_ = np.diag(np.diag(cov_full))
assert_almost_equal(ecov.error_norm(cov_diag, norm='frobenius'), 0)
assert_almost_equal(ecov.error_norm(cov_diag, norm='spectral'), 0)
def test_gaussian_suffstat_sk_spherical():
# the spherical covariance equals the variance of the one-dimensional
# data after flattening, n_components=1
rng = np.random.RandomState(0)
n_samples, n_features = 500, 2
X = rng.rand(n_samples, n_features)
X = X - X.mean()
resp = np.ones((n_samples, 1))
nk = np.array([n_samples])
xk = X.mean()
precs_pred_spherical = _estimate_gaussian_precisions_cholesky_spherical(
resp, X, nk, xk, 0)
covars_pred_spherical = (np.dot(X.flatten().T, X.flatten()) /
(n_features * n_samples))
assert_almost_equal(1. / precs_pred_spherical ** 2, covars_pred_spherical)
def _naive_lmvnpdf_diag(X, means, covars):
resp = np.empty((len(X), len(means)))
stds = np.sqrt(covars)
for i, (mean, std) in enumerate(zip(means, stds)):
resp[:, i] = stats.norm.logpdf(X, mean, std).sum(axis=1)
return resp
def test_gaussian_mixture_log_probabilities():
from sklearn.mixture.gaussian_mixture import (
_estimate_log_gaussian_prob_full,
_estimate_log_gaussian_prob_tied,
_estimate_log_gaussian_prob_diag,
_estimate_log_gaussian_prob_spherical)
# test against _naive_lmvnpdf_diag
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_samples = 500
n_features = rand_data.n_features
n_components = rand_data.n_components
means = rand_data.means
covars_diag = rng.rand(n_components, n_features)
X = rng.rand(n_samples, n_features)
log_prob_naive = _naive_lmvnpdf_diag(X, means, covars_diag)
# full covariances
precs_full = np.array([np.diag(1. / np.sqrt(x)) for x in covars_diag])
log_prob = _estimate_log_gaussian_prob_full(X, means, precs_full)
assert_array_almost_equal(log_prob, log_prob_naive)
# diag covariances
precs_chol_diag = 1. / np.sqrt(covars_diag)
log_prob = _estimate_log_gaussian_prob_diag(X, means, precs_chol_diag)
assert_array_almost_equal(log_prob, log_prob_naive)
# tied
covars_tied = np.array([x for x in covars_diag]).mean(axis=0)
precs_tied = np.diag(np.sqrt(1. / covars_tied))
log_prob_naive = _naive_lmvnpdf_diag(X, means,
[covars_tied] * n_components)
log_prob = _estimate_log_gaussian_prob_tied(X, means, precs_tied)
assert_array_almost_equal(log_prob, log_prob_naive)
# spherical
covars_spherical = covars_diag.mean(axis=1)
precs_spherical = 1. / np.sqrt(covars_diag.mean(axis=1))
log_prob_naive = _naive_lmvnpdf_diag(X, means,
[[k] * n_features for k in
covars_spherical])
log_prob = _estimate_log_gaussian_prob_spherical(X, means, precs_spherical)
assert_array_almost_equal(log_prob, log_prob_naive)
# skip tests on weighted_log_probabilities, log_weights
def test_gaussian_mixture_estimate_log_prob_resp():
# test whether responsibilities are normalized
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=5)
n_samples = rand_data.n_samples
n_features = rand_data.n_features
n_components = rand_data.n_components
X = rng.rand(n_samples, n_features)
for covar_type in COVARIANCE_TYPE:
weights = rand_data.weights
means = rand_data.means
precisions = rand_data.precisions[covar_type]
g = GaussianMixture(n_components=n_components, random_state=rng,
weights_init=weights, means_init=means,
precisions_init=precisions,
covariance_type=covar_type)
g.fit(X)
resp = g.predict_proba(X)
assert_array_almost_equal(resp.sum(axis=1), np.ones(n_samples))
assert_array_equal(g.weights_init, weights)
assert_array_equal(g.means_init, means)
assert_array_equal(g.precisions_init, precisions)
def test_gaussian_mixture_predict_predict_proba():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
Y = rand_data.Y
g = GaussianMixture(n_components=rand_data.n_components,
random_state=rng, weights_init=rand_data.weights,
means_init=rand_data.means,
precisions_init=rand_data.precisions[covar_type],
covariance_type=covar_type)
# Check that a NotFittedError is raised if we call predict before fit
assert_raise_message(NotFittedError,
"This GaussianMixture instance is not fitted "
"yet. Call 'fit' with appropriate arguments "
"before using this method.", g.predict, X)
g.fit(X)
Y_pred = g.predict(X)
Y_pred_proba = g.predict_proba(X).argmax(axis=1)
assert_array_equal(Y_pred, Y_pred_proba)
assert_greater(adjusted_rand_score(Y, Y_pred), .95)
def test_gaussian_mixture_fit():
# recover the ground truth
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_features = rand_data.n_features
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=20,
reg_covar=0, random_state=rng,
covariance_type=covar_type)
g.fit(X)
# needs more data to pass the test with rtol=1e-7
assert_allclose(np.sort(g.weights_), np.sort(rand_data.weights),
rtol=0.1, atol=1e-2)
arg_idx1 = g.means_[:, 0].argsort()
arg_idx2 = rand_data.means[:, 0].argsort()
assert_allclose(g.means_[arg_idx1], rand_data.means[arg_idx2],
rtol=0.1, atol=1e-2)
if covar_type == 'full':
prec_pred = g.precisions_
prec_test = rand_data.precisions['full']
elif covar_type == 'tied':
prec_pred = np.array([g.precisions_] * n_components)
prec_test = np.array([rand_data.precisions['tied']] * n_components)
elif covar_type == 'spherical':
prec_pred = np.array([np.eye(n_features) * c
for c in g.precisions_])
prec_test = np.array([np.eye(n_features) * c for c in
rand_data.precisions['spherical']])
elif covar_type == 'diag':
prec_pred = np.array([np.diag(d) for d in g.precisions_])
prec_test = np.array([np.diag(d) for d in
rand_data.precisions['diag']])
arg_idx1 = np.trace(prec_pred, axis1=1, axis2=2).argsort()
arg_idx2 = np.trace(prec_test, axis1=1, axis2=2).argsort()
for k, h in zip(arg_idx1, arg_idx2):
ecov = EmpiricalCovariance()
ecov.covariance_ = prec_test[h]
# the accuracy depends on the amount of data and the randomness (rng)
assert_allclose(ecov.error_norm(prec_pred[k]), 0, atol=0.1)
def test_gaussian_mixture_fit_best_params():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
n_init = 10
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type)
ll = []
for _ in range(n_init):
g.fit(X)
ll.append(g.score(X))
ll = np.array(ll)
g_best = GaussianMixture(n_components=n_components,
n_init=n_init, reg_covar=0, random_state=rng,
covariance_type=covar_type)
g_best.fit(X)
assert_almost_equal(ll.min(), g_best.score(X))
def test_gaussian_mixture_fit_convergence_warning():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=1)
n_components = rand_data.n_components
max_iter = 1
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1,
max_iter=max_iter, reg_covar=0, random_state=rng,
covariance_type=covar_type)
assert_warns_message(ConvergenceWarning,
'Initialization %d did not converged. '
'Try different init parameters, '
'or increase n_init, tol '
'or check for degenerate data.'
% max_iter, g.fit, X)
def test_multiple_init():
# Test that multiple inits do not perform much worse than a single one
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 5, 2
X = rng.randn(n_samples, n_features)
for cv_type in COVARIANCE_TYPE:
train1 = GaussianMixture(n_components=n_components,
covariance_type=cv_type,
random_state=rng).fit(X).score(X)
train2 = GaussianMixture(n_components=n_components,
covariance_type=cv_type,
random_state=rng, n_init=5).fit(X).score(X)
assert_greater_equal(train2, train1)
def test_gaussian_mixture_n_parameters():
# Test that the right number of parameters is estimated
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 5, 2
X = rng.randn(n_samples, n_features)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in COVARIANCE_TYPE:
g = GaussianMixture(
n_components=n_components, covariance_type=cv_type,
random_state=rng).fit(X)
assert_equal(g._n_parameters(), n_params[cv_type])
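# Illustrative sketch (not part of the original test): where the hard-coded
# counts above come from for n_components=2 and n_features=5, namely k*d mean
# parameters, k-1 free weights, plus the covariance parameters of each
# covariance_type. The helper name `_expected_gmm_n_parameters` is the
# editor's own.
def _expected_gmm_n_parameters(k, d):
    cov_params = {
        'spherical': k,                # one variance per component
        'diag': k * d,                 # one variance per component and feature
        'tied': d * (d + 1) // 2,      # a single shared symmetric matrix
        'full': k * d * (d + 1) // 2,  # one symmetric matrix per component
    }
    return {cv: k * d + (k - 1) + c for cv, c in cov_params.items()}
# _expected_gmm_n_parameters(2, 5) gives
# {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}, matching n_params.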
def test_bic_1d_1component():
# Test that all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
rng = np.random.RandomState(0)
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
bic_full = GaussianMixture(n_components=n_components,
covariance_type='full',
random_state=rng).fit(X).bic(X)
for covariance_type in ['tied', 'diag', 'spherical']:
bic = GaussianMixture(n_components=n_components,
covariance_type=covariance_type,
random_state=rng).fit(X).bic(X)
assert_almost_equal(bic_full, bic)
def test_gaussian_mixture_aic_bic():
# Test the aic and bic criteria
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 3, 2
X = rng.randn(n_samples, n_features)
# standard gaussian entropy
sgh = 0.5 * (fast_logdet(np.cov(X.T, bias=1)) +
n_features * (1 + np.log(2 * np.pi)))
for cv_type in COVARIANCE_TYPE:
g = GaussianMixture(
n_components=n_components, covariance_type=cv_type,
random_state=rng, max_iter=200)
g.fit(X)
aic = 2 * n_samples * sgh + 2 * g._n_parameters()
bic = (2 * n_samples * sgh +
np.log(n_samples) * g._n_parameters())
bound = n_features / np.sqrt(n_samples)
assert_true((g.aic(X) - aic) / n_samples < bound)
assert_true((g.bic(X) - bic) / n_samples < bound)
def test_gaussian_mixture_verbose():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type,
verbose=1)
h = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
h.fit(X)
finally:
sys.stdout = old_stdout
def test_warm_start():
random_state = 0
rng = np.random.RandomState(random_state)
n_samples, n_features, n_components = 500, 2, 2
X = rng.rand(n_samples, n_features)
# Assert that warm_start gives the same result for the same number of iterations
g = GaussianMixture(n_components=n_components, n_init=1,
max_iter=2, reg_covar=0, random_state=random_state,
warm_start=False)
h = GaussianMixture(n_components=n_components, n_init=1,
max_iter=1, reg_covar=0, random_state=random_state,
warm_start=True)
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
g.fit(X)
score1 = h.fit(X).score(X)
score2 = h.fit(X).score(X)
assert_almost_equal(g.weights_, h.weights_)
assert_almost_equal(g.means_, h.means_)
assert_almost_equal(g.precisions_, h.precisions_)
assert_greater(score2, score1)
# Assert that by using warm_start we can converge to a good solution
g = GaussianMixture(n_components=n_components, n_init=1,
max_iter=5, reg_covar=0, random_state=random_state,
warm_start=False, tol=1e-6)
h = GaussianMixture(n_components=n_components, n_init=1,
max_iter=5, reg_covar=0, random_state=random_state,
warm_start=True, tol=1e-6)
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
g.fit(X)
h.fit(X).fit(X)
assert_true(not g.converged_)
assert_true(h.converged_)
def test_score():
covar_type = 'full'
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
X = rand_data.X[covar_type]
# Check the error message if we don't call fit
gmm1 = GaussianMixture(n_components=n_components, n_init=1,
max_iter=1, reg_covar=0, random_state=rng,
covariance_type=covar_type)
assert_raise_message(NotFittedError,
"This GaussianMixture instance is not fitted "
"yet. Call 'fit' with appropriate arguments "
"before using this method.", gmm1.score, X)
# Check score value
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
gmm1.fit(X)
gmm_score = gmm1.score(X)
gmm_score_proba = gmm1.score_samples(X).mean()
assert_almost_equal(gmm_score, gmm_score_proba)
# Check that the score increases
gmm2 = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng,
covariance_type=covar_type).fit(X)
assert_greater(gmm2.score(X), gmm1.score(X))
def test_score_samples():
covar_type = 'full'
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
X = rand_data.X[covar_type]
# Check the error message if we don't call fit
gmm = GaussianMixture(n_components=n_components, n_init=1, reg_covar=0,
random_state=rng, covariance_type=covar_type)
assert_raise_message(NotFittedError,
"This GaussianMixture instance is not fitted "
"yet. Call 'fit' with appropriate arguments "
"before using this method.", gmm.score_samples, X)
gmm_score_samples = gmm.fit(X).score_samples(X)
assert_equal(gmm_score_samples.shape[0], rand_data.n_samples)
def test_monotonic_likelihood():
# We check that each step of the EM without regularization improves
# the training set likelihood monotonically
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, reg_covar=0,
warm_start=True, max_iter=1, random_state=rng,
tol=1e-7)
current_log_likelihood = -np.infty
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
# Do one training iteration at a time so we can make sure that the
# training log likelihood increases after each iteration.
for _ in range(300):
prev_log_likelihood = current_log_likelihood
try:
current_log_likelihood = gmm.fit(X).score(X)
except ConvergenceWarning:
pass
assert_greater_equal(current_log_likelihood,
prev_log_likelihood)
if gmm.converged_:
break
def test_regularisation():
# We train the GaussianMixture on degenerate data by defining two clusters
# of zero covariance.
rng = np.random.RandomState(0)
n_samples, n_features = 10, 5
X = np.vstack((np.ones((n_samples // 2, n_features)),
np.zeros((n_samples // 2, n_features))))
for covar_type in COVARIANCE_TYPE:
gmm = GaussianMixture(n_components=n_samples, reg_covar=0,
covariance_type=covar_type, random_state=rng)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
assert_raise_message(ValueError,
"The algorithm has diverged because of too "
"few samples per components. "
"Try to decrease the number of components, "
"or increase reg_covar.", gmm.fit, X)
gmm.set_params(reg_covar=1e-6).fit(X)
def test_property():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(n_components=n_components,
covariance_type=covar_type, random_state=rng)
gmm.fit(X)
print(covar_type)
if covar_type == 'full':
for prec, covar in zip(gmm.precisions_, gmm.covariances_):
assert_array_almost_equal(linalg.inv(prec), covar)
elif covar_type == 'tied':
assert_array_almost_equal(linalg.inv(gmm.precisions_),
gmm.covariances_)
else:
assert_array_almost_equal(gmm.precisions_, 1. / gmm.covariances_)
| bsd-3-clause |
antoinecarme/pyaf | tests/perf/test_cycles_full_long_long.py | 1 | 1541 |
import pandas as pd
import numpy as np
import pyaf.ForecastEngine as autof
import pyaf.Bench.TS_datasets as tsds
#get_ipython().magic('matplotlib inline')
def test_nbrows_cycle(nbrows , cyc):
# lValues = [ k for k in range(2,24, 4)];
# lValues = lValues + [ k for k in range(24, 128, 8)];
# for cyc in lValues:
print("TEST_CYCLES_START", nbrows, cyc)
b1 = tsds.generate_random_TS(N = nbrows , FREQ = 'H', seed = 0, trendtype = "constant", cycle_length = cyc, transform = "None", sigma = 0.1, exog_count = 0, ar_order=0);
df = b1.mPastData
# df.tail(10)
# df[:-10].tail()
# df[:-10:-1]
# df.describe()
lEngine = autof.cForecastEngine()
lEngine.mOptions.mCycleLengths = [ k for k in range(2, cyc * 4) ];
lEngine
H = cyc * 2;
lEngine.train(df , b1.mTimeVar , b1.mSignalVar, H);
lEngine.getModelInfo();
lEngine.mSignalDecomposition.mBestModel.mTimeInfo.mResolution
dfapp_in = df.copy();
dfapp_in.tail()
# H = 12
dfapp_out = lEngine.forecast(dfapp_in, H);
dfapp_out.tail(2 * H)
print("Forecast Columns " , dfapp_out.columns);
Forecast_DF = dfapp_out[[b1.mTimeVar , b1.mSignalVar, b1.mSignalVar + '_Forecast']]
print(Forecast_DF.info())
print("Forecasts\n" , Forecast_DF.tail(H).values);
print("\n\n<ModelInfo>")
print(lEngine.to_json());
print("</ModelInfo>\n\n")
print("\n\n<Forecast>")
print(Forecast_DF.tail(H).to_json(date_format='iso'))
print("</Forecast>\n\n")
print("TEST_CYCLES_END", cyc)
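# Illustrative sketch (not part of the original file): the test function above
# is driven elsewhere in the benchmark suite; a single invocation might look
# like the following. The values 3200 and 24 are the editor's own choice, and
# training can be slow for long series.
if __name__ == '__main__':
    test_nbrows_cycle(3200, 24)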
| bsd-3-clause |
alvarofierroclavero/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 |
"""
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
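# ---------------------------------------------------------------------------
# Minimal, self-contained sketch (not part of the original example) of the
# core idea described in the docstring: fit a kernel density estimate over
# latitude/longitude points with the Haversine metric, which expects
# coordinates in radians. The sample coordinates and the demo name are the
# editor's own.
def _haversine_kde_demo():
    import numpy as np
    from sklearn.neighbors import KernelDensity

    latlon_deg = np.array([[-3.0, -60.0], [-3.5, -60.5], [-4.0, -61.0]])
    latlon_rad = np.radians(latlon_deg)
    kde = KernelDensity(bandwidth=0.04, metric='haversine',
                        kernel='gaussian', algorithm='ball_tree')
    kde.fit(latlon_rad)
    return kde.score_samples(latlon_rad)  # log-density at the training points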
| bsd-3-clause |
adwiputra/LUMENS-repo | processing/algs/PolarPlot.py | 6 | 3110 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
BarPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
import matplotlib.cm as cm
from matplotlib.pyplot import figure, show, rc
import numpy as np
from PyQt4.QtCore import *
from qgis.core import *
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.parameters.ParameterTable import ParameterTable
from processing.parameters.ParameterTableField import ParameterTableField
from processing.outputs.OutputHTML import OutputHTML
from processing.tools import *
class PolarPlot(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NAME_FIELD = 'NAME_FIELD'
VALUE_FIELD = 'VALUE_FIELD'
def processAlgorithm(self, progress):
uri = self.getParameterValue(self.INPUT)
layer = getObjectFromUri(uri)
namefieldname = self.getParameterValue(self.NAME_FIELD)
valuefieldname = self.getParameterValue(self.VALUE_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.getAttributeValues(layer, namefieldname,
valuefieldname)
plt.close()
fig = figure(figsize=(8, 8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
N = len(values[valuefieldname])
theta = np.arange(0.0, 2 * np.pi, 2 * np.pi / N)
radii = values[valuefieldname]
width = 2 * np.pi / N
ax.bar(theta, radii, width=width, bottom=0.0)
plotFilename = output + '.png'
lab.savefig(plotFilename)
f = open(output, 'w')
f.write('<img src="' + plotFilename + '"/>')
f.close()
def defineCharacteristics(self):
self.name = 'Polar plot'
self.group = 'Graphics'
self.addParameter(ParameterTable(self.INPUT, 'Input table'))
self.addParameter(ParameterTableField(self.NAME_FIELD,
'Category name field', self.INPUT))
self.addParameter(ParameterTableField(self.VALUE_FIELD, 'Value field',
self.INPUT))
self.addOutput(OutputHTML(self.OUTPUT, 'Output'))
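# Illustrative sketch (not part of the original algorithm): the core of
# processAlgorithm above reduced to a standalone matplotlib polar bar chart.
# N values are spread evenly around the circle and drawn as bars of equal
# angular width. The sample values and the demo name are the editor's own.
def _polar_bar_demo(values=(3, 7, 2, 9, 4, 6)):
    import numpy as np
    from matplotlib.pyplot import figure

    fig = figure(figsize=(8, 8))
    ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
    N = len(values)
    theta = np.arange(0.0, 2 * np.pi, 2 * np.pi / N)
    width = 2 * np.pi / N
    ax.bar(theta, values, width=width, bottom=0.0)
    return fig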
| gpl-2.0 |
timestocome/Test-stock-prediction-algorithms | StockMarketTimeSeriesAnomalies/YearlyReturnsHistograms.py | 1 | 2183 |
# http://github.com/timestocome
# take a look at the differences in daily returns for recent bull and bear markets
# plot daily returns for each year and check each plot against that year's return
# power-law-like histograms dominate in good years, flatter ones in bear markets
# looks like the histogram flattens as the market peaks, might be a leading indicator
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
######################################################################
# data
########################################################################
# read in datafile created in LoadAndMatchDates.py
data = pd.read_csv('StockDataWithVolume.csv', index_col='Date', parse_dates=True)
features = [data.columns.values]
# switch to log data
data['logDJIA'] = np.log(data['DJIA'])
data['logNASDAQ'] = np.log(data['NASDAQ'])
data['logS&P'] = np.log(data['S&P'])
data['logRussell'] = np.log(data['Russell 2000'])
# log returns
data['logReturnsNASDAQ'] = data['logNASDAQ'] - data['logNASDAQ'].shift(1)
data['logReturnsDJIA'] = data['logDJIA'] - data['logDJIA'].shift(1)
data['logReturnsS&P'] = data['logS&P'] - data['logS&P'].shift(1)
data['logReturnsRussell'] = data['logRussell'] - data['logRussell'].shift(1)
# remove nan row from target creation
data = data.dropna()
bins = [-0.16, -0.14, -0.12, -0.10, -0.08, -0.06, -0.04, -0.02, 0.0, 0.02, 0.04, 0.06, 0.07, 0.08, 0.10, 0.12, 0.14, 0.16]
def plot_histogram(d):
# bin one year of daily log returns; return the counts and bin edges
n, b, _ = plt.hist(d, bins=bins)
return (n, b)
plot_histogram(data['logReturnsNASDAQ'])
h_plots = data['logReturnsNASDAQ'].groupby([data.index.year]).apply(plot_histogram)
plots = []
y = 1990
r = 3
c = 9
plot_n = 1
plt.figure(figsize=(30, 12))
for i, p in h_plots.iteritems():
plt.subplot(r, c, plot_n)
n = p[0]
bins = p[1]
plt.bar(bins[:-1], n, width=.02)
plt.xlim(min(bins), max(bins))
start_date = '01/01/%d' % y
end_date = '12/31/%d' % y
yearly_returns = data.loc[(data.index >= start_date) & (data.index <= end_date) ]
plt.title("%d LogNet: %f" % (y, yearly_returns['logReturnsNASDAQ'].sum()))
y += 1
plot_n += 1
plt.savefig('BullBearHistograms.png')
plt.show()
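# Illustrative sketch (not part of the original script): one way to quantify
# the "flattening" hypothesis in the comments above is to track the excess
# kurtosis of each year's log returns: a peaked, heavy-tailed year scores
# high, a flatter distribution scores lower. Reuses the `data` frame built
# above; the kurtosis choice is the editor's own assumption.
from scipy.stats import kurtosis
yearly_kurtosis = data['logReturnsNASDAQ'].groupby(data.index.year).apply(
    lambda returns: kurtosis(returns, fisher=True))
print(yearly_kurtosis)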
| mit |
shahankhatch/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 |
"""
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
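# Illustrative sketch (not part of the original example): the thresholding
# idea described in the docstring, in isolation. Given decision_function
# values and a known contamination fraction, points scoring below that
# percentile are flagged as outliers. The toy scores and the demo name are
# the editor's own; `np` and `stats` are the imports already used above.
def _threshold_by_contamination_demo():
    scores = np.array([2.1, 1.8, -0.3, 1.9, -1.2, 2.0, 1.7, -0.8])
    contamination = 0.25
    threshold = stats.scoreatpercentile(scores, 100 * contamination)
    is_inlier = scores > threshold     # 2 of the 8 toy points fall below it
    return threshold, is_inlier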
| bsd-3-clause |
samzhang111/scikit-learn | sklearn/metrics/cluster/supervised.py | 22 | 30444 |
"""Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings are matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, max_n_classes=5000):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
max_n_classes : int, optional (default=5000)
Maximal number of classes handled for contingency_matrix.
This helps to avoid MemoryError with regression targets
for mutual_information.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
if n_classes > max_n_classes:
raise ValueError("Too many classes for a clustering metric. If you "
"want to increase the limit, pass parameter "
"max_n_classes to the scoring function")
if n_clusters > max_n_classes:
raise ValueError("Too many clusters for a clustering metric. If you "
"want to increase the limit, pass parameter "
"max_n_classes to the scoring function")
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
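# Illustrative sketch (not part of the original module): for two toy
# labelings the contingency matrix counts how many samples fall into each
# (true class, predicted cluster) pair; every metric below is derived from
# these counts. The demo function name is the editor's own.
def _contingency_matrix_demo():
    labels_true = [0, 0, 1, 1, 1]
    labels_pred = [0, 0, 0, 1, 1]
    # Returns array([[2, 0],
    #                [1, 2]])
    return contingency_matrix(labels_true, labels_pred)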
# clustering measures
def adjusted_rand_score(labels_true, labels_pred, max_n_classes=5000):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1, even after permuting the labels::
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
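# Illustrative sketch (not part of the original module): a worked instance of
# the formula above for labels_true=[0, 0, 1, 1], labels_pred=[0, 0, 1, 2]
# (the ~0.57 case in the docstring). The demo function name is the editor's
# own.
#   contingency = [[2, 0, 0],
#                  [0, 1, 1]]
#   sum_comb_c = (2 choose 2) + (2 choose 2) = 2    (row sums 2, 2)
#   sum_comb_k = (2 choose 2) + 0 + 0        = 1    (column sums 2, 1, 1)
#   sum_comb   = (2 choose 2)                = 1    (over all cells)
#   prod_comb  = 2 * 1 / (4 choose 2) = 1/3
#   mean_comb  = (1 + 2) / 2 = 1.5
#   ARI = (1 - 1/3) / (1.5 - 1/3) = 4/7 ~ 0.571
def _adjusted_rand_by_hand_demo():
    by_hand = (1 - 1. / 3) / (1.5 - 1. / 3)
    from_function = adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2])
    assert abs(by_hand - from_function) < 1e-12
    return by_hand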
def homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes=5000):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of the ground
truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred,
max_n_classes=max_n_classes)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred, max_n_classes=5000):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[0]
def completeness_score(labels_true, labels_pred, max_n_classes=5000):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[1]
def v_measure_score(labels_true, labels_pred, max_n_classes=5000):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the adjusted_rand_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete be not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous, but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred,
max_n_classes)[2]
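# Illustrative sketch (not part of the original module): a quick numerical
# check of the harmonic-mean relation stated above, using the module's own
# scoring functions on a toy labeling. The demo function name is the
# editor's own.
def _v_measure_harmonic_mean_demo():
    labels_true = [0, 0, 1, 2]
    labels_pred = [0, 0, 1, 1]
    h = homogeneity_score(labels_true, labels_pred)
    c = completeness_score(labels_true, labels_pred)
    v = v_measure_score(labels_true, labels_pred)
    assert abs(v - 2 * h * c / (h + c)) < 1e-10
    return h, c, v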
def mutual_info_score(labels_true, labels_pred, contingency=None,
max_n_classes=5000):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
max_n_classes: int, optional (default=5000)
Maximal number of classes handled by the mutual_info_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to
    # avoid possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
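# Editor's note -- illustrative worked example, not part of the original
# module; the helper name is the editor's own. For the labelings
# [0, 0, 1, 1] vs [0, 0, 1, 1] the contingency table is [[2, 0], [0, 2]], so
# P(i, j) = 0.5 on the diagonal and the formula in the docstring gives
# 2 * 0.5 * log(0.5 / (0.5 * 0.5)) = log(2) ~= 0.693 nats.
def _mutual_info_worked_example_sketch():
    mi = mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
    assert abs(mi - np.log(2)) < 1e-9
    return mi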
def adjusted_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
max_n_classes: int, optional (default=5000)
        Maximal number of classes handled by the adjusted_mutual_info_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
    ami: float (upper bound of 1.0)
       The AMI returns a value of 1 when the two partitions are identical
       (i.e. perfectly matched). Random partitions (independent labellings)
       have an expected AMI of around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
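# Editor's note -- illustrative sketch, not part of the original module; the
# helper name, the seed and the label sizes are arbitrary editorial choices.
# It demonstrates the chance adjustment described above: the AMI of many
# pairs of independent random labelings averages out near 0, whereas the raw
# mutual information of the same pairs would be strictly positive.
def _ami_chance_adjustment_sketch(n_samples=200, n_runs=50, seed=0):
    rng = np.random.RandomState(seed)
    amis = [adjusted_mutual_info_score(rng.randint(0, 10, n_samples),
                                       rng.randint(0, 10, n_samples))
            for _ in range(n_runs)]
    return np.mean(amis)  # expected to lie close to 0.0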
def normalized_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
    Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
    This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
    measure the agreement of two independent label assignment strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
max_n_classes: int, optional (default=5000)
        Maximal number of classes handled by the normalized_mutual_info_score
metric. Setting it too high can lead to MemoryError or OS
freeze
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
    If class members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred,
max_n_classes=max_n_classes)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
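# Editor's note -- illustrative sketch, not part of the original module; the
# helper name is the editor's own. It re-derives the NMI for one example as
# the raw mutual information divided by the geometric mean of the two label
# entropies, mirroring the computation just above.
def _nmi_normalisation_sketch(labels_true=(0, 0, 1, 1), labels_pred=(0, 0, 1, 2)):
    labels_true, labels_pred = list(labels_true), list(labels_pred)
    mi = mutual_info_score(labels_true, labels_pred)
    expected = mi / np.sqrt(entropy(labels_true) * entropy(labels_pred))
    assert abs(normalized_mutual_info_score(labels_true, labels_pred)
               - expected) < 1e-9
    return expected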
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to
    # avoid possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
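# Editor's note -- illustrative worked example, not part of the original
# module; the helper name is the editor's own. For the labeling [0, 0, 1, 1]
# each class has probability 0.5, so the entropy computed above is
# -(0.5 * log(0.5) + 0.5 * log(0.5)) = log(2) ~= 0.693 nats.
def _entropy_worked_example_sketch():
    assert abs(entropy([0, 0, 1, 1]) - np.log(2)) < 1e-12
    return entropy([0, 0, 1, 1])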
| bsd-3-clause |
bhilburn/gnuradio | gr-filter/examples/fft_filter_ccc.py | 47 | 4363 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fft_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw0, bw1, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw0 = bw0
self._bw1 = bw1
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.complex_band_pass_2(1, self._fs,
self._bw0, self._bw1,
self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fft_filter_ccc(self._decim, taps)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_out = blocks.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-S", "--start-pass", type="eng_float", default=1000,
help="Start of Passband [default=%default]")
parser.add_option("-E", "--end-pass", type="eng_float", default=2000,
help="End of Passband [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fft_filter_ccc(options.nsamples,
options.samplerate,
options.start_pass,
options.end_pass,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
h2educ/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
koyadovic/Dia | predictive/systems/statistical/analysis/basics/hba1c.py | 1 | 13047 | # -*- coding: utf-8 -*-
import matplotlib.patches as mpatches
import matplotlib.ticker as mticker
import numpy as np
import math
from ...analysis.tools.stats import PolynomialRegression, LinearRegression
# MODELS
################################################################################
from ...analysis.model import engine as predictive_engine
from ...analysis.model import analysis_session
from sqlalchemy import Column, Integer, Float, Date
from sqlalchemy.ext.declarative import declarative_base
from predictive.systems.statistical.analysis.tools.graphs import Graph
from predictive.systems.statistical.analysis.tools.property import propertycached
from predictive.systems.statistical.analysis.basics.context import Context
from predictive.systems.statistical.analysis.basics.daytimes import DayTimes
from predictive.systems.statistical.tools.dates import Datetime, Timedelta
from dia.core import diacore
Base = declarative_base(predictive_engine)
class _HbA1cRecord(Base):
""""""
__tablename__ = 'analysis_basics_hba1c_records'
""""""
id = Column(Integer, primary_key=True) # lint:ok
user_pk = Column(Integer, nullable=False, index=True)
date = Column(Date, nullable=False, index=True)
    # recommended doses absorbed in 24 hours
value = Column(Float, nullable=False)
def __str__(self):
st = "HbA1cRecord: user_pk: {}, date: {}, value: {}".format(
self.user_pk,
self.date,
self.value
)
return st
@staticmethod
def last_values(**kvargs):
options = {
'user_pk': None,
'from_date': None,
'until_date': None,
'order_by_date': True,
'desc_order': False,
'limit': None }
options.update(kvargs)
if options['user_pk'] == None:
return None
se = analysis_session()
r_query = se.query(_HbA1cRecord).\
filter(_HbA1cRecord.user_pk == options['user_pk'])
"""
Dates
"""
if options['until_date'] != None:
r_query = r_query.filter(_HbA1cRecord.date <= options['until_date'])
if options['from_date'] != None:
r_query = r_query.filter(_HbA1cRecord.date >= options['from_date'])
"""
Order
"""
if options['order_by_date']:
if options['desc_order']:
r_query = r_query.order_by(_HbA1cRecord.date.desc())
else:
r_query = r_query.order_by(_HbA1cRecord.date)
"""
Limit
"""
if options['limit'] != None:
r_query = r_query.limit(options['limit'])
return r_query.all()
@staticmethod
def update_value(user_pk, date, value):
se = analysis_session()
last = se.query(_HbA1cRecord).\
filter(_HbA1cRecord.user_pk == user_pk).\
filter(_HbA1cRecord.date == date).\
first()
if last == None:
record = _HbA1cRecord(
user_pk=user_pk,
date=date,
value=value
)
se.add(record)
else:
last.value = value
se.commit()
Base.metadata.create_all(checkfirst=True)
# End of the models
##########################################################################
class _HbA1cGraph(Graph):
def __init__(self, parent, current_value):
options = {
"name": 'HbA1c',
"figsize": (8, 6),
# "dpi": 70,
"cols": 1,
}
super(_HbA1cGraph, self).__init__(**options)
self.parent = parent
self.add_drawing_routine(self._drawing_routine())
def _drawing_routine(self):
def draw_routine(ax):
x_days_old = self.parent.records_days_passed
y_values = self.parent.records_values
"""
            Fill in things like the title and the colors
"""
GREEN_ZONE = [4., 6.2]
ORANGE_ZONE = [6.2, 7.5]
RED_ZONE = [7.5, 10.]
if len(y_values) > 0:
min_value = np.min(y_values)
max_value = np.max(y_values)
min_value -= 0.5
max_value += 0.5
if min_value > RED_ZONE[0]:
GREEN_ZONE = None
ORANGE_ZONE = None
RED_ZONE[0] = min_value
elif min_value > ORANGE_ZONE[0]:
GREEN_ZONE = None
ORANGE_ZONE[0] = min_value
elif min_value > GREEN_ZONE[0]:
GREEN_ZONE[0] = min_value
if max_value < GREEN_ZONE[1]:
RED_ZONE = None
ORANGE_ZONE = None
GREEN_ZONE[1] = max_value
elif max_value < ORANGE_ZONE[1]:
RED_ZONE = None
ORANGE_ZONE[1] = max_value
else:
RED_ZONE[1] = max_value
ax.set_ylim(min_value, max_value)
ax.set_xlim(np.min(x_days_old), np.max(x_days_old) + 1)
ax.set_title("Hemoglobina glicosilada")
ax.set_xlabel(u"Días de antigüedad")
ax.set_ylabel(u"% Hemoglobina glicosilada")
ax.xaxis.set_major_formatter(mticker.FormatStrFormatter('$%d$'))
ax.yaxis.set_major_formatter(mticker.FormatStrFormatter('$%.1f$%%'))
ax.scatter(x_days_old, y_values, marker='*', color='blue', alpha=0.3)
"""
Linear regression
"""
linear_x, linear_y = self.parent.linear_regression.data_to_plot(np.min(x_days_old), np.max(x_days_old) + 1)
regression, = ax.plot(linear_x, linear_y, color='red', label=u"$f(x)={}$".format(self.parent.linear_regression.latex))
"""
Polynomial regression
"""
poly_x, poly_y = self.parent.polynomial_regression.data_to_plot(np.min(x_days_old), np.max(x_days_old) + 1)
poly_tex = self.parent.polynomial_regression.latex
poly_regression, = ax.plot(poly_x, poly_y, label=r"$g(x)={}$".format(poly_tex))
"""
            Zones
"""
if GREEN_ZONE != None:
ax.axhspan(GREEN_ZONE[0], GREEN_ZONE[1], color='green', alpha=0.05)
if ORANGE_ZONE != None:
ax.axhspan(ORANGE_ZONE[0], ORANGE_ZONE[1], color='orange', alpha=0.1)
if RED_ZONE != None:
ax.axhspan(RED_ZONE[0], RED_ZONE[1], color='red', alpha=0.1)
ax.axvspan(0, 0, color='black')
ax.annotate("{}: HbA1c ${}$%".format(self.parent.context.current_datetime.date(), round(self.parent.current_value, 1)),
xy=(0, self.parent.current_value), # theta, radius
xytext=(-2, ((self.parent.current_value - min_value) / 2.0) + min_value), # fraction, fraction
textcoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='right',
verticalalignment='bottom',
)
leg = mpatches.Patch(alpha=0., label="HbA1c actual: ${}$%".format(round(self.parent.current_value, 1)))
future_leg = None
if self.parent.linear_regression.f != None:
future_leg = mpatches.Patch(alpha=0., label=u"Estimación 30 días: ${}$%".format(round(self.parent.future_value(30), 1)))
if regression != None and future_leg != None:
ax.legend(loc='upper left', handles=[leg, future_leg, regression, poly_regression,])
else:
ax.legend(loc='upper left', handles=[leg])
ax.grid()
return lambda ax: draw_routine(ax)
class HbA1c(object):
"""
    This class is responsible for two things:
    1.- Performing a series of calculations on the most recent HbA1c entries:
    linear regression, current value, etc.
    2.- Making it easy to add new HbA1c entries
"""
def __init__(self, context):
self._c = context
self.records = _HbA1cRecord.last_values(
user_pk=self._c.user_pk,
until_date=self._c.current_datetime.date(),
order_by_date=True,
desc_order=True,
limit=120
)
if self.records == None:
self.records = []
if len(self.records) > 0:
self.linear_regression = LinearRegression(self.records_days_passed, self.records_values)
degree = len(self.records)
if degree > 3: degree = 3
self.polynomial_regression = PolynomialRegression(self.records_days_passed, self.records_values, degree)
self.graph = _HbA1cGraph(self, self.current_value)
@property
def context(self):
return self._c
def __str__(self):
str = """HbA1c:
Current value ................. {}%
Estimated value in 30 days .... {}%
Estimated value in 60 days .... {}%
Estimated value in 90 days .... {}%""".format(
self.current_value,
self.future_value(30),
self.future_value(60),
self.future_value(90))
return str
@propertycached
def records_days_passed(self):
list_ = [(record.date - self._c.current_datetime.date()).total_seconds() / 60. / 60. / 24. for record in self.records]
return list_
@propertycached
def records_values(self):
list_ = [record.value for record in self.records]
return list_
"""
    Current value
"""
@propertycached
def current_value(self):
if len(self.records) > 0:
return self.records[0].value
return 0.0
"""
    Future value
    .future_value(30) would return the estimated % in 30 days.
"""
def future_value(self, days):
f = self.linear_regression.f
return f(days)
@staticmethod
def _weight(days_old):
        assert days_old >= 0, "days_old must be greater than or equal to 0"
return 1.0 / (math.pow(2, (days_old / 30.0)))
@staticmethod
def _mgdl2hba1cpercentage(mgdl):
return ((mgdl - 60.) / 31.) + 4.
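    # Editor's note (illustrative arithmetic only, based on the conversion
    # coded above -- not an authoritative clinical formula): an average
    # glucose of 154 mg/dl maps to roughly ((154 - 60) / 31) + 4 = 7.03 %
    # HbA1c, and the exponential weight halves every 30 days, e.g.
    # _weight(30) == 0.5 and _weight(60) == 0.25.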
"""
    Recalculates the HbA1c for the specified start_dt.
    If a record exists it is updated; otherwise a new entry is added
"""
def recalculate(self, day_times):
glucoses = diacore.get_glucoses(
user_pk=self._c.user_pk,
from_utc_timestamp=(self._c.current_datetime - Timedelta(days=120)).utc_timestamp,
until_utc_timestamp=self._c.current_datetime.utc_timestamp,
order_by_utc_timestamp=True,
order_ascending=True
)
value = 0.
total = 0.
n = 0.
last_days_old = None
meals_day_times = []
snacks_day_times = []
for glucose in glucoses:
day_time = day_times.nearest_day_time(Datetime.utcfromtimestamp(glucose.utc_timestamp))
days_old = (self._c.current_datetime - Datetime.utcfromtimestamp(glucose.utc_timestamp)).total_days
if last_days_old == None: last_days_old = days_old
if last_days_old != days_old:
if len(snacks_day_times) < len(meals_day_times):
number = len(meals_day_times) - len(snacks_day_times)
for _ in range(number):
total += 150. * HbA1c._weight(last_days_old)
n += HbA1c._weight(last_days_old)
last_days_old = days_old
meals_day_times = []
snacks_day_times = []
if day_times.is_meal(Datetime.utcfromtimestamp(glucose.utc_timestamp)):
meals_day_times.append(day_time)
else:
snacks_day_times.append(day_time)
total += glucose.mgdl_level * HbA1c._weight(days_old)
n += HbA1c._weight(days_old)
if n > 0.:
value = HbA1c._mgdl2hba1cpercentage(total / n)
_HbA1cRecord.update_value(self._c.user_pk, self._c.current_datetime.date(), value)
def recalculate_hba1c(glucose):
context = Context(glucose.user_pk, glucose.utc_timestamp)
day_times = DayTimes(context)
hba1c = HbA1c(context)
hba1c.recalculate(day_times)
def main():
context = Context(1, Datetime(2017, 1, 12, 13, 0))
h = HbA1c(context)
h.graph.show()
import sys
sys.exit()
if __name__ == "__main__":
main()
| gpl-2.0 |
eclee25/flu-SDI-exploratory-age | scripts/create_fluseverity_figs_v5/S_zRR_excessMort_st_sentinels_v5.py | 1 | 6066 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 1/9/15
###Function: mean peak-based retro zRR metric vs. excess P&I mortality rate with best fit lines at state level for good "sentinel" states, severe states, and mildish states
###Import data: Py_export/SDI_st_classif_covCareAdj_v5_7.csv
###Command Line: python S_zRR_excessMort_st_sentinels_v5.py
##############################################
### notes ###
### packages/modules ###
import csv
import matplotlib.pyplot as plt
import numpy as np
## local modules ##
import functions_v5 as fxn
### data structures ###
### functions ###
### data files ###
stixin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_state_classif_covCareAdj_v5_7st.csv', 'r')
stixin.readline() # remove header
stix = csv.reader(stixin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
sl = fxn.gp_seasonlabels
fw = fxn.gp_fluweeks
fs = 24
fssml = 16
lwd = fxn.gp_linewidth
msz = 6
colorvec = fxn.gp_colors
sentinels = ['LA', 'IL']
severes = ['PA', 'MD', 'VA', 'NC', 'SC', 'FL']
mildishs = ['CA', 'OR', 'WA']
### program ###
## import severity index ##
d_st_classif = fxn.readStateClassifFile(stix)
# grab list of unique keys in dataset
plot_keys = [key for key in sorted(d_st_classif) if not np.isnan(d_st_classif[key][0])] # rm nan
## import excess P&I mortality rates ##
d_st_excessPI = fxn.excessPI_state_import()
## plot sentinels ##
fig1 = plt.figure()
ax1 = fig1.add_subplot(1,1,1)
for st, col in zip(sentinels, colorvec[:len(sentinels)]):
mask_keys = [key for key in plot_keys if key[1] == st]
retrozOR = [d_st_classif[key][0] for key in mask_keys]
excessPI = [d_st_excessPI[key][0] for key in mask_keys]
excessPI_detrended = [d_st_excessPI[key][1] for key in mask_keys]
print '%s excess PI corr coef' %(st), np.corrcoef(excessPI, retrozOR) # LA=0.41, IL=0.41
print '%s detrended excess PI corr coef' %(st), np.corrcoef(excessPI_detrended, retrozOR) # LA=0.45, IL=0.45
# setup for best fit line
Efit = np.polyfit(retrozOR, excessPI, 1)
Efit_fn = np.poly1d(Efit)
print '%s excess PI mort rate' %(st), Efit_fn
# best fit line
ax1.plot(retrozOR, excessPI, 'o', retrozOR, Efit_fn(retrozOR), '-', color=col, lw=lwd)
ax1.plot([],[], color=col, linestyle='-', lw=lwd, label=st)
# delineate mild, moderate severe
ax1.vlines([-1, 1], -20, 20, colors='k', linestyles='solid')
ax1.annotate('Mild', xy=(-14.5,0.25), fontsize=fssml)
ax1.annotate('Severe', xy=(11,15), fontsize=fssml)
# ili and P&I axis
ax1.set_ylabel('Excess P&I Mort. per 100,000', fontsize=fs)
ax1.set_xlabel(fxn.gp_sigma_r, fontsize=fs)
ax1.tick_params(axis='both', labelsize=fssml)
ax1.set_xlim([-15,15])
ax1.set_ylim([-2, 16])
ax1.legend(loc=2)
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs_v5/exploratory/S_eMort_zRR_sentinels.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
##############################
## plot severes ##
fig2 = plt.figure()
ax2 = fig2.add_subplot(1,1,1)
for st, col in zip(severes, colorvec[:len(severes)]):
mask_keys = [key for key in plot_keys if key[1] == st]
retrozOR = [d_st_classif[key][0] for key in mask_keys]
excessPI = [d_st_excessPI[key][0] for key in mask_keys]
excessPI_detrended = [d_st_excessPI[key][1] for key in mask_keys]
print '%s excess PI corr coef' %(st), np.corrcoef(excessPI, retrozOR) #
print '%s detrended excess PI corr coef' %(st), np.corrcoef(excessPI_detrended, retrozOR) #
# setup for best fit line
Efit = np.polyfit(retrozOR, excessPI, 1)
Efit_fn = np.poly1d(Efit)
print '%s excess PI mort rate' %(st), Efit_fn
# best fit line
ax2.plot(retrozOR, excessPI, 'o', retrozOR, Efit_fn(retrozOR), '-', color=col, lw=lwd)
ax2.plot([],[], color=col, linestyle='-', lw=lwd, label=st)
# delineate mild, moderate severe
ax2.vlines([-1, 1], -20, 20, colors='k', linestyles='solid')
ax2.annotate('Mild', xy=(-14.5,0.25), fontsize=fssml)
ax2.annotate('Severe', xy=(11,15), fontsize=fssml)
# ili and P&I axis
ax2.set_ylabel('Excess P&I Mort. per 100,000', fontsize=fs)
ax2.set_xlabel(fxn.gp_sigma_r, fontsize=fs)
ax2.tick_params(axis='both', labelsize=fssml)
ax2.set_xlim([-15,15])
ax2.set_ylim([-2, 16])
ax2.legend(loc=2)
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs_v5/exploratory/S_eMort_zRR_severes.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
##############################
## plot milds ##
fig3 = plt.figure()
ax3 = fig3.add_subplot(1,1,1)
for st, col in zip(mildishs, colorvec[:len(mildishs)]):
mask_keys = [key for key in plot_keys if key[1] == st]
retrozOR = [d_st_classif[key][0] for key in mask_keys]
excessPI = [d_st_excessPI[key][0] for key in mask_keys]
excessPI_detrended = [d_st_excessPI[key][1] for key in mask_keys]
print '%s excess PI corr coef' %(st), np.corrcoef(excessPI, retrozOR) #
print '%s detrended excess PI corr coef' %(st), np.corrcoef(excessPI_detrended, retrozOR) #
# setup for best fit line
Efit = np.polyfit(retrozOR, excessPI, 1)
Efit_fn = np.poly1d(Efit)
print '%s excess PI mort rate' %(st), Efit_fn
# best fit line
ax3.plot(retrozOR, excessPI, 'o', retrozOR, Efit_fn(retrozOR), '-', color=col, lw=lwd)
ax3.plot([],[], color=col, linestyle='-', lw=lwd, label=st)
# delineate mild, moderate severe
ax3.vlines([-1, 1], -20, 20, colors='k', linestyles='solid')
ax3.annotate('Mild', xy=(-14.5,0.25), fontsize=fssml)
ax3.annotate('Severe', xy=(11,15), fontsize=fssml)
# ili and P&I axis
ax3.set_ylabel('Excess P&I Mort. per 100,000', fontsize=fs)
ax3.set_xlabel(fxn.gp_sigma_r, fontsize=fs)
ax3.tick_params(axis='both', labelsize=fssml)
ax3.set_xlim([-15,15])
ax3.set_ylim([-2, 16])
ax3.legend(loc=2)
plt.savefig('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Manuscripts/Age_Severity/fluseverity_figs_v5/exploratory/S_eMort_zRR_mildishs.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show() | mit |
devanshdalal/scikit-learn | examples/exercises/plot_iris_exercise.py | 31 | 1622 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'],
linestyles=['--', '-', '--'], levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
marqh/cartopy | lib/cartopy/examples/multiple_maps.py | 1 | 1805 | # (C) British Crown Copyright 2011 - 2012, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import cartopy
from cartopy.examples.waves import sample_data
def main():
pc = cartopy.prj.PlateCarree()
geod = pc.as_geodetic()
rob = cartopy.prj.Robinson()
igh = cartopy.prj.InterruptedGoodeHomolosine()
nps = cartopy.prj.NorthPolarStereo()
x, y, z = sample_data()
slices = slice(20, 55), slice(85, 115)
x = x.__getitem__(slices)
y = y.__getitem__(slices)
z = z.__getitem__(slices)
ax = plt.subplot(2, 2, 1, projection=pc)
plt.scatter(x, y, c=z, transform=geod)
ax.coastlines()
ax = plt.subplot(2, 2, 2, projection=rob)
plt.scatter(x, y, c=z, transform=geod)
ax.coastlines()
    # XXX Ask the mpl mailing list to find out how you might create a subplot and subsequently modify its projection.
# plt.subplot(2, 2, 3, )#projection=igh)
# plt.scatter(x, y, c=z, transform=pc)
ax = plt.subplot(2, 2, 4, projection=nps)
plt.scatter(x, y, c=z, transform=geod)
ax.coastlines()
plt.show()
if __name__ == '__main__':
main()
| gpl-3.0 |
jmfranck/pyspecdata | docs/_downloads/02253b2c9f46189862b90a157438aa95/basic_units.py | 3 | 11135 | """
===========
Basic Units
===========
"""
import math
import numpy as np
import matplotlib.units as units
import matplotlib.ticker as ticker
class ProxyDelegate:
def __init__(self, fn_name, proxy_type):
self.proxy_type = proxy_type
self.fn_name = fn_name
def __get__(self, obj, objtype=None):
return self.proxy_type(self.fn_name, obj)
class TaggedValueMeta(type):
def __init__(self, name, bases, dict):
for fn_name in self._proxies:
if not hasattr(self, fn_name):
setattr(self, fn_name,
ProxyDelegate(fn_name, self._proxies[fn_name]))
class PassThroughProxy:
def __init__(self, fn_name, obj):
self.fn_name = fn_name
self.target = obj.proxy_target
def __call__(self, *args):
fn = getattr(self.target, self.fn_name)
ret = fn(*args)
return ret
class ConvertArgsProxy(PassThroughProxy):
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
converted_args = []
for a in args:
try:
converted_args.append(a.convert_to(self.unit))
except AttributeError:
converted_args.append(TaggedValue(a, self.unit))
converted_args = tuple([c.get_value() for c in converted_args])
return PassThroughProxy.__call__(self, *converted_args)
class ConvertReturnProxy(PassThroughProxy):
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
ret = PassThroughProxy.__call__(self, *args)
return (NotImplemented if ret is NotImplemented
else TaggedValue(ret, self.unit))
class ConvertAllProxy(PassThroughProxy):
def __init__(self, fn_name, obj):
PassThroughProxy.__init__(self, fn_name, obj)
self.unit = obj.unit
def __call__(self, *args):
converted_args = []
arg_units = [self.unit]
for a in args:
if hasattr(a, 'get_unit') and not hasattr(a, 'convert_to'):
# if this arg has a unit type but no conversion ability,
# this operation is prohibited
return NotImplemented
if hasattr(a, 'convert_to'):
try:
a = a.convert_to(self.unit)
except Exception:
pass
arg_units.append(a.get_unit())
converted_args.append(a.get_value())
else:
converted_args.append(a)
if hasattr(a, 'get_unit'):
arg_units.append(a.get_unit())
else:
arg_units.append(None)
converted_args = tuple(converted_args)
ret = PassThroughProxy.__call__(self, *converted_args)
if ret is NotImplemented:
return NotImplemented
ret_unit = unit_resolver(self.fn_name, arg_units)
if ret_unit is NotImplemented:
return NotImplemented
return TaggedValue(ret, ret_unit)
class TaggedValue(metaclass=TaggedValueMeta):
_proxies = {'__add__': ConvertAllProxy,
'__sub__': ConvertAllProxy,
'__mul__': ConvertAllProxy,
'__rmul__': ConvertAllProxy,
'__cmp__': ConvertAllProxy,
'__lt__': ConvertAllProxy,
'__gt__': ConvertAllProxy,
'__len__': PassThroughProxy}
def __new__(cls, value, unit):
# generate a new subclass for value
value_class = type(value)
try:
subcls = type(f'TaggedValue_of_{value_class.__name__}',
(cls, value_class), {})
return object.__new__(subcls)
except TypeError:
return object.__new__(cls)
def __init__(self, value, unit):
self.value = value
self.unit = unit
self.proxy_target = self.value
def __getattribute__(self, name):
if name.startswith('__'):
return object.__getattribute__(self, name)
variable = object.__getattribute__(self, 'value')
if hasattr(variable, name) and name not in self.__class__.__dict__:
return getattr(variable, name)
return object.__getattribute__(self, name)
def __array__(self, dtype=object):
return np.asarray(self.value).astype(dtype)
def __array_wrap__(self, array, context):
return TaggedValue(array, self.unit)
def __repr__(self):
return 'TaggedValue({!r}, {!r})'.format(self.value, self.unit)
def __str__(self):
return str(self.value) + ' in ' + str(self.unit)
def __len__(self):
return len(self.value)
def __iter__(self):
# Return a generator expression rather than use `yield`, so that
# TypeError is raised by iter(self) if appropriate when checking for
# iterability.
return (TaggedValue(inner, self.unit) for inner in self.value)
def get_compressed_copy(self, mask):
new_value = np.ma.masked_array(self.value, mask=mask).compressed()
return TaggedValue(new_value, self.unit)
def convert_to(self, unit):
if unit == self.unit or not unit:
return self
try:
new_value = self.unit.convert_value_to(self.value, unit)
except AttributeError:
new_value = self
return TaggedValue(new_value, unit)
def get_value(self):
return self.value
def get_unit(self):
return self.unit
class BasicUnit:
def __init__(self, name, fullname=None):
self.name = name
if fullname is None:
fullname = name
self.fullname = fullname
self.conversions = dict()
def __repr__(self):
return f'BasicUnit({self.name})'
def __str__(self):
return self.fullname
def __call__(self, value):
return TaggedValue(value, self)
def __mul__(self, rhs):
value = rhs
unit = self
if hasattr(rhs, 'get_unit'):
value = rhs.get_value()
unit = rhs.get_unit()
unit = unit_resolver('__mul__', (self, unit))
if unit is NotImplemented:
return NotImplemented
return TaggedValue(value, unit)
def __rmul__(self, lhs):
return self*lhs
def __array_wrap__(self, array, context):
return TaggedValue(array, self)
def __array__(self, t=None, context=None):
ret = np.array([1])
if t is not None:
return ret.astype(t)
else:
return ret
def add_conversion_factor(self, unit, factor):
def convert(x):
return x*factor
self.conversions[unit] = convert
def add_conversion_fn(self, unit, fn):
self.conversions[unit] = fn
def get_conversion_fn(self, unit):
return self.conversions[unit]
def convert_value_to(self, value, unit):
conversion_fn = self.conversions[unit]
ret = conversion_fn(value)
return ret
def get_unit(self):
return self
class UnitResolver:
def addition_rule(self, units):
for unit_1, unit_2 in zip(units[:-1], units[1:]):
if unit_1 != unit_2:
return NotImplemented
return units[0]
def multiplication_rule(self, units):
non_null = [u for u in units if u]
if len(non_null) > 1:
return NotImplemented
return non_null[0]
op_dict = {
'__mul__': multiplication_rule,
'__rmul__': multiplication_rule,
'__add__': addition_rule,
'__radd__': addition_rule,
'__sub__': addition_rule,
'__rsub__': addition_rule}
def __call__(self, operation, units):
if operation not in self.op_dict:
return NotImplemented
return self.op_dict[operation](self, units)
unit_resolver = UnitResolver()
cm = BasicUnit('cm', 'centimeters')
inch = BasicUnit('inch', 'inches')
inch.add_conversion_factor(cm, 2.54)
cm.add_conversion_factor(inch, 1/2.54)
radians = BasicUnit('rad', 'radians')
degrees = BasicUnit('deg', 'degrees')
radians.add_conversion_factor(degrees, 180.0/np.pi)
degrees.add_conversion_factor(radians, np.pi/180.0)
secs = BasicUnit('s', 'seconds')
hertz = BasicUnit('Hz', 'Hertz')
minutes = BasicUnit('min', 'minutes')
secs.add_conversion_fn(hertz, lambda x: 1./x)
secs.add_conversion_factor(minutes, 1/60.0)
# radians formatting
def rad_fn(x, pos=None):
if x >= 0:
n = int((x / np.pi) * 2.0 + 0.25)
else:
n = int((x / np.pi) * 2.0 - 0.25)
if n == 0:
return '0'
elif n == 1:
return r'$\pi/2$'
elif n == 2:
return r'$\pi$'
elif n == -1:
return r'$-\pi/2$'
elif n == -2:
return r'$-\pi$'
elif n % 2 == 0:
return fr'${n//2}\pi$'
else:
return fr'${n}\pi/2$'
class BasicUnitConverter(units.ConversionInterface):
@staticmethod
def axisinfo(unit, axis):
"""Return AxisInfo instance for x and unit."""
if unit == radians:
return units.AxisInfo(
majloc=ticker.MultipleLocator(base=np.pi/2),
majfmt=ticker.FuncFormatter(rad_fn),
label=unit.fullname,
)
elif unit == degrees:
return units.AxisInfo(
majloc=ticker.AutoLocator(),
majfmt=ticker.FormatStrFormatter(r'$%i^\circ$'),
label=unit.fullname,
)
elif unit is not None:
if hasattr(unit, 'fullname'):
return units.AxisInfo(label=unit.fullname)
elif hasattr(unit, 'unit'):
return units.AxisInfo(label=unit.unit.fullname)
return None
@staticmethod
def convert(val, unit, axis):
if units.ConversionInterface.is_numlike(val):
return val
if np.iterable(val):
if isinstance(val, np.ma.MaskedArray):
val = val.astype(float).filled(np.nan)
out = np.empty(len(val))
for i, thisval in enumerate(val):
if np.ma.is_masked(thisval):
out[i] = np.nan
else:
try:
out[i] = thisval.convert_to(unit).get_value()
except AttributeError:
out[i] = thisval
return out
if np.ma.is_masked(val):
return np.nan
else:
return val.convert_to(unit).get_value()
@staticmethod
def default_units(x, axis):
"""Return the default unit for x or None."""
if np.iterable(x):
for thisx in x:
return thisx.unit
return x.unit
def cos(x):
if np.iterable(x):
return [math.cos(val.convert_to(radians).get_value()) for val in x]
else:
return math.cos(x.convert_to(radians).get_value())
units.registry[BasicUnit] = units.registry[TaggedValue] = BasicUnitConverter()
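# Editor's note -- illustrative usage sketch, not part of the original module;
# the helper name is the editor's own. Once the converter above is registered,
# plain numbers can be tagged with a unit via BasicUnit.__call__, mixed-unit
# arithmetic resolves through the proxy machinery defined earlier, and cos()
# accepts radian-tagged values.
def _basic_units_usage_sketch():
    length = cm(10.0) + inch(1.0)  # the inch operand is converted to cm
    assert abs(length.get_value() - 12.54) < 1e-9
    angles = [radians(v) for v in (0.0, 0.5, 1.0)]
    return length, cos(angles)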
| bsd-3-clause |
sannecottaar/burnman | contrib/CHRU2014/paper_benchmark.py | 4 | 4701 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
paper_benchmark
---------------
This script reproduces the benchmark in :cite:`Cottaar2014`, Figure 3.
"""
from __future__ import absolute_import
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
if not os.path.exists('burnman') and os.path.exists('../../burnman'):
sys.path.insert(1, os.path.abspath('../..'))
sys.path.insert(1, os.path.abspath('.'))
import burnman
figsize = (6, 5)
prop = {'size': 12}
plt.rc('text', usetex=True)
plt.rc('font', family='sans-serif')
figure = plt.figure(dpi=100, figsize=figsize)
def check_slb_fig7_txt():
"""
Calculates all values for forsterite and benchmarks with values from Stixrude and Lithgow-Bertelloni (personal communication)
"""
forsterite = burnman.Mineral()
forsterite.params = {'name': 'forsterite',
'V_0': 43.603e-6,
'K_0': 127.955e9,
'Kprime_0': 4.232,
'G_0': 81.6e9,
'Gprime_0': 1.4,
'molar_mass': .140695,
'n': 7.0,
'Debye_0': 809.183,
'grueneisen_0': .993,
'q_0': 2.093,
'eta_s_0': 2.364}
forsterite.set_method('slb3')
data = np.loadtxt("slb_benchmark.txt", skiprows=1)
temperature = np.array(data[:, 2])
pressure = np.array(data[:, 0])
rho = np.array(data[:, 3])
frho = np.empty_like(rho)
rho_comp = np.empty_like(rho)
Kt = np.array(data[:, 4])
fKt = np.empty_like(Kt)
Kt_comp = np.empty_like(Kt)
Ks = np.array(data[:, 5])
fKs = np.empty_like(Ks)
Ks_comp = np.empty_like(Ks)
G = np.array(data[:, 6])
fG = np.empty_like(G)
G_comp = np.empty_like(G)
VB = np.array(data[:, 7])
fVB = np.empty_like(VB)
VB_comp = np.empty_like(VB)
VS = np.array(data[:, 8])
fVS = np.empty_like(VS)
VS_comp = np.empty_like(VS)
VP = np.array(data[:, 9])
fVP = np.empty_like(VP)
VP_comp = np.empty_like(VP)
vol = np.array(data[:, 10])
fvol = np.empty_like(vol)
vol_comp = np.empty_like(vol)
alpha = np.array(data[:, 11])
falpha = np.empty_like(alpha)
alpha_comp = np.empty_like(alpha)
Cp = np.array(data[:, 12])
fCp = np.empty_like(Cp)
Cp_comp = np.empty_like(Cp)
gr = np.array(data[:, 13])
gr_comp = np.empty_like(gr)
for i in range(len(temperature)):
forsterite.set_state(pressure[i], temperature[i])
rho_comp[i] = 100. * (forsterite.density / 1000. - rho[i]) / rho[i]
Kt_comp[i] = 100. * (
forsterite.isothermal_bulk_modulus / 1.e9 - Kt[i]) / Kt[i]
Ks_comp[i] = 100. * (
forsterite.adiabatic_bulk_modulus / 1.e9 - Ks[i]) / Ks[i]
G_comp[i] = 100. * (forsterite.shear_modulus / 1.e9 - G[i]) / G[i]
VB_comp[i] = 100. * (forsterite.v_phi / 1000. - VB[i]) / VB[i]
VS_comp[i] = 100. * (forsterite.v_s / 1000. - VS[i]) / VS[i]
VP_comp[i] = 100. * (forsterite.v_p / 1000. - VP[i]) / VP[i]
vol_comp[i] = 100. * (forsterite.molar_volume * 1.e6 - vol[i]) / vol[i]
alpha_comp[i] = 100. * (
forsterite.thermal_expansivity / 1.e-5 - alpha[i]) / (alpha[-1])
Cp_comp[i] = 100. * (forsterite.heat_capacity_p /
forsterite.params['molar_mass'] / 1000. - Cp[i]) / (Cp[-1])
gr_comp[i] = (forsterite.grueneisen_parameter - gr[i]) / gr[i]
plt.plot(temperature, rho_comp, label=r'$\rho$')
plt.plot(temperature, Kt_comp, label=r'$K_S$')
plt.plot(temperature, Ks_comp, label=r'$K_T$')
plt.plot(temperature, G_comp, label=r'$G$')
plt.plot(temperature, VS_comp, label=r'$V_S$')
plt.plot(temperature, VP_comp, label=r'$V_P$')
plt.plot(temperature, VB_comp, label=r'$V_\phi$')
plt.plot(temperature, vol_comp, label=r'$V$')
plt.plot(temperature, alpha_comp, label=r'$\alpha$')
plt.plot(temperature, Cp_comp, label=r'$c_P$')
plt.plot(temperature, gr_comp, label=r'$\gamma$')
plt.xlim([0, 2200])
plt.ylim([-0.002, 0.002])
plt.yticks([-0.002, -0.001, 0, 0.001, 0.002])
plt.xticks([0, 800, 1600, 2200])
plt.xlabel("Temperature (K)")
plt.ylabel("Difference (\%)")
plt.legend(loc="lower center", prop=prop, ncol=4)
if "RUNNING_TESTS" not in globals():
plt.savefig("benchmark1.pdf", bbox_inches='tight')
plt.show()
if __name__ == "__main__":
check_slb_fig7_txt()
| gpl-2.0 |
KevinFasusi/supplychainpy | tests/test_model_inventory.py | 1 | 11942 | import os
import unittest
import logging
from cmath import isclose
from decimal import Decimal
from unittest import TestCase
import pandas as pd
from pandas import DataFrame
from supplychainpy import model_inventory
from supplychainpy._helpers._config_file_paths import ABS_FILE_PATH_APPLICATION_CONFIG
from supplychainpy._helpers._pickle_config import deserialise_config
from supplychainpy.model_inventory import analyse, recommendations
from supplychainpy.sample_data.config import ABS_FILE_PATH
# logging.basicConfig(filename='suchpy_tests_log.txt', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
class TestBuildModel(TestCase):
_yearly_demand = {'jan': 75, 'feb': 75, 'mar': 75, 'apr': 75, 'may': 75, 'jun': 75, 'jul': 25,
'aug': 25, 'sep': 25, 'oct': 25, 'nov': 25, 'dec': 25}
_yearly_demand2 = {'jan': 75}
_categories = ('unit_cost', 'sku', 'reorder_level', 'safety_stock', 'reorder_quantity', 'ABC_XYZ_Classification',
'revenue', 'standard_deviation', 'quantity_on_hand', 'average_orders', 'shortages', 'excess_stock',
'demand_variability')
_expected_values = ('50', 'RX983-90', '99', '12', '57', '', '360000', '25', '390', '50', '0', '205', '0.500')
def test_model_orders_type(self):
"""Tests analyse_orders returns a 'dict' type. """
summary = model_inventory.analyse_orders(self._yearly_demand,
sku_id='RX983-90',
lead_time=Decimal(3),
unit_cost=Decimal(50.99),
reorder_cost=Decimal(400),
z_value=Decimal(1.28),
retail_price=Decimal(600),
quantity_on_hand=Decimal(390))
self.assertIsInstance(summary, dict)
def test_model_orders_length(self):
"""supplied orders data must be greater than 3 items long."""
with self.assertRaises(expected_exception=ValueError):
summary = model_inventory.analyse_orders(self._yearly_demand2,
sku_id="RX983-90",
lead_time=Decimal(3),
unit_cost=Decimal(50.99),
reorder_cost=Decimal(400),
z_value=Decimal(.28),
retail_price=Decimal(600),
quantity_on_hand=Decimal(390))
def test_model_orders_content(self):
""" test the return values for simple analyse orders"""
summary = model_inventory.analyse_orders(self._yearly_demand,
sku_id="RX983-90",
lead_time=Decimal(3),
unit_cost=Decimal(50),
reorder_cost=Decimal(400),
z_value=Decimal(.28),
retail_price=Decimal(600),
quantity_on_hand=Decimal(390))
for i, k in zip(self._categories, self._expected_values):
self.assertEqual(str(k), summary.get(i))
# finish with all members
def test_standard_deviation_row_count(self):
d = model_inventory.analyse(file_path=ABS_FILE_PATH['COMPLETE_CSV_SM'],
z_value=Decimal(1.28),
reorder_cost=Decimal(400),
retail_price=Decimal(455),
file_type='csv')
analysed_orders = [demand.orders_summary() for demand in d]
self.assertEqual(len(d), 39)
def test_file_path_extension_row(self):
with self.assertRaises(expected_exception=Exception):
model_inventory.analyse_orders_from_file_row(file_path='test.tt',
reorder_cost=Decimal(450),
z_value=Decimal(1.28), retail_price=Decimal(455))
def test_file_path_extension_col(self):
# arrange, act
app_dir = os.path.dirname(__file__, )
rel_path = 'supplychainpy/test.tt'
abs_file_path = os.path.abspath(os.path.join(app_dir, '..', rel_path))
with self.assertRaises(expected_exception=Exception):
model_inventory.analyse_orders_from_file_row(abs_file_path,
reorder_cost=Decimal(450),
z_value=Decimal(1.28), retail_price=Decimal(100))
def test_standard_deviation_col_count(self):
d = model_inventory.analyse_orders_from_file_col(file_path=ABS_FILE_PATH['PARTIAL_COL_TXT_SM'],
sku_id='RX9304-43',
lead_time=Decimal(2),
unit_cost=Decimal(400),
reorder_cost=Decimal(45),
z_value=Decimal(1.28),
file_type="text",
retail_price=Decimal(30))
self.assertEqual(len(d), 19)
def test_standard_deviation_col_count_csv(self):
d = model_inventory.analyse_orders_from_file_col(ABS_FILE_PATH['PARTIAL_COL_CSV_SM'], 'RX9304-43',
reorder_cost=Decimal(45),
unit_cost=Decimal(400),
lead_time=Decimal(45),
z_value=Decimal(1.28),
file_type="csv",
retail_price=Decimal(30))
self.assertEqual(len(d), 19)
def test_standard_deviation_row_value(self):
"""Test Standard deviation value of row data, from text file."""
std = 0
d = model_inventory.analyse_orders_from_file_row(file_path=ABS_FILE_PATH['PARTIAL_ROW_TXT_SM'],
retail_price=Decimal(400),
reorder_cost=Decimal(450),
z_value=Decimal(1.28))
for row in d:
std = row.get('standard_deviation')
self.assertEqual(Decimal(std), 25)
def test_standard_deviation_col_value(self):
d = model_inventory.analyse_orders_from_file_col(file_path=ABS_FILE_PATH['PARTIAL_COL_TXT_SM'],
sku_id='RX9304-43',
reorder_cost=Decimal(45),
unit_cost=Decimal(400),
lead_time=Decimal(45),
z_value=Decimal(1.28),
file_type="text",
retail_price=Decimal(30))
# assert
self.assertEqual(Decimal(d.get('standard_deviation')), 25)
def test_analyse_orders_from_file_row_csv(self):
""""""
d = model_inventory.analyse_orders_from_file_row(file_path=ABS_FILE_PATH['COMPLETE_CSV_SM'],
reorder_cost=Decimal(45),
z_value=Decimal(1.28),
file_type="csv",
retail_price=Decimal(30),
currency='USD')
std = 0
for row in d:
if row.get('sku') == 'KR202-210':
std = row.get('standard_deviation')
break
# assert
self.assertTrue(isclose(Decimal(std), 950, abs_tol=2))
def test_file_path_abcxyz_extension(self):
with self.assertRaises(expected_exception=Exception):
abc = model_inventory.analyse_orders_abcxyz_from_file(file_path='test.ts',
z_value=Decimal(1.28),
reorder_cost=Decimal(5000),
file_type="csv")
def test_abcxyz_classification(self):
abc = model_inventory.analyse(file_path=ABS_FILE_PATH['COMPLETE_CSV_SM'],
z_value=Decimal(1.28),
reorder_cost=Decimal(5000),
file_type="csv")
for sku in abc:
item = sku.orders_summary()
if item['sku'] == 'KR202-209':
self.assertEqual(item['ABC_XYZ_Classification'], 'BY')
def test_data_frame(self):
raw_df = pd.read_csv(ABS_FILE_PATH['COMPLETE_CSV_SM'])
analysis_df = analyse(df=raw_df, start=1, interval_length=12, interval_type='months')
self.assertIsInstance(analysis_df[['sku', 'quantity_on_hand', 'excess_stock', 'shortages', 'ABC_XYZ_Classification']], DataFrame)
def test_short_raw_data(self):
yearly_demand = {'jan': 75, 'feb': 75}
with self.assertRaises(expected_exception=ValueError):
summary = model_inventory.analyse_orders(yearly_demand,
sku_id='RX983-90',
lead_time=Decimal(3),
unit_cost=Decimal(50.99),
reorder_cost=Decimal(400),
z_value=Decimal(1.28),
retail_price=Decimal(600),
quantity_on_hand=Decimal(390))
#def test_recommendation_per_sku(self):
# app_config = deserialise_config(ABS_FILE_PATH_APPLICATION_CONFIG)
# analysed_order = analyse(file_path=app_config['file'],z_value=Decimal(1.28),
# reorder_cost=Decimal(5000), file_type="csv", length=12, currency='USD')
# skus = [sku.orders_summary().get('sku') for sku in analysed_order]
# holts_forecast = {analysis.sku_id: analysis.holts_trend_corrected_forecast for analysis in
# analyse(file_path=app_config['file'],
# z_value=Decimal(1.28),
# reorder_cost=Decimal(5000),
# file_type="csv",
# length=12,
# currency='USD')}
# recommend = recommendations(analysed_orders=analysed_order, forecast=holts_forecast)
#
# for i in recommend.get('sku_recommendations'):
# self.assertIn(i, skus)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/statsmodels-0.5.0-py2.7-linux-x86_64.egg/statsmodels/tsa/base/tests/test_base.py | 3 | 2895 | import numpy as np
from pandas import Series
from pandas.util import testing as ptesting
from statsmodels.tsa.base.tsa_model import TimeSeriesModel
from statsmodels.tsa.base.datetools import dates_from_range
import numpy.testing as npt
try:
from pandas import DatetimeIndex
_pandas_08x = True
except ImportError:
_pandas_08x = False
def test_pandas_nodates_index():
from statsmodels.datasets import sunspots
y = sunspots.load_pandas().data.SUNACTIVITY
npt.assert_raises(ValueError, TimeSeriesModel, y)
def test_predict_freq():
# test that predicted dates have same frequency
x = np.arange(1,36.)
if _pandas_08x:
from pandas import date_range
# there's a bug in pandas up to 0.10.2 for YearBegin
#dates = date_range("1972-4-1", "2007-4-1", freq="AS-APR")
dates = date_range("1972-4-30", "2006-4-30", freq="A-APR")
series = Series(x, index=dates)
model = TimeSeriesModel(series)
#npt.assert_(model.data.freq == "AS-APR")
npt.assert_(model.data.freq == "A-APR")
start = model._get_predict_start("2006-4-30")
end = model._get_predict_end("2016-4-30")
model._make_predict_dates()
predict_dates = model.data.predict_dates
#expected_dates = date_range("2006-12-31", "2016-12-31",
# freq="AS-APR")
expected_dates = date_range("2006-4-30", "2016-4-30", freq="A-APR")
npt.assert_equal(predict_dates, expected_dates)
#ptesting.assert_series_equal(predict_dates, expected_dates)
else:
from pandas import DateRange, datetools
dates = DateRange("1972-1-1", "2007-1-1", offset=datetools.yearEnd)
series = Series(x, index=dates)
model = TimeSeriesModel(series)
npt.assert_(model.data.freq == "A")
start = model._get_predict_start("2006-12-31")
end = model._get_predict_end("2016-12-31")
model._make_predict_dates()
predict_dates = model.data.predict_dates
expected_dates = DateRange("2006-12-31", "2016-12-31",
offset=datetools.yearEnd)
npt.assert_array_equal(predict_dates, expected_dates)
def test_keyerror_start_date():
x = np.arange(1,36.)
if _pandas_08x:
from pandas import date_range
# there's a bug in pandas up to 0.10.2 for YearBegin
#dates = date_range("1972-4-1", "2007-4-1", freq="AS-APR")
dates = date_range("1972-4-30", "2006-4-30", freq="A-APR")
series = Series(x, index=dates)
model = TimeSeriesModel(series)
else:
from pandas import DateRange, datetools
dates = DateRange("1972-1-1", "2007-1-1", offset=datetools.yearEnd)
series = Series(x, index=dates)
model = TimeSeriesModel(series)
npt.assert_raises(ValueError, model._get_predict_start, "1970-4-30")
| apache-2.0 |
exa-analytics/exatomic | exatomic/formula.py | 2 | 2582 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Simple Formula
##################
"""
import numpy as np
import pandas as pd
from .core.error import StringFormulaError
from exatomic.base import isotopes, sym2mass
class SimpleFormula(pd.Series):
"""
A simple way of storing a chemical formula that contains no structural
information. Element symbols are in alphabetical order (e.g. 'B', 'C', 'Cl', 'Uuo')
>>> water = SimpleFormula('H(2)O(1)')
>>> naoh = SimpleFormula('Na(1)O(1)H(1)')
>>> naoh
SimpleFormula('H(1)Na(1)O(1)')
"""
@property
def mass(self):
"""
Returns:
mass (float): Mass (in atomic units) of the associated formula
"""
df = self.to_frame()
df['mass'] = df.index.map(sym2mass)
return (df['mass'] * df['count']).sum()
def as_string(self):
"""
Returns:
formula (str): String representation of the chemical formula.
"""
return ''.join(('{0}({1})'.format(key.title(), self[key]) for key in sorted(self.index)))
def __init__(self, data):
if isinstance(data, str):
data = string_to_dict(data)
super().__init__(data=data, dtype=np.int64, name='count')
self.index.names = ['symbol']
def __repr__(self):
return "{}('{}')".format(self.__class__.__name__, self.as_string())
def __str__(self):
return self.__repr__()
def string_to_dict(formula):
"""
Convert string formula to a dictionary.
Args:
formula (str): String formula representation
Returns:
fdict (dict): Dictionary formula representation
"""
obj = []
if ')' not in formula and len(formula) <= 3 and all((not char.isdigit() for char in formula)):
return {formula: 1}
elif ')' not in formula:
raise StringFormulaError(formula)
for s in formula.split(')'):
if s != '':
symbol, count = s.split('(')
obj.append((symbol, np.int64(count)))
return dict(obj)
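# A minimal usage sketch added for illustration (the helper name below is
# hypothetical and not part of the original module): the two converters are
# inverses of each other for the 'H(2)O(1)'-style format documented above.
def _example_formula_roundtrip():
    parsed = string_to_dict('H(2)O(1)')    # -> {'H': 2, 'O': 1}
    return dict_to_string(parsed)          # -> 'H(2)O(1)'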
def dict_to_string(formula):
"""
Convert a dictionary formula to a string.
Args:
formula (dict): Dictionary formula representation
Returns:
fstr (str): String formula representation
"""
return ''.join(('{0}({1})'.format(key.title(), formula[key]) for key in sorted(formula.keys()) if formula[key] > 0))
| apache-2.0 |
ndingwall/scikit-learn | examples/model_selection/plot_grid_search_refit_callable.py | 25 | 3648 | """
==================================================
Balance model complexity and cross-validated score
==================================================
This example balances model complexity and cross-validated score by
finding a decent accuracy within 1 standard deviation of the best accuracy
score while minimising the number of PCA components [1].
The figure shows the trade-off between cross-validated score and the number
of PCA components. The balanced case is when n_components=10 and accuracy=0.88,
which falls into the range within 1 standard deviation of the best accuracy
score.
[1] Hastie, T., Tibshirani, R.,, Friedman, J. (2001). Model Assessment and
Selection. The Elements of Statistical Learning (pp. 219-260). New York,
NY, USA: Springer New York Inc..
"""
# Author: Wenhao Zhang <wenhaoz@ucla.edu>
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
def lower_bound(cv_results):
"""
Calculate the lower bound within 1 standard deviation
of the best `mean_test_scores`.
Parameters
----------
cv_results : dict of numpy(masked) ndarrays
See attribute cv_results_ of `GridSearchCV`
Returns
-------
float
Lower bound within 1 standard deviation of the
best `mean_test_score`.
"""
best_score_idx = np.argmax(cv_results['mean_test_score'])
return (cv_results['mean_test_score'][best_score_idx]
- cv_results['std_test_score'][best_score_idx])
def best_low_complexity(cv_results):
"""
Balance model complexity with cross-validated score.
Parameters
----------
cv_results : dict of numpy(masked) ndarrays
See attribute cv_results_ of `GridSearchCV`.
Returns
-------
int
Index of a model that has the fewest PCA components
while keeping its test score within 1 standard deviation of the best
`mean_test_score`.
"""
threshold = lower_bound(cv_results)
candidate_idx = np.flatnonzero(cv_results['mean_test_score'] >= threshold)
best_idx = candidate_idx[cv_results['param_reduce_dim__n_components']
[candidate_idx].argmin()]
return best_idx
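# Tiny hand-made illustration (added; the numbers below are invented, not
# produced by the grid search in this example) of the refit rule above:
# the best mean score is 0.88 +/- 0.01, so any candidate scoring >= 0.87
# qualifies, and among those the one with the fewest components (10) wins.
_toy_results = {
    'mean_test_score': np.array([0.80, 0.88, 0.87]),
    'std_test_score': np.array([0.01, 0.01, 0.01]),
    'param_reduce_dim__n_components': np.array([6, 10, 14]),
}
assert best_low_complexity(_toy_results) == 1   # index 1 -> n_components=10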
pipe = Pipeline([
('reduce_dim', PCA(random_state=42)),
('classify', LinearSVC(random_state=42, C=0.01)),
])
param_grid = {
'reduce_dim__n_components': [6, 8, 10, 12, 14]
}
grid = GridSearchCV(pipe, cv=10, n_jobs=1, param_grid=param_grid,
scoring='accuracy', refit=best_low_complexity)
X, y = load_digits(return_X_y=True)
grid.fit(X, y)
n_components = grid.cv_results_['param_reduce_dim__n_components']
test_scores = grid.cv_results_['mean_test_score']
plt.figure()
plt.bar(n_components, test_scores, width=1.3, color='b')
lower = lower_bound(grid.cv_results_)
plt.axhline(np.max(test_scores), linestyle='--', color='y',
label='Best score')
plt.axhline(lower, linestyle='--', color='.5', label='Best score - 1 std')
plt.title("Balance model complexity and cross-validated score")
plt.xlabel('Number of PCA components used')
plt.ylabel('Digit classification accuracy')
plt.xticks(n_components.tolist())
plt.ylim((0, 1.0))
plt.legend(loc='upper left')
best_index_ = grid.best_index_
print("The best_index_ is %d" % best_index_)
print("The n_components selected is %d" % n_components[best_index_])
print("The corresponding accuracy score is %.2f"
% grid.cv_results_['mean_test_score'][best_index_])
plt.show()
| bsd-3-clause |
Geosyntec/pybmp | pybmpdb/bmpdb.py | 2 | 22658 | import os
from pkg_resources import resource_filename
from functools import partial
from pathlib import Path
try:
import pyodbc
except ImportError:
pyodbc = None
import numpy
import pandas
from engarde import checks
from . import info, utils
import wqio
__all__ = [
'load_data',
'transform_parameters',
'paired_qual',
]
def _handle_ND_factors(df, qualcol='qual', rescol='res', dlcol='DL', quals=None,
nd_correction=2):
""" Determines the scaling factor to be applied to the water quality result
based on the result qualifiers in the BMP Database.
Parameters
----------
df : pandas.DataFrame
qualcol : str, optional (default = 'qual')
The column in *df* that contains the qualifiers.
rescol : str, optional (default = 'res')
The column in *df* that contains the results.
dlcol : str, optional (default = 'DL')
The column in *df* that contains the detection limits.
quals : list of str, optional.
A list of qualifiers that signify that a result is non-detect. Falls
back to ``['U', 'UK', 'UA', 'UC', 'K']`` when not provided.
nd_correction : float, optional (default = 2.0)
The factor by which non-detect results will be multiplied.
Returns
-------
factors : numpy.array
Notes
-----
The underlying assumption here is that the BMP Database reports non-detects
at half of their detection limit. So we need to double the reported value
to get the upper limit of the result for ROS/Kaplan-Meier imputation.
Also note that there are some weird cases where UJ-flagged data should be
given a different factor. This occurs when the reported result is greater than the
reported DL. Lastly, UJ-flagged data where the result is less than the DL
should be scaled by the ratio of the result to the DL, such that
result * factor = DL.
"""
quals = wqio.validate.at_least_empty_list(quals)
if not quals:
quals.extend(['U', 'UK', 'UA', 'UC', 'K'])
normal_ND = [df[qualcol].isin(quals), float(nd_correction)]
weird_UJ = [(df[qualcol] == 'UJ') & (df[rescol] < df[dlcol]), df[dlcol] / df[rescol]]
return wqio.utils.selector(1, normal_ND, weird_UJ)
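# Minimal sketch added for illustration (the helper below is hypothetical and
# not used elsewhere): per the Notes above, a 'U'-qualified value reported at
# half its DL is doubled, a 'UJ' value below its DL is scaled up to the DL,
# and detected ('=') values are left alone.
def _example_nd_factors():
    demo = pandas.DataFrame({
        'qual': ['U', 'UJ', '='],
        'res': [0.5, 0.8, 2.0],
        'DL': [1.0, 1.0, 1.0],
    })
    return demo['res'] * _handle_ND_factors(demo)   # -> [1.0, 1.0, 2.0]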
def _handle_ND_qualifiers(df, qualcol='qual', rescol='res', dlcol='DL', quals=None):
""" Determines final qualifier to be applied to the water quality result
based on the result qualifiers in the BMP Database. Non-detects get "ND",
detected values get "=".
Parameters
----------
df : pandas.DataFrame
qualcol : str, optional (default = 'qual')
The column in *df* that contains the qualifiers.
rescol : str, optional (default = 'res')
The column in *df* that contains the results.
dlcol : str, optional (default = 'DL')
The column in *df* that contains the detection limits.
quals : list of str, optional.
A list of qualifiers that signify that a result is non-detect. Falls
back to ``['U', 'UA', 'UI', 'UC', 'UK', 'K']`` when not provided.
Returns
-------
qualifiers : numpy.array
See also
--------
_handle_ND_factors
Notes
-----
Same basic premise as _handle_ND_factors, but different qualifiers count
as ND compared to what we used to determine the ND-scaling factors.
"""
quals = wqio.validate.at_least_empty_list(quals)
if not quals:
quals.extend(['U', 'UA', 'UI', 'UC', 'UK', 'K'])
is_ND = df[qualcol].isin(quals) | ((df[qualcol] == 'UJ') & (df[rescol] <= df[dlcol]))
return numpy.where(is_ND, 'ND', '=')
def _process_screening(df, screencol):
yes = df[screencol].str.lower().isin(['inc', 'yes'])
no = df[screencol].str.lower().isin(['exc', 'no'])
return wqio.utils.selector('invalid', [yes, 'yes'], [no, 'no'])
def _process_sampletype(df, sampletype):
grab = [df[sampletype].str.lower().str.contains('grab'), 'grab']
composite = [df[sampletype].str.lower().str.contains('composite'), 'composite']
return wqio.utils.selector('unknown', grab, composite)
def _check_levelnames(levels):
good_levels = [
'category', 'site', 'bmp', 'parameter',
'sampletype', 'epazone', 'state', 'paramgroup'
]
msg = 'valid levels are {}'.format(good_levels)
for lvl in levels:
if lvl not in good_levels:
raise ValueError(msg)
def transform_parameters(df, existingparams, newparam, newunits, resfxn, qualfxn,
indexMods=None, paramlevel='parameter'):
""" Apply an arbitrary transformation to a parameter in the data
Parameters
----------
df : pandas.DataFrame
existingparams : list of strings
List of the existing parameters that will be used to compute
the new values
newparam : string
Name of the new parameter to be generated
newunits : string
Units of the newly computed values
resfxn : callable
Function (or lambda) that will determine the result of
``newparam`` based on the values of ``existingparams``.
Function must assume to be operating on a row of
``self.data`` with the elements of ``existingparams`` stored
as columns.
qualfxn : function
Same as ``resfxn``, but for determining the final qualifier
of the ``newparam`` results.
indexMods : dict, optional (keys = index level names)
Dictionary of index level name whose values are the new
values of those levels where ``parameter == newparam``.
Returns
-------
transformed : pandas.DataFrame
"""
index_name_cache = df.index.names
existingparams = wqio.validate.at_least_empty_list(existingparams)
transformed = (
df.query("{} in @existingparams".format(paramlevel))
.pipe(utils.refresh_index)
.unstack(level=paramlevel)
.pipe(wqio.utils.assign_multilevel_column, qualfxn, 'qual', newparam)
.pipe(wqio.utils.assign_multilevel_column, resfxn, 'res', newparam)
.xs(newparam, level=paramlevel, axis='columns', drop_level=False)
.stack(level=paramlevel)
)
indexMods = wqio.validate.at_least_empty_dict(indexMods, units=newunits)
# add the units into indexMod, apply all changes
indexMods['units'] = newunits
for levelname, value in indexMods.items():
transformed = wqio.utils.redefine_index_level(transformed, levelname, value,
criteria=None, dropold=True)
# return the *full* dataset (preserving original params)
result = pandas.concat([
df.reset_index(),
transformed.reset_index()
], sort=False).set_index(index_name_cache)
return result
def paired_qual(df, qualin='qual_inflow', qualout='qual_outflow'):
ND_neither = [(df[qualin] == '=') & (df[qualout] == '='), 'Pair']
ND_in = [(df[qualin] == 'ND') & (df[qualout] == '='), 'Influent ND']
ND_out = [(df[qualin] == '=') & (df[qualout] == 'ND'), 'Effluent ND']
ND_both = [(df[qualin] == 'ND') & (df[qualout] == 'ND'), 'Both ND']
return wqio.utils.selector('=', ND_neither, ND_in, ND_out, ND_both)
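# Illustration only (hypothetical helper, not part of the public API): label
# each influent/effluent pair by which side, if any, was a non-detect.
def _example_paired_qual():
    demo = pandas.DataFrame({
        'qual_inflow': ['=', 'ND', '=', 'ND'],
        'qual_outflow': ['=', '=', 'ND', 'ND'],
    })
    return paired_qual(demo)   # -> ['Pair', 'Influent ND', 'Effluent ND', 'Both ND']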
def _pick_non_null(df, maincol, preferred, secondary):
return df[(maincol, preferred)].combine_first(df[(maincol, secondary)])
def _pick_best_station(df):
def best_col(df, mainstation, backupstation, valcol):
for sta in [mainstation, backupstation]:
if (sta, valcol) not in df.columns:
df = wqio.utils.assign_multilevel_column(df, numpy.nan, sta, valcol)
return df[(mainstation, valcol)].combine_first(df[(backupstation, valcol)])
orig_index = df.index.names
data = (
df.pipe(utils.refresh_index)
.unstack(level='station')
.pipe(wqio.utils.swap_column_levels, 0, 1)
.pipe(wqio.utils.assign_multilevel_column,
lambda df: best_col(df, 'outflow', 'subsurface', 'res'),
'final_outflow', 'res')
.pipe(wqio.utils.assign_multilevel_column,
lambda df: best_col(df, 'outflow', 'subsurface', 'qual'),
'final_outflow', 'qual')
.pipe(wqio.utils.assign_multilevel_column,
lambda df: best_col(df, 'inflow', 'reference outflow', 'res'),
'final_inflow', 'res')
.pipe(wqio.utils.assign_multilevel_column,
lambda df: best_col(df, 'inflow', 'reference outflow', 'qual'),
'final_inflow', 'qual')
.loc[:, lambda df: df.columns.map(lambda c: 'final_' in c[0])]
.rename(columns=lambda col: col.replace('final_', ''))
.stack(level='station')
)
return data
def _pick_best_sampletype(df):
orig_cols = df.columns
xtab = df.pipe(utils.refresh_index).unstack(level='sampletype')
for col in orig_cols:
grabvalues = numpy.where(
xtab[(col, 'composite')].isnull(),
xtab[(col, 'grab')],
numpy.nan
)
xtab = wqio.utils.assign_multilevel_column(xtab, grabvalues, col, 'grab')
data = (
xtab.loc[:, xtab.columns.map(lambda c: c[1] != 'unknown')]
.stack(level=['sampletype'])
)
return data
def _maybe_filter_onesided_BMPs(df, balanced_only):
grouplevels = ['site', 'bmp', 'parameter', 'category']
pivotlevel = 'station'
if balanced_only:
return (
df.unstack(level=pivotlevel)
.groupby(level=grouplevels)
.filter(lambda g: numpy.all(g['res'].describe().loc['count'] > 0))
.stack(level=pivotlevel)
)
else:
return df
def _filter_by_storm_count(df, minstorms):
# filter out all monitoring stations with less than /N/ storms
grouplevels = ['site', 'bmp', 'parameter', 'station']
data = (
df.groupby(level=grouplevels)
.filter(lambda g: g.count()['res'] >= minstorms)
)
return data
def _filter_by_BMP_count(df, minbmps):
grouplevels = ['category', 'parameter', 'station']
data = (
df.groupby(level=grouplevels)
.filter(lambda g: g.index.get_level_values('bmp').unique().shape[0] >= minbmps)
)
return data
def _maybe_combine_WB_RP(df, combine_WB_RP, catlevel='category'):
if combine_WB_RP:
# merge Wetland Basins and Retention ponds, keeping
# the original records
wbrp_indiv = ['Retention Pond', 'Wetland Basin']
wbrp_combo = 'Wetland Basin/Retention Pond'
level_pos = utils.get_level_position(df, catlevel)
return wqio.utils.redefine_index_level(
df, catlevel, wbrp_combo, dropold=False,
criteria=lambda row: row[level_pos] in wbrp_indiv
).pipe(
checks.verify_any,
lambda df: df.index.get_level_values(catlevel) == wbrp_combo
)
else:
return df
def _maybe_combine_nox(df, combine_nox, paramlevel='parameter', rescol='res',
qualcol='qual', finalunits='mg/L'):
if combine_nox:
# combine NO3+NO2 and NO3 into NOx
nitro_components = [
'Nitrogen, Nitrite (NO2) + Nitrate (NO3) as N',
'Nitrogen, Nitrate (NO3) as N'
]
nitro_combined = 'Nitrogen, NOx as N'
picker = partial(_pick_non_null, preferred=nitro_components[0],
secondary=nitro_components[1])
return transform_parameters(
df, nitro_components, nitro_combined, finalunits,
partial(picker, maincol=rescol), partial(picker, maincol=qualcol)
).pipe(
checks.verify_any,
lambda df: df.index.get_level_values(paramlevel) == nitro_combined
)
else:
return df
def _maybe_fix_PFCs(df, fix_PFCs, catlevel='category', typelevel='bmptype'):
if fix_PFCs:
PFC = 'Permeable Friction Course'
type_level_pos = utils.get_level_position(df, typelevel)
return wqio.utils.redefine_index_level(
df, catlevel, PFC, dropold=True,
criteria=lambda row: row[type_level_pos] == 'PF'
).pipe(
checks.verify_any,
lambda df: df.index.get_level_values(catlevel) == PFC
)
else:
return df
def _maybe_remove_grabs(df, remove_grabs, grab_ok_bmps='default'):
if grab_ok_bmps == 'default':
grab_ok_bmps = ['Retention Pond', 'Wetland Basin', 'Wetland Basin/Retention Pond']
grab_ok_bmps = wqio.validate.at_least_empty_list(grab_ok_bmps)
if remove_grabs:
querytxt = (
"(sampletype == 'composite') | "
"(((category in @grab_ok_bmps) | (paramgroup == 'Biological')) & "
" (sampletype != 'unknown'))"
)
return df.query(querytxt)
else:
return df
def _load_raw_data(csvfile=None):
csvfile = Path(csvfile or wqio.download('bmpdata'))
return pandas.read_csv(csvfile, parse_dates=['sampledate'], encoding='utf-8')
def _clean_raw_data(raw_df):
_row_headers = [
'category', 'epazone', 'state', 'site', 'bmp',
'station', 'storm', 'sampletype', 'watertype',
'paramgroup', 'units', 'parameter', 'fraction',
'initialscreen', 'wqscreen', 'catscreen', 'balanced',
'bmptype', 'pdf_id', 'ws_id', 'site_id', 'bmp_id',
]
units_norm = {
u['unicode']: info.getNormalization(u['name'])
for u in info.units
}
target_units = {
p['name'].lower(): info.getUnitsFromParam(p['name'], attr='unicode')
for p in info.parameters
}
# rename columns:
rename_columns = {
'wq_qual': 'qual',
'wq_value': 'res',
'wq_units': 'units',
'raw_parameter': 'general_parameter',
'category': 'category'
}
expected_rows = raw_df.loc[:, 'wq_value'].groupby(lambda x: x > 0).count().loc[True]
biofilters = {
'Biofilter - Grass Swale': 'Grass Swale',
'Biofilter - Grass Strip': 'Grass Strip',
}
drop_columns = ['ms', '_parameter']
prepped = (
raw_df
.fillna({'wq_qual': '='})
.rename(columns=rename_columns)
.dropna(subset=['res'])
.assign(qual=lambda df: df['qual'].str.strip())
.assign(res=lambda df: df['res'] * _handle_ND_factors(df))
.assign(qual=lambda df: _handle_ND_qualifiers(df))
.assign(initialscreen=lambda df: _process_screening(df, 'initialscreen'))
.assign(wqscreen=lambda df: _process_screening(df, 'wqscreen'))
.assign(catscreen=lambda df: _process_screening(df, 'catscreen'))
.assign(station=lambda df: df['station'].str.lower())
.assign(sampletype=lambda df: _process_sampletype(df, 'sampletype'))
.assign(sampledatetime=lambda df: df.apply(wqio.utils.makeTimestamp, axis=1))
.assign(units=lambda df: df['units'].map(lambda u: info.getUnits(u, attr='unicode')))
.assign(_parameter=lambda df: df['parameter'].str.lower().str.strip())
.assign(fraction=lambda df: numpy.where(df['_parameter'].str.lower().str.contains('dissolved'), 'dissolved', 'total'))
.replace({'category': biofilters})
.pipe(wqio.utils.normalize_units, units_norm, target_units, paramcol='_parameter',
rescol='res', unitcol='units', napolicy='raise')
.drop(drop_columns, axis=1)
.query("res > 0")
.pipe(checks.none_missing, columns=_row_headers)
.groupby(by=_row_headers)
.agg({'res': 'mean', 'qual': 'min', 'sampledatetime': 'min'})
.set_index('sampledatetime', append=True)
.pipe(checks.unique_index)
)
return prepped
def _prepare_for_summary(df, minstorms=3, minbmps=3, combine_nox=True, combine_WB_RP=True,
remove_grabs=True, grab_ok_bmps='default', balanced_only=True,
fix_PFCs=True, excluded_bmps=None, excluded_params=None):
""" Prepare data for categorical summaries
Parameters
----------
df : pandas.DataFrame
minstorms : int (default = 3)
Minimum number of storms (monitoring events) for a BMP study to be included
minbmps : int (default = 3)
Minimum number of BMP studies for a parameter to be included
combine_nox : bool (default = True)
Toggles combining NO3 and NO2+NO3 into a new parameter NOx, giving
preference to NO2+NO3 when both parameters are observed for an event.
The underlying assumption is that NO2 concentrations are typically much
smaller than NO3, thus NO2+NO3 ~ NO3.
combine_WB_RP : bool (default = True)
Toggles combining Retention Pond and Wetland Basin data into a new
BMP category: Retention Pond/Wetland Basin.
remove_grabs : bool (default = True)
Toggles removing grab samples from the dataset except for:
- biological parameters
- BMP categories that are whitelisted via *grab_ok_bmps*
grab_ok_bmps : sequence of str, optional
BMP categories for which grab data should be included. By default, this
inclues Retention Ponds, Wetland Basins, and the combined
Retention Pond/Wetland Basin category created when *combine_WB_RP* is
True.
balanced_only : bool (default = True)
Toggles removing BMP studies which have only influent or effluent data,
exclusively.
fix_PFCs : bool (default = True)
Makes correction to the category of Permeable Friction Course BMPs
excluded_bmps, excluded_params : sequence of str, optional
List of BMP studies and parameters to exclude from the data.
Returns
-------
summarizable : pandas.DataFrame
"""
excluded_bmps = wqio.validate.at_least_empty_list(excluded_bmps)
excluded_params = wqio.validate.at_least_empty_list(excluded_params)
return (
df.pipe(utils._maybe_combine_WB_RP, combine_WB_RP)
.pipe(utils._maybe_combine_nox, combine_nox)
.pipe(utils._maybe_fix_PFCs, fix_PFCs)
.pipe(utils._maybe_remove_grabs, remove_grabs, grab_ok_bmps)
.query("bmp not in @excluded_bmps")
.query("parameter not in @excluded_params")
.pipe(utils._pick_best_sampletype)
.pipe(utils._pick_best_station)
.pipe(utils._maybe_filter_onesided_BMPs, balanced_only)
.pipe(utils._filter_by_storm_count, minstorms)
.pipe(utils._filter_by_BMP_count, minbmps)
)
def load_data(datapath=None, minstorms=3, minbmps=3, combine_nox=True, combine_WB_RP=True,
remove_grabs=True, grab_ok_bmps='default', balanced_only=True,
fix_PFCs=True, excluded_bmps=None, excluded_params=None,
as_dataframe=False, **dc_kwargs):
""" Prepare data for categorical summaries
Parameters
----------
datapath : Path-like, optional
Path to the raw data CSV. If not provided, the latest data will be
downloaded.
minstorms : int (default = 3)
Minimum number of storms (monitoring events) for a BMP study to be included
minbmps : int (default = 3)
Minimum number of BMP studies for a parameter to be included
combine_nox : bool (default = True)
Toggles combining NO3 and NO2+NO3 into a new parameter NOx, giving
preference to NO2+NO3 when both parameters are observed for an event.
The underlying assumption is that NO2 concentrations are typically much
smaller than NO3, thus NO2+NO3 ~ NO3.
combine_WB_RP : bool (default = True)
Toggles combining Retention Pond and Wetland Basin data into a new
BMP category: Retention Pond/Wetland Basin.
remove_grabs : bool (default = True)
Toggles removing grab samples from the dataset except for:
- biological parameters
- BMP categories that are whitelisted via *grab_ok_bmps*
grab_ok_bmps : sequence of str, optional
BMP categories for which grab data should be included. By default, this
includes Retention Ponds, Wetland Basins, and the combined
Retention Pond/Wetland Basin category created when *combine_WB_RP* is
True.
balanced_only : bool (default = True)
Toggles removing BMP studies which have only influent or effluent data,
exclusively.
fix_PFCs : bool (default = True)
Makes correction to the category of Permeable Friction Course BMPs
excluded_bmps, excluded_params : sequence of str, optional
List of BMP studies and parameters to exclude from the data.
as_dataframe : bool (default = False)
When False, a wqio.DataCollection is returned
Additional Parameters
---------------------
Any additional keyword arguments will be passed to wqio.DataCollection.
Returns
-------
bmp : pandas.DataFrame or wqio.DataCollection
"""
othergroups = dc_kwargs.pop('othergroups', ['category', 'units'])
pairgroups = dc_kwargs.pop('pairgroups', ['category', 'units', 'bmp_id', 'site_id', 'storm'])
rescol = dc_kwargs.pop('rescol', 'res')
qualcol = dc_kwargs.pop('qualcol', 'qual')
ndval = dc_kwargs.pop('ndval', ['ND', '<'],)
stationcol = dc_kwargs.pop('stationcol', 'station')
paramcol = dc_kwargs.pop('paramcol', 'parameter')
bmp = (
_load_raw_data(datapath)
.pipe(_clean_raw_data)
.pipe(_prepare_for_summary, minstorms=minstorms, minbmps=minbmps,
combine_nox=combine_nox, combine_WB_RP=combine_WB_RP,
remove_grabs=remove_grabs, grab_ok_bmps=grab_ok_bmps,
balanced_only=balanced_only, fix_PFCs=fix_PFCs,
excluded_bmps=excluded_bmps, excluded_params=excluded_params)
)
if as_dataframe:
return bmp
return wqio.DataCollection(bmp, rescol='res', qualcol='qual', ndval=['ND'],
stationcol='station', paramcol='parameter',
othergroups=othergroups, pairgroups=pairgroups,
**dc_kwargs)
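# Usage sketch added for illustration (hypothetical helper; with no local
# path given, the latest BMP Database export is downloaded automatically).
def _example_load_data():
    return load_data(minstorms=3, minbmps=3, as_dataframe=True)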
| bsd-3-clause |
devanshdalal/scikit-learn | examples/ensemble/plot_feature_transformation.py | 115 | 4327 | """
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <betatim@gmail.com>
#
# License: BSD 3 clause
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.pipeline import make_pipeline
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the logistic regression model to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
y_train,
test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator,
random_state=0)
rt_lm = LogisticRegression()
pipeline = make_pipeline(rt, rt_lm)
pipeline.fit(X_train, y_train)
y_pred_rt = pipeline.predict_proba(X_test)[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show()
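# Added illustration of the encoding described in the module docstring:
# ``apply`` returns one leaf index per (sample, tree), and the fitted
# OneHotEncoder expands each leaf index into sparse indicator features.
leaf_indices = grd.apply(X_test)[:, :, 0]
print("leaf index matrix shape:", leaf_indices.shape)          # (n_samples, n_estimator)
print("one-hot encoded shape:", grd_enc.transform(leaf_indices).shape)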
| bsd-3-clause |
Upward-Spiral-Science/spect-team | utils/split.py | 1 | 2434 | #!usr/bin/env python
# -*- coding: utf-8 -*-
# Usage: python [files]
# Data Cleaning and Splitting
import pandas as pd
# Get row summary, return a list of indices of rows that contain NaNs
def row_summary(df):
# Extract column headers
featNames = list(df.columns.get_values())
# Get row summary (the number of NaNs in each row)
row_summary = df.isnull().sum(axis=1)
# Get incomplete row indices
nan_row_inds = list() # incomplete row indices
for i, x in enumerate(row_summary):
if x > 0: nan_row_inds.append(i)
return nan_row_inds
# Drop the incomplete records (has NaNs)
def clean_records(df):
nan_row_inds = row_summary(df)
clean_df = df.drop(df.index[nan_row_inds], inplace=False)
# Double check for NaNs
print 'Is there any NaNs in the clean records?', clean_df.isnull().values.any()
return clean_df
# Separate data columns into a subgroup and save into a csv file
def split_data(df, start_ind, end_ind, csv_name):
subdf = df.iloc[:, [i for i in range(start_ind, end_ind)]]
# Add patient ID columns
if start_ind > 0:
key = df['Patient_ID'] # unique record key
subdf = pd.concat([key, subdf], axis=1)
subdf.to_csv(csv_name, index=False)
return subdf
if __name__ == '__main__':
df = pd.read_csv('Data_Adults_1.csv')
clean_df = clean_records(df)
n, m = clean_df.shape
# Find the splitting point
featNames = list(df.columns.get_values())
split_point0 = featNames.index('Adjustment_Disorder')
split_point1 = featNames.index('BSC_Respondent')
split_point2 = featNames.index('Baseline_header_id')
df0 = split_data(clean_df, 0, split_point0, 'patient_info.csv') # patient information
df1 = split_data(clean_df, split_point0, split_point1, 'disorders.csv') # disorders
df2 = split_data(clean_df, split_point1, split_point2, 'unknown.csv') # unknown measurements
df3 = split_data(clean_df, split_point2, m, 'base_concen.csv') # baseline-concentration
# Separate baseline and concentration columns
base_heads, concen_heads = ['Patient_ID'], []
for h in df3.columns:
if 'Baseline' in h:
base_heads.append(h)
else:
concen_heads.append(h)
df_base = df3[base_heads]
df_base.to_csv('baseline.csv', index=False)
df_concen = df3[concen_heads]
df_concen.to_csv('concentration.csv', index=False)
| apache-2.0 |
siconos/siconos-deb | examples/Control/Relay/Filippov-NL.py | 1 | 2996 | #!/usr/bin/env python
# Siconos is a program dedicated to modeling, simulation and control
# of non smooth dynamical systems.
#
# Copyright 2016 INRIA.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import matplotlib
matplotlib.use('Agg')
from matplotlib.pyplot import subplot, title, plot, grid, savefig
from numpy import array, eye, empty, zeros, savetxt
from siconos.kernel import FirstOrderLinearDS, FirstOrderLinearTIR, RelayNSL,\
NonSmoothDynamicalSystem, Model, TimeDiscretisation, TimeStepping, EulerMoreauOSI, \
Interaction, Relay
from math import ceil
import MyR, MyNonLinearR
# variables
t0 = 0.0 # start time
T = 1.0 # end time
h = 1.0e-3 # time step
numInter = 2
ninter = 2
theta = 0.5
alpha = .01
N = ceil((T-t0)/h)
# matrices
A = zeros((2,2))
x0 = array([10.,10.])
B = 500*array([[alpha,1-alpha],[-(1-alpha),alpha]])
C = eye(2)
D = zeros((2,2))
# dynamical systems
process = FirstOrderLinearDS(x0, A)
#myProcessRelation = MyR.MyR(C,B)
myProcessRelation = MyNonLinearR.MyNonLinearR(C,B)
myProcessRelation.setDPtr(D)
myNslaw = RelayNSL(2)
myNslaw.display()
myProcessInteraction = Interaction(ninter, myNslaw,
myProcessRelation)
myNSDS = NonSmoothDynamicalSystem()
myNSDS.insertDynamicalSystem(process)
myNSDS.link(myProcessInteraction,process)
filippov = Model(t0,T)
filippov.setNonSmoothDynamicalSystemPtr(myNSDS)
td = TimeDiscretisation(t0, h)
s = TimeStepping(td)
myIntegrator = EulerMoreauOSI(theta)
s.insertIntegrator(myIntegrator)
#TODO python <- SICONOS_RELAY_LEMKE
# access dparam
osnspb = Relay()
s.insertNonSmoothProblem(osnspb)
s.setComputeResiduY(True)
s.setComputeResiduR(True)
filippov.initialize(s);
# matrix to save data
dataPlot = empty((N+1,4))
dataPlot[0, 0] = t0
dataPlot[0, 1:3] = process.x()
dataPlot[0, 3] = myProcessInteraction.lambda_(0)[0]
# time loop
k = 1
while(s.hasNextEvent()):
s.computeOneStep()
dataPlot[k, 0] = s.nextTime()
dataPlot[k, 1] = process.x()[0]
dataPlot[k, 2] = process.x()[1]
dataPlot[k, 3] = myProcessInteraction.lambda_(0)[0]
k += 1
s.nextStep()
#print s.nextTime()
# save to disk
savetxt('output.txt', dataPlot)
# plot interesting stuff
subplot(311)
title('position')
plot(dataPlot[:,0], dataPlot[:,1])
grid()
subplot(312)
title('velocity')
plot(dataPlot[:,0], dataPlot[:,2])
grid()
subplot(313)
plot(dataPlot[:,0], dataPlot[:,3])
title('lambda')
grid()
savefig("Filipov_NL1.png")
plot(dataPlot[:,1], dataPlot[:,2])
grid()
savefig("Filipov_NL2.png")
| apache-2.0 |
radiasoft/radtrack | experimental/laserHeater/testBorisVay.py | 1 | 2673 | __author__ = 'swebb'
from matplotlib import pyplot as plt
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
mpl.rc('text', usetex=True)
from radtrack.ptclmovers.RbBorisVay import RbBorisVay
import scipy.constants as consts
import numpy as np
try:
dt = 1.e-8
B = [1.e-5 * np.array([1., 0.1, 0.])]
E = [1.e-5 * np.array([0., 0., 1.])]
charge = consts.e
mass = consts.electron_mass
pusher = RbBorisVay(charge, mass, dt)
x = []
y = []
z = []
gamma = 3.
uMag = consts.c * np.sqrt(gamma**2 - 1.)
uDir = 1.4*np.pi
uDir2 = 0.1*np.pi
v0 = [np.array([uMag * np.cos(uDir) * np.sin(uDir2), uMag * np.sin(uDir) *
np.sin(uDir2), uMag * np.cos(uDir2)])]
x0 = [np.zeros(3)]
x.append(x0[0][0])
y.append(x0[0][1])
z.append(x0[0][2])
gammaArray = []
gamma = np.sqrt(np.dot(v0[0], v0[0])/consts.c**2 + 1)
gammaArray.append(gamma)
x0 = pusher.halfmove(v0, x0, +1)
for idx in range(10000):
v0 = pusher.accelerate(v0, E, B)
x0 = pusher.move(v0, x0)
x.append(x0[0][0])
y.append(x0[0][1])
z.append(x0[0][2])
gamma = np.sqrt(np.dot(v0[0], v0[0])/consts.c**2 + 1)
gammaArray.append(gamma)
x0 = pusher.halfmove(v0, x0, -1)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(x, y, z, linewidth=2)
ax.plot(x, y, zs=min(z), zdir='z', alpha=0.25, c='k')
ax.plot(x, z, zs=min(y), zdir='y', alpha=0.25, c='k')
ax.plot(y, z, zs=min(x), zdir='x', alpha=0.25, c='k')
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_zlabel(r'$z$')
ax.set_xlim(min(x), max(x))
ax.set_ylim(min(y), max(y))
ax.set_zlim(min(z), max(z))
ax.legend()
plt.savefig('BorisVayTrajectory.png')
plt.clf()
expectedx0 = np.array([-3563.32861125, 336.10744549, 550.92910024])
expectedv0 = np.array([ -1.77869689e+08, 7.19781526e+08, -4.11437817e+08])
tol = 1.e-8
failed = False
xerror = x0[0] - expectedx0
uerror = v0[0] - expectedv0
metricX = np.dot(xerror, xerror)/np.dot(expectedx0, expectedx0)
metricV = np.dot(uerror, uerror)/np.dot(expectedv0, expectedv0)
print 'testBorisVay:'
print 'Xerror =', metricX
print 'Verror =', metricV
if metricX > tol:
print 'X failed tolerances with x =', x0, ', Expected:', expectedx0
failed = True
if metricV > tol:
print 'V failed tolerances with v =', v0, ', Expected:', expectedv0
failed = True
if failed:
raise Exception('emPusherTest has failed')
except Exception as e:
print e
raise
print 'Passed.' | apache-2.0 |
nicproulx/mne-python | mne/channels/layout.py | 2 | 33662 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Marijn van Vliet <w.m.vanvliet@gmail.com>
# Jona Sassenhagen <jona.sassenhagen@gmail.com>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: Simplified BSD
import logging
from collections import defaultdict
from itertools import combinations
import os.path as op
import numpy as np
from ..transforms import _pol_to_cart, _cart_to_sph
from ..bem import fit_sphere_to_headshape
from ..io.pick import pick_types
from ..io.constants import FIFF
from ..io.meas_info import Info
from ..utils import _clean_names, warn
from ..externals.six.moves import map
class Layout(object):
"""Sensor layouts.
Layouts are typically loaded from a file using read_layout. Only use this
class directly if you're constructing a new layout.
Parameters
----------
box : tuple of length 4
The box dimension (x_min, x_max, y_min, y_max).
pos : array, shape=(n_channels, 4)
The positions of the channels in 2d (x, y, width, height).
names : list
The channel names.
ids : list
The channel ids.
kind : str
The type of Layout (e.g. 'Vectorview-all').
"""
def __init__(self, box, pos, names, ids, kind): # noqa: D102
self.box = box
self.pos = pos
self.names = names
self.ids = ids
self.kind = kind
def save(self, fname):
"""Save Layout to disk.
Parameters
----------
fname : str
The file name (e.g. 'my_layout.lout').
See Also
--------
read_layout
"""
x = self.pos[:, 0]
y = self.pos[:, 1]
width = self.pos[:, 2]
height = self.pos[:, 3]
if fname.endswith('.lout'):
out_str = '%8.2f %8.2f %8.2f %8.2f\n' % self.box
elif fname.endswith('.lay'):
out_str = ''
else:
raise ValueError('Unknown layout type. Should be of type '
'.lout or .lay.')
for ii in range(x.shape[0]):
out_str += ('%03d %8.2f %8.2f %8.2f %8.2f %s\n' % (self.ids[ii],
x[ii], y[ii], width[ii], height[ii], self.names[ii]))
f = open(fname, 'w')
f.write(out_str)
f.close()
def __repr__(self):
"""Return the string representation."""
return '<Layout | %s - Channels: %s ...>' % (self.kind,
', '.join(self.names[:3]))
def plot(self, show=True):
"""Plot the sensor positions.
Parameters
----------
show : bool
Show figure if True. Defaults to True.
Returns
-------
fig : instance of matplotlib figure
Figure containing the sensor topography.
Notes
-----
.. versionadded:: 0.12.0
"""
from ..viz.topomap import plot_layout
return plot_layout(self, show=show)
def _read_lout(fname):
"""Aux function."""
with open(fname) as f:
box_line = f.readline() # first line contains box dimension
box = tuple(map(float, box_line.split()))
names, pos, ids = [], [], []
for line in f:
splits = line.split()
if len(splits) == 7:
cid, x, y, dx, dy, chkind, nb = splits
name = chkind + ' ' + nb
else:
cid, x, y, dx, dy, name = splits
pos.append(np.array([x, y, dx, dy], dtype=np.float))
names.append(name)
ids.append(int(cid))
pos = np.array(pos)
return box, pos, names, ids
def _read_lay(fname):
"""Aux function."""
with open(fname) as f:
box = None
names, pos, ids = [], [], []
for line in f:
splits = line.split()
if len(splits) == 7:
cid, x, y, dx, dy, chkind, nb = splits
name = chkind + ' ' + nb
else:
cid, x, y, dx, dy, name = splits
pos.append(np.array([x, y, dx, dy], dtype=np.float))
names.append(name)
ids.append(int(cid))
pos = np.array(pos)
return box, pos, names, ids
def read_layout(kind, path=None, scale=True):
"""Read layout from a file.
Parameters
----------
kind : str
The name of the .lout file (e.g. kind='Vectorview-all' for
'Vectorview-all.lout').
path : str | None
The path of the folder containing the Layout file. Defaults to the
mne/channels/data/layouts folder inside your mne-python installation.
scale : bool
Apply useful scaling for out the box plotting using layout.pos.
Defaults to True.
Returns
-------
layout : instance of Layout
The layout.
See Also
--------
Layout.save
"""
if path is None:
path = op.join(op.dirname(__file__), 'data', 'layouts')
if not kind.endswith('.lout') and op.exists(op.join(path, kind + '.lout')):
kind += '.lout'
elif not kind.endswith('.lay') and op.exists(op.join(path, kind + '.lay')):
kind += '.lay'
if kind.endswith('.lout'):
fname = op.join(path, kind)
kind = kind[:-5]
box, pos, names, ids = _read_lout(fname)
elif kind.endswith('.lay'):
fname = op.join(path, kind)
kind = kind[:-4]
box, pos, names, ids = _read_lay(fname)
kind.endswith('.lay')
else:
raise ValueError('Unknown layout type. Should be of type '
'.lout or .lay.')
if scale:
pos[:, 0] -= np.min(pos[:, 0])
pos[:, 1] -= np.min(pos[:, 1])
scaling = max(np.max(pos[:, 0]), np.max(pos[:, 1])) + pos[0, 2]
pos /= scaling
pos[:, :2] += 0.03
pos[:, :2] *= 0.97 / 1.03
pos[:, 2:] *= 0.94
return Layout(box=box, pos=pos, names=names, kind=kind, ids=ids)
def make_eeg_layout(info, radius=0.5, width=None, height=None, exclude='bads'):
"""Create .lout file from EEG electrode digitization.
Parameters
----------
info : instance of Info
Measurement info (e.g., raw.info).
radius : float
Viewport radius as a fraction of main figure height. Defaults to 0.5.
width : float | None
Width of sensor axes as a fraction of main figure height. By default,
this will be the maximum width possible without axes overlapping.
height : float | None
Height of sensor axes as a fraction of main figure height. By default,
this will be the maximum height possible without axes overlapping.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any.
If 'bads', exclude channels in info['bads'] (default).
Returns
-------
layout : Layout
The generated Layout.
See Also
--------
make_grid_layout, generate_2d_layout
"""
if not (0 <= radius <= 0.5):
raise ValueError('The radius parameter should be between 0 and 0.5.')
if width is not None and not (0 <= width <= 1.0):
raise ValueError('The width parameter should be between 0 and 1.')
if height is not None and not (0 <= height <= 1.0):
raise ValueError('The height parameter should be between 0 and 1.')
picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude=exclude)
loc2d = _auto_topomap_coords(info, picks)
names = [info['chs'][i]['ch_name'] for i in picks]
# Scale [x, y] to [-0.5, 0.5]
loc2d_min = np.min(loc2d, axis=0)
loc2d_max = np.max(loc2d, axis=0)
loc2d = (loc2d - (loc2d_max + loc2d_min) / 2.) / (loc2d_max - loc2d_min)
# If no width or height specified, calculate the maximum value possible
# without axes overlapping.
if width is None or height is None:
width, height = _box_size(loc2d, width, height, padding=0.1)
# Scale to viewport radius
loc2d *= 2 * radius
# Some subplot centers will be at the figure edge. Shrink everything so it
# fits in the figure.
scaling = min(1 / (1. + width), 1 / (1. + height))
loc2d *= scaling
width *= scaling
height *= scaling
# Shift to center
loc2d += 0.5
n_channels = loc2d.shape[0]
pos = np.c_[loc2d[:, 0] - 0.5 * width,
loc2d[:, 1] - 0.5 * height,
width * np.ones(n_channels),
height * np.ones(n_channels)]
box = (0, 1, 0, 1)
ids = 1 + np.arange(n_channels)
layout = Layout(box=box, pos=pos, names=names, kind='EEG', ids=ids)
return layout
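# Example usage sketch (added for illustration; ``info`` is assumed to come
# from a recording that contains EEG electrode positions, e.g. ``raw.info``).
def _example_make_eeg_layout(info):
    return make_eeg_layout(info, radius=0.5, exclude='bads')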
def make_grid_layout(info, picks=None, n_col=None):
"""Generate .lout file for custom data, i.e., ICA sources.
Parameters
----------
info : instance of Info | None
Measurement info (e.g., raw.info). If None, default names will be
employed.
picks : array-like of int | None
The indices of the channels to be included. If None, all misc channels
will be included.
n_col : int | None
Number of columns to generate. If None, a square grid will be produced.
Returns
-------
layout : Layout
The generated layout.
See Also
--------
make_eeg_layout, generate_2d_layout
"""
if picks is None:
picks = pick_types(info, misc=True, ref_meg=False, exclude='bads')
names = [info['chs'][k]['ch_name'] for k in picks]
if not names:
raise ValueError('No misc data channels found.')
ids = list(range(len(picks)))
size = len(picks)
if n_col is None:
# prepare square-like layout
n_row = n_col = np.sqrt(size) # try square
if n_col % 1:
# try n * (n-1) rectangle
n_col, n_row = int(n_col + 1), int(n_row)
if n_col * n_row < size: # jump to the next full square
n_row += 1
else:
n_row = int(np.ceil(size / float(n_col)))
# setup position grid
x, y = np.meshgrid(np.linspace(-0.5, 0.5, n_col),
np.linspace(-0.5, 0.5, n_row))
x, y = x.ravel()[:size], y.ravel()[:size]
width, height = _box_size(np.c_[x, y], padding=0.1)
# Some axes will be at the figure edge. Shrink everything so it fits in the
# figure. Add 0.01 border around everything
border_x, border_y = (0.01, 0.01)
x_scaling = 1 / (1. + width + border_x)
y_scaling = 1 / (1. + height + border_y)
x = x * x_scaling
y = y * y_scaling
width *= x_scaling
height *= y_scaling
# Shift to center
x += 0.5
y += 0.5
# calculate pos
pos = np.c_[x - 0.5 * width, y - 0.5 * height,
width * np.ones(size), height * np.ones(size)]
box = (0, 1, 0, 1)
layout = Layout(box=box, pos=pos, names=names, kind='grid-misc', ids=ids)
return layout
def find_layout(info, ch_type=None, exclude='bads'):
"""Choose a layout based on the channels in the info 'chs' field.
Parameters
----------
info : instance of Info
The measurement info.
ch_type : {'mag', 'grad', 'meg', 'eeg'} | None
The channel type for selecting single channel layouts.
Defaults to None. Note, this argument will only be considered for
VectorView type layout. Use `meg` to force using the full layout
in situations where the info only contains one sensor type.
exclude : list of string | str
List of channels to exclude. If empty do not exclude any.
If 'bads', exclude channels in info['bads'] (default).
Returns
-------
layout : Layout instance | None
None if layout not found.
"""
our_types = ' or '.join(['`None`', '`mag`', '`grad`', '`meg`'])
if ch_type not in (None, 'meg', 'mag', 'grad', 'eeg'):
raise ValueError('Invalid channel type (%s) requested '
'`ch_type` must be %s' % (ch_type, our_types))
chs = info['chs']
# Only take first 16 bits, as higher bits store CTF comp order
coil_types = set([ch['coil_type'] & 0xFFFF for ch in chs])
channel_types = set([ch['kind'] for ch in chs])
has_vv_mag = any(k in coil_types for k in
[FIFF.FIFFV_COIL_VV_MAG_T1, FIFF.FIFFV_COIL_VV_MAG_T2,
FIFF.FIFFV_COIL_VV_MAG_T3])
has_vv_grad = any(k in coil_types for k in [FIFF.FIFFV_COIL_VV_PLANAR_T1,
FIFF.FIFFV_COIL_VV_PLANAR_T2,
FIFF.FIFFV_COIL_VV_PLANAR_T3])
has_vv_meg = has_vv_mag and has_vv_grad
has_vv_only_mag = has_vv_mag and not has_vv_grad
has_vv_only_grad = has_vv_grad and not has_vv_mag
is_old_vv = ' ' in chs[0]['ch_name']
has_4D_mag = FIFF.FIFFV_COIL_MAGNES_MAG in coil_types
ctf_other_types = (FIFF.FIFFV_COIL_CTF_REF_MAG,
FIFF.FIFFV_COIL_CTF_REF_GRAD,
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD)
has_CTF_grad = (FIFF.FIFFV_COIL_CTF_GRAD in coil_types or
(FIFF.FIFFV_MEG_CH in channel_types and
any(k in ctf_other_types for k in coil_types)))
# hack due to MNE-C bug in IO of CTF
# only take first 16 bits, as higher bits store CTF comp order
n_kit_grads = sum(ch['coil_type'] & 0xFFFF == FIFF.FIFFV_COIL_KIT_GRAD
for ch in chs)
has_any_meg = any([has_vv_mag, has_vv_grad, has_4D_mag, has_CTF_grad,
n_kit_grads])
has_eeg_coils = (FIFF.FIFFV_COIL_EEG in coil_types and
FIFF.FIFFV_EEG_CH in channel_types)
has_eeg_coils_and_meg = has_eeg_coils and has_any_meg
has_eeg_coils_only = has_eeg_coils and not has_any_meg
if ch_type == "meg" and not has_any_meg:
raise RuntimeError('No MEG channels present. Cannot find MEG layout.')
if ch_type == "eeg" and not has_eeg_coils:
raise RuntimeError('No EEG channels present. Cannot find EEG layout.')
if ((has_vv_meg and ch_type is None) or
(any([has_vv_mag, has_vv_grad]) and ch_type == 'meg')):
layout_name = 'Vectorview-all'
elif has_vv_only_mag or (has_vv_meg and ch_type == 'mag'):
layout_name = 'Vectorview-mag'
elif has_vv_only_grad or (has_vv_meg and ch_type == 'grad'):
if info['ch_names'][0].endswith('X'):
layout_name = 'Vectorview-grad_norm'
else:
layout_name = 'Vectorview-grad'
elif ((has_eeg_coils_only and ch_type in [None, 'eeg']) or
(has_eeg_coils_and_meg and ch_type == 'eeg')):
if not isinstance(info, (dict, Info)):
raise RuntimeError('Cannot make EEG layout, no measurement info '
'was passed to `find_layout`')
return make_eeg_layout(info, exclude=exclude)
elif has_4D_mag:
layout_name = 'magnesWH3600'
elif has_CTF_grad:
layout_name = 'CTF-275'
elif n_kit_grads > 0:
layout_name = _find_kit_layout(info, n_kit_grads)
else:
xy = _auto_topomap_coords(info, picks=range(info['nchan']),
ignore_overlap=True, to_sphere=False)
return generate_2d_layout(xy, ch_names=info['ch_names'], name='custom',
normalize=False)
layout = read_layout(layout_name)
if not is_old_vv:
layout.names = _clean_names(layout.names, remove_whitespace=True)
if has_CTF_grad:
layout.names = _clean_names(layout.names, before_dash=True)
# Apply mask for excluded channels.
if exclude == 'bads':
exclude = info['bads']
idx = [ii for ii, name in enumerate(layout.names) if name not in exclude]
layout.names = [layout.names[ii] for ii in idx]
layout.pos = layout.pos[idx]
layout.ids = [layout.ids[ii] for ii in idx]
return layout
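# Example usage sketch (added; ``raw`` is assumed to be an instance of
# mne.io.Raw and is not defined in this module).
def _example_find_layout(raw):
    return find_layout(raw.info, ch_type='mag')   # magnetometer layout only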
def _find_kit_layout(info, n_grads):
"""Determine the KIT layout.
Parameters
----------
info : Info
Info object.
n_grads : int
Number of KIT-gradiometers in the info.
Returns
-------
kit_layout : str
One of 'KIT-AD', 'KIT-157' or 'KIT-UMD'.
"""
if info['kit_system_id'] is not None:
# avoid circular import
from ..io.kit.constants import KIT_LAYOUT
if info['kit_system_id'] in KIT_LAYOUT:
kit_layout = KIT_LAYOUT[info['kit_system_id']]
if kit_layout is not None:
return kit_layout
raise NotImplementedError("The layout for the KIT system with ID %i "
"is missing. Please contact the developers "
"about adding it." % info['kit_system_id'])
elif n_grads > 157:
return 'KIT-AD'
# channels which are on the left hemisphere for NY and right for UMD
test_chs = ('MEG 13', 'MEG 14', 'MEG 15', 'MEG 16', 'MEG 25',
'MEG 26', 'MEG 27', 'MEG 28', 'MEG 29', 'MEG 30',
'MEG 31', 'MEG 32', 'MEG 57', 'MEG 60', 'MEG 61',
'MEG 62', 'MEG 63', 'MEG 64', 'MEG 73', 'MEG 90',
'MEG 93', 'MEG 95', 'MEG 96', 'MEG 105', 'MEG 112',
'MEG 120', 'MEG 121', 'MEG 122', 'MEG 123', 'MEG 124',
'MEG 125', 'MEG 126', 'MEG 142', 'MEG 144', 'MEG 153',
'MEG 154', 'MEG 155', 'MEG 156')
x = [ch['loc'][0] < 0 for ch in info['chs'] if ch['ch_name'] in test_chs]
if np.all(x):
return 'KIT-157' # KIT-NY
elif np.all(np.invert(x)):
raise NotImplementedError("Guessing sensor layout for legacy UMD "
"files is not implemented. Please convert "
"your files using MNE-Python 0.13 or "
"higher.")
else:
raise RuntimeError("KIT system could not be determined for data")
def _box_size(points, width=None, height=None, padding=0.0):
"""Given a series of points, calculate an appropriate box size.
Parameters
----------
points : array, shape (n_points, 2)
The centers of the axes as a list of (x, y) coordinate pairs. Normally
these are points in the range [0, 1] centered at 0.5.
width : float | None
An optional box width to enforce. When set, only the box height will be
calculated by the function.
height : float | None
An optional box height to enforce. When set, only the box width will be
calculated by the function.
padding : float
Portion of the box to reserve for padding. The value can range between
0.0 (boxes will touch, default) to 1.0 (boxes consist of only padding).
Returns
-------
width : float
Width of the box
height : float
Height of the box
"""
from scipy.spatial.distance import pdist
def xdiff(a, b):
return np.abs(a[0] - b[0])
def ydiff(a, b):
return np.abs(a[1] - b[1])
points = np.asarray(points)
all_combinations = list(combinations(points, 2))
if width is None and height is None:
if len(points) <= 1:
# Trivial case first
width = 1.0
height = 1.0
else:
# Find the closest two points A and B.
a, b = all_combinations[np.argmin(pdist(points))]
# The closest points define either the max width or max height.
w, h = xdiff(a, b), ydiff(a, b)
if w > h:
width = w
else:
height = h
# At this point, either width or height is known, or both are known.
if height is None:
# Find all axes that could potentially overlap horizontally.
hdist = pdist(points, xdiff)
candidates = [all_combinations[i] for i, d in enumerate(hdist)
if d < width]
if len(candidates) == 0:
# No axes overlap, take all the height you want.
height = 1.0
else:
# Find an appropriate height so that none of the found axes will
# overlap.
height = np.min([ydiff(*c) for c in candidates])
elif width is None:
# Find all axes that could potentially overlap vertically.
vdist = pdist(points, ydiff)
candidates = [all_combinations[i] for i, d in enumerate(vdist)
if d < height]
if len(candidates) == 0:
# No axes overlap, take all the width you want.
width = 1.0
else:
# Find an appropriate width so that none of the found axes will
# overlap.
width = np.min([xdiff(*c) for c in candidates])
# Add a bit of padding between boxes
width *= 1 - padding
height *= 1 - padding
return width, height
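# Worked illustration (added; hypothetical helper): three sensors in a
# horizontal row 0.25 apart. The closest pair caps the box width at 0.25,
# nothing constrains the height, and the 10% padding shrinks both values.
def _example_box_size():
    pts = np.array([[0.25, 0.5], [0.5, 0.5], [0.75, 0.5]])
    return _box_size(pts, padding=0.1)   # -> (0.225, 0.9)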
def _find_topomap_coords(info, picks, layout=None):
"""Guess the E/MEG layout and return appropriate topomap coordinates.
Parameters
----------
info : instance of Info
Measurement info.
picks : list of int
Channel indices to generate topomap coords for.
layout : None | instance of Layout
Enforce using a specific layout. With None, a new set of coordinates
is generated from the channel positions in ``info`` for the requested
``picks``.
Returns
-------
coords : array, shape = (n_chs, 2)
2 dimensional coordinates for each sensor for a topomap plot.
"""
if len(picks) == 0:
raise ValueError("Need more than 0 channels.")
if layout is not None:
chs = [info['chs'][i] for i in picks]
pos = [layout.pos[layout.names.index(ch['ch_name'])] for ch in chs]
pos = np.asarray(pos)
else:
pos = _auto_topomap_coords(info, picks)
return pos
def _auto_topomap_coords(info, picks, ignore_overlap=False, to_sphere=True):
"""Make a 2 dimensional sensor map from sensor positions in an info dict.
The default is to use the electrode locations. The fallback option is to
attempt using digitization points of kind FIFFV_POINT_EEG. This only works
with EEG and requires an equal number of digitization points and sensors.
Parameters
----------
info : instance of Info
The measurement info.
picks : list of int
The channel indices to generate topomap coords for.
ignore_overlap : bool
Whether to ignore overlapping positions in the layout. If False and
positions overlap, an error is thrown.
to_sphere : bool
If True, the radial distance of spherical coordinates is ignored, in
effect fitting the xyz-coordinates to a sphere. Defaults to True.
Returns
-------
locs : array, shape = (n_sensors, 2)
An array of positions of the 2 dimensional map.
"""
from scipy.spatial.distance import pdist, squareform
chs = [info['chs'][i] for i in picks]
# Use channel locations if available
locs3d = np.array([ch['loc'][:3] for ch in chs])
# If electrode locations are not available, use digitization points
if len(locs3d) == 0 or np.allclose(locs3d, 0):
logging.warning('Did not find any electrode locations in the info, '
'will attempt to use digitization points instead. '
'However, if digitization points do not correspond to '
'the EEG electrodes, this will lead to bad results. '
'Please verify that the sensor locations in the plot '
'are accurate.')
# MEG/EOG/ECG sensors don't have digitization points; all requested
# channels must be EEG
for ch in chs:
if ch['kind'] != FIFF.FIFFV_EEG_CH:
raise ValueError("Cannot determine location of MEG/EOG/ECG "
"channels using digitization points.")
eeg_ch_names = [ch['ch_name'] for ch in info['chs']
if ch['kind'] == FIFF.FIFFV_EEG_CH]
# Get EEG digitization points
if info['dig'] is None or len(info['dig']) == 0:
raise RuntimeError('No digitization points found.')
locs3d = np.array([point['r'] for point in info['dig']
if point['kind'] == FIFF.FIFFV_POINT_EEG])
if len(locs3d) == 0:
raise RuntimeError('Did not find any digitization points of '
'kind FIFFV_POINT_EEG (%d) in the info.'
% FIFF.FIFFV_POINT_EEG)
if len(locs3d) != len(eeg_ch_names):
raise ValueError("Number of EEG digitization points (%d) "
"doesn't match the number of EEG channels "
"(%d)" % (len(locs3d), len(eeg_ch_names)))
# Center digitization points on head origin
dig_kinds = (FIFF.FIFFV_POINT_CARDINAL,
FIFF.FIFFV_POINT_EEG,
FIFF.FIFFV_POINT_EXTRA)
_, origin_head, _ = fit_sphere_to_headshape(info, dig_kinds, units='m')
locs3d -= origin_head
# Match the digitization points with the requested
# channels.
eeg_ch_locs = dict(zip(eeg_ch_names, locs3d))
locs3d = np.array([eeg_ch_locs[ch['ch_name']] for ch in chs])
# Duplicate points cause all kinds of trouble during visualization
dist = pdist(locs3d)
if np.min(dist) < 1e-10 and not ignore_overlap:
problematic_electrodes = [
chs[elec_i]['ch_name']
for elec_i in squareform(dist < 1e-10).any(axis=0).nonzero()[0]
]
raise ValueError('The following electrodes have overlapping positions:'
'\n ' + str(problematic_electrodes) + '\nThis '
'causes problems during visualization.')
if to_sphere:
# use spherical (theta, pol) as (r, theta) for polar->cartesian
return _pol_to_cart(_cart_to_sph(locs3d)[:, 1:][:, ::-1])
return _pol_to_cart(_cart_to_sph(locs3d))
def _topo_to_sphere(pos, eegs):
"""Transform xy-coordinates to sphere.
Parameters
----------
pos : array-like, shape (n_channels, 2)
        xy-coordinates to transform.
eegs : list of int
Indices of eeg channels that are included when calculating the sphere.
Returns
-------
coords : array, shape (n_channels, 3)
xyz-coordinates.
"""
xs, ys = np.array(pos).T
sqs = np.max(np.sqrt((xs[eegs] ** 2) + (ys[eegs] ** 2)))
xs /= sqs # Shape to a sphere and normalize
ys /= sqs
xs += 0.5 - np.mean(xs[eegs]) # Center the points
ys += 0.5 - np.mean(ys[eegs])
xs = xs * 2. - 1. # Values ranging from -1 to 1
ys = ys * 2. - 1.
rs = np.clip(np.sqrt(xs ** 2 + ys ** 2), 0., 1.)
alphas = np.arccos(rs)
zs = np.sin(alphas)
return np.column_stack([xs, ys, zs])
def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude='bads',
raise_error=True):
"""Find the picks for pairing grad channels.
Parameters
----------
info : instance of Info
An info dictionary containing channel information.
layout : Layout | None
The layout if available. Defaults to None.
topomap_coords : bool
Return the coordinates for a topomap plot along with the picks. If
False, only picks are returned. Defaults to True.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any (default).
If 'bads', exclude channels in info['bads']. Defaults to 'bads'.
raise_error : bool
Whether to raise an error when no pairs are found. If False, raises a
warning.
Returns
-------
picks : array of int
Picks for the grad channels, ordered in pairs.
coords : array, shape = (n_grad_channels, 3)
Coordinates for a topomap plot (optional, only returned if
topomap_coords == True).
"""
# find all complete pairs of grad channels
pairs = defaultdict(list)
grad_picks = pick_types(info, meg='grad', ref_meg=False, exclude=exclude)
for i in grad_picks:
ch = info['chs'][i]
name = ch['ch_name']
if name.startswith('MEG'):
if name.endswith(('2', '3')):
key = name[-4:-1]
pairs[key].append(ch)
pairs = [p for p in pairs.values() if len(p) == 2]
if len(pairs) == 0:
if raise_error:
raise ValueError("No 'grad' channel pairs found.")
else:
warn("No 'grad' channel pairs found.")
return list()
# find the picks corresponding to the grad channels
grad_chs = sum(pairs, [])
ch_names = info['ch_names']
picks = [ch_names.index(c['ch_name']) for c in grad_chs]
if topomap_coords:
shape = (len(pairs), 2, -1)
coords = (_find_topomap_coords(info, picks, layout)
.reshape(shape).mean(axis=1))
return picks, coords
else:
return picks
# This function is used to pair grad channels when no info object is present,
# as is the case for Projections, which don't carry the info.
def _pair_grad_sensors_from_ch_names(ch_names):
"""Find the indexes for pairing grad channels.
Parameters
----------
ch_names : list of str
A list of channel names.
Returns
-------
indexes : list of int
Indexes of the grad channels, ordered in pairs.
"""
pairs = defaultdict(list)
for i, name in enumerate(ch_names):
if name.startswith('MEG'):
if name.endswith(('2', '3')):
key = name[-4:-1]
pairs[key].append(i)
pairs = [p for p in pairs.values() if len(p) == 2]
grad_chs = sum(pairs, [])
return grad_chs
def _merge_grad_data(data, method='rms'):
"""Merge data from channel pairs using the RMS or mean.
Parameters
----------
data : array, shape = (n_channels, n_times)
Data for channels, ordered in pairs.
method : str
Can be 'rms' or 'mean'.
Returns
-------
data : array, shape = (n_channels / 2, n_times)
The root mean square or mean for each pair.
"""
data = data.reshape((len(data) // 2, 2, -1))
if method == 'mean':
data = np.mean(data, axis=1)
elif method == 'rms':
data = np.sqrt(np.sum(data ** 2, axis=1) / 2)
else:
raise ValueError('method must be "rms" or "mean, got %s.' % method)
return data
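# Illustrative sketch (not from the original module) of how the pairing and
# merging helpers above fit together; `info` and `data` are assumed to be an
# MNE Info object and an (n_channels, n_times) array from the same recording.
#
#     picks, coords = _pair_grad_sensors(info, topomap_coords=True)
#     merged = _merge_grad_data(data[picks], method='rms')
#     # merged has shape (len(picks) // 2, n_times): one RMS trace per grad
#     # pair, and coords holds one topomap position per pair.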
def generate_2d_layout(xy, w=.07, h=.05, pad=.02, ch_names=None,
ch_indices=None, name='ecog', bg_image=None,
normalize=True):
"""Generate a custom 2D layout from xy points.
Generates a 2-D layout for plotting with plot_topo methods and
functions. XY points will be normalized between 0 and 1, where
normalization extremes will be either the min/max of xy, or
the width/height of bg_image.
Parameters
----------
xy : ndarray (N x 2)
The xy coordinates of sensor locations.
w : float
The width of each sensor's axis (between 0 and 1)
h : float
The height of each sensor's axis (between 0 and 1)
pad : float
        Portion of the box to reserve for padding. The value can range from
        0.0 (boxes will touch) to 1.0 (boxes consist of only padding).
ch_names : list
The names of each channel. Must be a list of strings, with one
string per channel.
ch_indices : list
Index of each channel - must be a collection of unique integers,
one index per channel.
name : string
The name of this layout type.
bg_image : str | ndarray
The image over which sensor axes will be plotted. Either a path to an
image file, or an array that can be plotted with plt.imshow. If
provided, xy points will be normalized by the width/height of this
image. If not, xy points will be normalized by their own min/max.
normalize : bool
Whether to normalize the coordinates to run from 0 to 1. Defaults to
True.
Returns
-------
layout : Layout
A Layout object that can be plotted with plot_topo
functions and methods.
See Also
--------
make_eeg_layout, make_grid_layout
Notes
-----
.. versionadded:: 0.9.0
"""
from scipy.ndimage import imread
if ch_indices is None:
ch_indices = np.arange(xy.shape[0])
if ch_names is None:
ch_names = ['{0}'.format(i) for i in ch_indices]
if len(ch_names) != len(ch_indices):
raise ValueError('# ch names and indices must be equal')
if len(ch_names) != len(xy):
raise ValueError('# ch names and xy vals must be equal')
x, y = xy.copy().astype(float).T
# Normalize xy to 0-1
if bg_image is not None:
# Normalize by image dimensions
if isinstance(bg_image, str):
img = imread(bg_image)
else:
img = bg_image
x /= img.shape[1]
y /= img.shape[0]
elif normalize:
# Normalize x and y by their maxes
for i_dim in [x, y]:
i_dim -= i_dim.min(0)
i_dim /= (i_dim.max(0) - i_dim.min(0))
# Create box and pos variable
box = _box_size(np.vstack([x, y]).T, padding=pad)
box = (0, 0, box[0], box[1])
w, h = [np.array([i] * x.shape[0]) for i in [w, h]]
loc_params = np.vstack([x, y, w, h]).T
layout = Layout(box, loc_params, ch_names, ch_indices, name)
return layout
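# Hedged usage sketch for generate_2d_layout (names and values assumed, not
# part of the original file):
#
#     xy = np.random.rand(16, 2) * 100          # 16 sensors, arbitrary units
#     layout = generate_2d_layout(xy, w=.07, h=.05, pad=.02, name='ecog')
#     # layout.pos has shape (16, 4): normalized x, y plus box width/height;
#     # layout.names defaults to '0'..'15' when ch_names is not given.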
| bsd-3-clause |
andreamad8/QDREN | bAbI/run_final.py | 1 | 2986 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
from src.utils.utils import Dataset, gen_embeddings
from src.EN import EntityNetwork
from src.trainer.train import train
import numpy as np
import tensorflow as tf
import tflearn
import random
import docopt
from sklearn.grid_search import ParameterGrid
import cPickle as pickle
import logging
from random import shuffle
def get_parameters(data,epoch,sent_len,sent_numb,embedding_size, params):
"""
    create a random configuration, using dists to select random parameters
:return: neural network parameters for create_model
"""
embedding_file = 'data/glove.6B.{}d.txt'.format(embedding_size)
embeddings_mat = gen_embeddings(data._data['word_idx'], embedding_size, embedding_file)
dists = dict(
vocab_size = data._data["vocab_size"],label_num = data._data["vocab_size"],
sent_len = sent_len, sent_numb = sent_numb, embedding_size = embedding_size,
embeddings_mat = embeddings_mat, clip_gradients= 40.0,
max_norm = None, no_out = False, decay_steps = 0, decay_rate = 0, opt = 'Adam',
num_blocks = params['nb'],
learning_rate= params['lr'],
trainable = params['tr'],
L2 = params['L2']
)
return dists
def main(task_num,sample_size=''):
embedding_size = 100
epoch = 300
best_accuracy = 0.0
grind_ris={}
if not os.path.exists('data/ris/task_{}'.format(task_num)):
os.makedirs('data/ris/task_{}'.format(task_num))
param_grid = {'nb': [20],
'lr': [0.001],
'tr': [[0,0,0,0]],
'L2': [0.001],# [0.0,0.1,0.01,0.001,0.0001]
'bz': [32],
'dr': [0.5],
}
grid = list(ParameterGrid(param_grid))
np.random.shuffle(grid)
for params in list(grid):
data = Dataset('data/tasks_1-20_v1-2/en-valid{}/'.format(sample_size),int(task_num))
## for sentence
par = get_parameters(data,epoch,data._data['sent_len'],data._data['sent_numb'],embedding_size,params)
t = train(epoch,params['bz'], data, par, dr=params['dr'], _test=True)
acc = sorted([v for k,v in t[5].items()])[-1]
if (acc > best_accuracy):
best_accuracy = acc
grind_ris[str(params)] = acc
f_save = 'data/ris/task_{}/{}.PIK'.format(task_num,str(params)+str(acc))
with open(f_save, 'w') as f:
pickle.dump((t), f)
# batch_size = 32
# epoch = 200
# if not os.path.exists('data/ris/task_{}'.format(task_num)):
# os.makedirs('data/ris/task_{}'.format(task_num))
# data = Dataset('data/tasks_1-20_v1-2/en-valid{}/'.format(sample_size),int(task_num))
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s', datefmt='%m-%d %H:%M')
main(task_num=sys.argv[1])
| mit |
natefoo/tools-iuc | tools/vsnp/vsnp_add_zero_coverage.py | 12 | 6321 | #!/usr/bin/env python
import argparse
import os
import re
import shutil
import pandas
import pysam
from Bio import SeqIO
def get_sample_name(file_path):
base_file_name = os.path.basename(file_path)
if base_file_name.find(".") > 0:
# Eliminate the extension.
return os.path.splitext(base_file_name)[0]
return base_file_name
def get_coverage_df(bam_file):
# Create a coverage dictionary.
coverage_dict = {}
coverage_list = pysam.depth(bam_file, split_lines=True)
for line in coverage_list:
chrom, position, depth = line.split('\t')
coverage_dict["%s-%s" % (chrom, position)] = depth
# Convert it to a data frame.
coverage_df = pandas.DataFrame.from_dict(coverage_dict, orient='index', columns=["depth"])
return coverage_df
def get_zero_df(reference):
# Create a zero coverage dictionary.
zero_dict = {}
for record in SeqIO.parse(reference, "fasta"):
chrom = record.id
total_len = len(record.seq)
for pos in list(range(1, total_len + 1)):
zero_dict["%s-%s" % (str(chrom), str(pos))] = 0
    # Convert it to a data frame with a single depth column,
    # indexed by "chrom-position" strings.
zero_df = pandas.DataFrame.from_dict(zero_dict, orient='index', columns=["depth"])
return zero_df
def output_zc_vcf_file(base_file_name, vcf_file, zero_df, total_zero_coverage, output_vcf):
column_names = ["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "Sample"]
vcf_df = pandas.read_csv(vcf_file, sep='\t', header=None, names=column_names, comment='#')
good_snp_count = len(vcf_df[(vcf_df['ALT'].str.len() == 1) & (vcf_df['REF'].str.len() == 1) & (vcf_df['QUAL'] > 150)])
if total_zero_coverage > 0:
header_file = "%s_header.csv" % base_file_name
with open(header_file, 'w') as outfile:
with open(vcf_file) as infile:
for line in infile:
if re.search('^#', line):
outfile.write("%s" % line)
vcf_df_snp = vcf_df[vcf_df['REF'].str.len() == 1]
vcf_df_snp = vcf_df_snp[vcf_df_snp['ALT'].str.len() == 1]
vcf_df_snp['ABS_VALUE'] = vcf_df_snp['CHROM'].map(str) + "-" + vcf_df_snp['POS'].map(str)
vcf_df_snp = vcf_df_snp.set_index('ABS_VALUE')
cat_df = pandas.concat([vcf_df_snp, zero_df], axis=1, sort=False)
cat_df = cat_df.drop(columns=['CHROM', 'POS', 'depth'])
cat_df[['ID', 'ALT', 'QUAL', 'FILTER', 'INFO']] = cat_df[['ID', 'ALT', 'QUAL', 'FILTER', 'INFO']].fillna('.')
cat_df['REF'] = cat_df['REF'].fillna('N')
cat_df['FORMAT'] = cat_df['FORMAT'].fillna('GT')
cat_df['Sample'] = cat_df['Sample'].fillna('./.')
cat_df['temp'] = cat_df.index.str.rsplit('-', n=1)
cat_df[['CHROM', 'POS']] = pandas.DataFrame(cat_df.temp.values.tolist(), index=cat_df.index)
cat_df = cat_df[['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', 'Sample']]
cat_df['POS'] = cat_df['POS'].astype(int)
cat_df = cat_df.sort_values(['CHROM', 'POS'])
body_file = "%s_body.csv" % base_file_name
cat_df.to_csv(body_file, sep='\t', header=False, index=False)
with open(output_vcf, "w") as outfile:
for cf in [header_file, body_file]:
with open(cf, "r") as infile:
for line in infile:
outfile.write("%s" % line)
else:
shutil.move(vcf_file, output_vcf)
return good_snp_count
def output_metrics_file(base_file_name, average_coverage, genome_coverage, good_snp_count, output_metrics):
bam_metrics = [base_file_name, "", "%4f" % average_coverage, genome_coverage]
vcf_metrics = [base_file_name, str(good_snp_count), "", ""]
metrics_columns = ["File", "Number of Good SNPs", "Average Coverage", "Genome Coverage"]
with open(output_metrics, "w") as fh:
fh.write("# %s\n" % "\t".join(metrics_columns))
fh.write("%s\n" % "\t".join(bam_metrics))
fh.write("%s\n" % "\t".join(vcf_metrics))
def output_files(vcf_file, total_zero_coverage, zero_df, output_vcf, average_coverage, genome_coverage, output_metrics):
base_file_name = get_sample_name(vcf_file)
good_snp_count = output_zc_vcf_file(base_file_name, vcf_file, zero_df, total_zero_coverage, output_vcf)
output_metrics_file(base_file_name, average_coverage, genome_coverage, good_snp_count, output_metrics)
def get_coverage_and_snp_count(bam_file, vcf_file, reference, output_metrics, output_vcf):
coverage_df = get_coverage_df(bam_file)
zero_df = get_zero_df(reference)
coverage_df = zero_df.merge(coverage_df, left_index=True, right_index=True, how='outer')
# depth_x "0" column no longer needed.
coverage_df = coverage_df.drop(columns=['depth_x'])
coverage_df = coverage_df.rename(columns={'depth_y': 'depth'})
    # Convert the NaN to 0 coverage and get some metrics.
coverage_df = coverage_df.fillna(0)
coverage_df['depth'] = coverage_df['depth'].apply(int)
total_length = len(coverage_df)
average_coverage = coverage_df['depth'].mean()
zero_df = coverage_df[coverage_df['depth'] == 0]
total_zero_coverage = len(zero_df)
total_coverage = total_length - total_zero_coverage
genome_coverage = "{:.2%}".format(total_coverage / total_length)
    # Output a zero-coverage vcf file and the metrics file.
output_files(vcf_file, total_zero_coverage, zero_df, output_vcf, average_coverage, genome_coverage, output_metrics)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--bam_input', action='store', dest='bam_input', help='bam input file')
parser.add_argument('--output_metrics', action='store', dest='output_metrics', required=False, default=None, help='Output metrics text file')
parser.add_argument('--output_vcf', action='store', dest='output_vcf', required=False, default=None, help='Output VCF file')
parser.add_argument('--reference', action='store', dest='reference', help='Reference dataset')
parser.add_argument('--vcf_input', action='store', dest='vcf_input', help='vcf input file')
args = parser.parse_args()
get_coverage_and_snp_count(args.bam_input, args.vcf_input, args.reference, args.output_metrics, args.output_vcf)
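    # Example invocation (file names below are hypothetical):
    #   python vsnp_add_zero_coverage.py --bam_input sample.bam --vcf_input sample.vcf \
    #       --reference reference.fasta --output_metrics metrics.tsv --output_vcf sample_zc.vcf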
| mit |
JuBra/cobrapy | cobra/test/flux_analysis.py | 1 | 13461 | from unittest import TestCase, TestLoader, TextTestRunner, skipIf
from warnings import warn
import sys
from os.path import join
from json import load
from contextlib import contextmanager
import re
from six import iteritems, StringIO
try:
import numpy
except:
numpy = None
try:
import matplotlib
except:
matplotlib = None
try:
import pandas
except:
pandas = None
if __name__ == "__main__":
sys.path.insert(0, "../..")
from cobra.test import create_test_model, data_directory
from cobra import Model, Reaction, Metabolite
from cobra.manipulation import initialize_growth_medium
from cobra.solvers import solver_dict, get_solver_name
from cobra.flux_analysis import *
sys.path.pop(0)
else:
from . import create_test_model, data_directory
from .. import Model, Reaction, Metabolite
from ..manipulation import initialize_growth_medium
from ..solvers import solver_dict, get_solver_name
from ..flux_analysis import *
@contextmanager
def captured_output():
""" A context manager to test the IO summary methods """
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
class TestCobraFluxAnalysis(TestCase):
"""Test the simulation functions in cobra.flux_analysis"""
def setUp(self):
pass
def test_pFBA(self):
model = create_test_model("textbook")
for solver in solver_dict:
optimize_minimal_flux(model, solver=solver)
abs_x = [abs(i) for i in model.solution.x]
self.assertEqual(model.solution.status, "optimal")
self.assertAlmostEqual(model.solution.f, 0.8739, places=3)
self.assertAlmostEqual(sum(abs_x), 518.4221, places=3)
def test_single_gene_deletion(self):
cobra_model = create_test_model("textbook")
# expected knockouts for textbook model
growth_dict = {"fba": {"b0008": 0.87, "b0114": 0.80, "b0116": 0.78,
"b2276": 0.21, "b1779": 0.00},
"moma": {"b0008": 0.87, "b0114": 0.71, "b0116": 0.56,
"b2276": 0.11, "b1779": 0.00},
}
# MOMA requires cplex or gurobi
try:
get_solver_name(qp=True)
except:
growth_dict.pop('moma')
for method, expected in growth_dict.items():
rates, statuses = single_gene_deletion(cobra_model,
gene_list=expected.keys(),
method=method)
for gene, expected_value in iteritems(expected):
self.assertEqual(statuses[gene], 'optimal')
self.assertAlmostEqual(rates[gene], expected_value, places=2)
def test_single_reaction_deletion(self):
cobra_model = create_test_model("textbook")
expected_results = {'FBA': 0.70404, 'FBP': 0.87392, 'CS': 0,
'FUM': 0.81430, 'GAPD': 0, 'GLUDy': 0.85139}
results, status = single_reaction_deletion(
cobra_model, reaction_list=expected_results.keys())
self.assertEqual(len(results), 6)
self.assertEqual(len(status), 6)
for status_value in status.values():
self.assertEqual(status_value, "optimal")
for reaction, value in results.items():
self.assertAlmostEqual(value, expected_results[reaction], 5)
def compare_matrices(self, matrix1, matrix2, places=3):
nrows = len(matrix1)
ncols = len(matrix1[0])
self.assertEqual(nrows, len(matrix2))
self.assertEqual(ncols, len(matrix2[0]))
for i in range(nrows):
for j in range(ncols):
self.assertAlmostEqual(matrix1[i][j], matrix2[i][j],
places=places)
@skipIf(numpy is None, "double deletions require numpy")
def test_double_gene_deletion(self):
cobra_model = create_test_model("textbook")
genes = ["b0726", "b4025", "b0724", "b0720",
"b2935", "b2935", "b1276", "b1241"]
growth_list = [
[0.858, 0.857, 0.814, 0.000, 0.858, 0.858, 0.858, 0.858],
[0.857, 0.863, 0.739, 0.000, 0.863, 0.863, 0.863, 0.863],
[0.814, 0.739, 0.814, 0.000, 0.814, 0.814, 0.814, 0.814],
[0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000],
[0.858, 0.863, 0.814, 0.000, 0.874, 0.874, 0.874, 0.874],
[0.858, 0.863, 0.814, 0.000, 0.874, 0.874, 0.874, 0.874],
[0.858, 0.863, 0.814, 0.000, 0.874, 0.874, 0.874, 0.874],
[0.858, 0.863, 0.814, 0.000, 0.874, 0.874, 0.874, 0.874]]
solution = double_gene_deletion(cobra_model, gene_list1=genes)
self.assertEqual(solution["x"], genes)
self.assertEqual(solution["y"], genes)
self.compare_matrices(growth_list, solution["data"])
# test when lists differ slightly
solution = double_gene_deletion(cobra_model, gene_list1=genes[:-1],
gene_list2=genes,
number_of_processes=1)
self.assertEqual(solution["x"], genes[:-1])
self.assertEqual(solution["y"], genes)
self.compare_matrices(growth_list[:-1], solution["data"])
@skipIf(numpy is None, "double deletions require numpy")
def test_double_reaction_deletion(self):
cobra_model = create_test_model("textbook")
reactions = ['FBA', 'ATPS4r', 'ENO', 'FRUpts2']
growth_list = [[0.704, 0.135, 0.000, 0.704],
[0.135, 0.374, 0.000, 0.374],
[0.000, 0.000, 0.000, 0.000],
[0.704, 0.374, 0.000, 0.874]]
solution = double_reaction_deletion(cobra_model,
reaction_list1=reactions,
number_of_processes=1)
self.assertEqual(solution["x"], reactions)
self.assertEqual(solution["y"], reactions)
self.compare_matrices(growth_list, solution["data"])
def test_flux_variability(self):
with open(join(data_directory, "textbook_fva.json"), "r") as infile:
fva_results = load(infile)
infeasible_model = create_test_model("textbook")
infeasible_model.reactions.get_by_id("EX_glc__D_e").lower_bound = 0
for solver in solver_dict:
# esolver is really slow
if solver == "esolver":
continue
cobra_model = create_test_model("textbook")
fva_out = flux_variability_analysis(
cobra_model, solver=solver,
reaction_list=cobra_model.reactions[1::3])
for name, result in iteritems(fva_out):
for k, v in iteritems(result):
self.assertAlmostEqual(fva_results[name][k], v, places=5)
# ensure that an infeasible model does not run FVA
self.assertRaises(ValueError, flux_variability_analysis,
infeasible_model, solver=solver)
def test_find_blocked_reactions(self):
m = create_test_model("textbook")
result = find_blocked_reactions(m, m.reactions[40:46])
self.assertEqual(result, ['FRUpts2'])
result = find_blocked_reactions(m, m.reactions[42:48])
self.assertEqual(set(result), {'FUMt2_2', 'FRUpts2'})
result = find_blocked_reactions(m, m.reactions[30:50],
open_exchanges=True)
self.assertEqual(result, [])
def test_loopless(self):
try:
solver = get_solver_name(mip=True)
except:
self.skipTest("no MILP solver found")
test_model = Model()
test_model.add_metabolites(Metabolite("A"))
test_model.add_metabolites(Metabolite("B"))
test_model.add_metabolites(Metabolite("C"))
EX_A = Reaction("EX_A")
EX_A.add_metabolites({test_model.metabolites.A: 1})
DM_C = Reaction("DM_C")
DM_C.add_metabolites({test_model.metabolites.C: -1})
v1 = Reaction("v1")
v1.add_metabolites({test_model.metabolites.A: -1,
test_model.metabolites.B: 1})
v2 = Reaction("v2")
v2.add_metabolites({test_model.metabolites.B: -1,
test_model.metabolites.C: 1})
v3 = Reaction("v3")
v3.add_metabolites({test_model.metabolites.C: -1,
test_model.metabolites.A: 1})
DM_C.objective_coefficient = 1
test_model.add_reactions([EX_A, DM_C, v1, v2, v3])
feasible_sol = construct_loopless_model(test_model).optimize()
v3.lower_bound = 1
infeasible_sol = construct_loopless_model(test_model).optimize()
self.assertEqual(feasible_sol.status, "optimal")
self.assertEqual(infeasible_sol.status, "infeasible")
def test_gapfilling(self):
try:
solver = get_solver_name(mip=True)
except:
self.skipTest("no MILP solver found")
m = Model()
m.add_metabolites(map(Metabolite, ["a", "b", "c"]))
r = Reaction("EX_A")
m.add_reaction(r)
r.add_metabolites({m.metabolites.a: 1})
r = Reaction("r1")
m.add_reaction(r)
r.add_metabolites({m.metabolites.b: -1, m.metabolites.c: 1})
r = Reaction("DM_C")
m.add_reaction(r)
r.add_metabolites({m.metabolites.c: -1})
r.objective_coefficient = 1
U = Model()
r = Reaction("a2b")
U.add_reaction(r)
r.build_reaction_from_string("a --> b", verbose=False)
r = Reaction("a2d")
U.add_reaction(r)
r.build_reaction_from_string("a --> d", verbose=False)
result = gapfilling.growMatch(m, U)[0]
self.assertEqual(len(result), 1)
self.assertEqual(result[0].id, "a2b")
# 2 rounds with exchange reactions
result = gapfilling.growMatch(m, None, ex_rxns=True, iterations=2)
self.assertEqual(len(result), 2)
self.assertEqual(len(result[0]), 1)
self.assertEqual(len(result[1]), 1)
self.assertEqual({i[0].id for i in result},
{"SMILEY_EX_b", "SMILEY_EX_c"})
@skipIf(numpy is None, "phase plane requires numpy")
def test_phenotype_phase_plane(self):
model = create_test_model("textbook")
data = calculate_phenotype_phase_plane(
model, "EX_glc__D_e", "EX_o2_e",
reaction1_npoints=20, reaction2_npoints=20)
self.assertEqual(data.growth_rates.shape, (20, 20))
self.assertAlmostEqual(data.growth_rates.max(), 1.20898, places=4)
self.assertAlmostEqual(abs(data.growth_rates[0, :]).max(), 0, places=4)
if matplotlib is None:
self.skipTest("can't test plots without matplotlib")
data.plot()
@skipIf(pandas is None, "summary methods require pandas")
def test_summary_methods(self):
# Test model summary methods
model = create_test_model("textbook")
model.optimize()
desired_entries = [
u'glc__D_e -9.76 \u00B1 0.24'
u'co2_e 21.81 \u00B1 2.86',
u'nh4_e -4.84 \u00B1 0.32'
u'h_e 19.51 \u00B1 2.86',
u'pi_e -3.13 \u00B1 0.08'
u'for_e 2.86 \u00B1 2.86',
u'ac_e 0.95 \u00B1 0.95',
u'acald_e 0.64 \u00B1 0.64',
u'pyr_e 0.64 \u00B1 0.64',
u'etoh_e 0.55 \u00B1 0.55',
u'lac__D_e 0.54 \u00B1 0.54',
u'succ_e 0.42 \u00B1 0.42',
u'akg_e 0.36 \u00B1 0.36',
u'glu__L_e 0.32 \u00B1 0.32']
with captured_output() as (out, err):
model.summary(fva=0.95)
output = out.getvalue().strip()
output_set = set((re.sub('\s', '', l) for l in output.splitlines()))
for item in desired_entries:
self.assertIn(re.sub('\s', '', item), output_set)
# Test metabolite summary methods
desired_entries = [
'PRODUCING REACTIONS -- Ubiquinone-8',
'-----------------------------------',
'% FLUX RXN ID'
'REACTION',
'100.0% 44 CYTBD'
'2.0 h_c + 0.5 o2_c + q8h2_c --> h2o_c + 2.0 h_e +...',
'CONSUMING REACTIONS -- Ubiquinone-8',
'-----------------------------------',
'88.4% -39 NADH16'
'4.0 h_c + nadh_c + q8_c --> 3.0 h_e + nad_c + q8h2_c',
'11.6% -5.1 SUCDi '
'q8_c + succ_c --> fum_c + q8h2_c',
]
with captured_output() as (out, err):
model.metabolites.q8_c.summary()
output = out.getvalue().strip()
output_set = set((re.sub('\s', '', l) for l in output.splitlines()))
for item in desired_entries:
self.assertIn(re.sub('\s', '', item), output_set)
# make a test suite to run all of the tests
loader = TestLoader()
suite = loader.loadTestsFromModule(sys.modules[__name__])
def test_all():
TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
test_all()
| lgpl-2.1 |
mlperf/training_results_v0.5 | v0.5.0/google/cloud_v3.8/resnet-tpuv3-8/code/resnet/model/staging/models/rough/nmt/nmt.py | 2 | 28086 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow NMT model implementation."""
from __future__ import print_function
import argparse
import os
import random
import sys
# import matplotlib.image as mpimg
import numpy as np
import tensorflow as tf
from tensorflow.contrib.training.python.training import evaluation
from mlperf_compliance import mlperf_log
import distributed_iterator_utils
import estimator
from utils import misc_utils as utils
from utils import vocab_utils
utils.check_tensorflow_version()
FLAGS = None
def add_arguments(parser):
"""Build ArgumentParser."""
parser.register("type", "bool", lambda v: v.lower() == "true")
# network
parser.add_argument(
"--num_units", type=int, default=1024, help="Network size.")
parser.add_argument(
"--num_layers", type=int, default=4, help="Network depth.")
parser.add_argument("--num_encoder_layers", type=int, default=None,
help="Encoder depth, equal to num_layers if None.")
parser.add_argument("--num_decoder_layers", type=int, default=None,
help="Decoder depth, equal to num_layers if None.")
parser.add_argument(
"--encoder_type",
type=str,
default="gnmt",
help="""\
uni | bi | gnmt.
For bi, we build num_encoder_layers/2 bi-directional layers.
For gnmt, we build 1 bi-directional layer, and (num_encoder_layers - 1)
uni-directional layers.\
""")
parser.add_argument(
"--residual",
type="bool",
nargs="?",
const=True,
default=True,
help="Whether to add residual connections.")
parser.add_argument("--time_major", type="bool", nargs="?", const=True,
default=True,
help="Whether to use time-major mode for dynamic RNN.")
parser.add_argument("--num_embeddings_partitions", type=int, default=0,
help="Number of partitions for embedding vars.")
# attention mechanisms
parser.add_argument(
"--attention",
type=str,
default="normed_bahdanau",
help="""\
luong | scaled_luong | bahdanau | normed_bahdanau or set to "" for no
attention\
""")
parser.add_argument(
"--attention_architecture",
type=str,
default="gnmt_v2",
help="""\
standard | gnmt | gnmt_v2.
standard: use top layer to compute attention.
gnmt: GNMT style of computing attention, use previous bottom layer to
compute attention.
gnmt_v2: similar to gnmt, but use current bottom layer to compute
attention.\
""")
parser.add_argument(
"--output_attention", type="bool", nargs="?", const=True,
default=True,
help="""\
Only used in standard attention_architecture. Whether use attention as
the cell output at each timestep.
.\
""")
parser.add_argument(
"--pass_hidden_state", type="bool", nargs="?", const=True,
default=True,
help="""\
Whether to pass encoder's hidden state to decoder when using an attention
based model.\
""")
# optimizer
parser.add_argument(
"--optimizer", type=str, default="adam", help="sgd | adam")
parser.add_argument(
"--learning_rate",
type=float,
default=0.001,
help="Learning rate. Adam: 0.001 | 0.0001")
parser.add_argument("--warmup_steps", type=int, default=0,
help="How many steps we inverse-decay learning.")
parser.add_argument("--warmup_scheme", type=str, default="t2t", help="""\
How to warmup learning rates. Options include:
t2t: Tensor2Tensor's way, start with lr 100 times smaller, then
exponentiate until the specified lr.\
""")
parser.add_argument(
"--decay_scheme", type=str, default="", help="""\
How we decay learning rate. Options include:
luong234: after 2/3 num train steps, we start halving the learning rate
for 4 times before finishing.
luong5: after 1/2 num train steps, we start halving the learning rate
for 5 times before finishing.\
luong10: after 1/2 num train steps, we start halving the learning rate
for 10 times before finishing.\
""")
parser.add_argument(
"--max_train_epochs", type=int, default=8,
help="Maximum number of training epochs.")
parser.add_argument("--num_examples_per_epoch", type=int, default=4067649,
help="Number of examples in one epoch")
parser.add_argument("--colocate_gradients_with_ops", type="bool", nargs="?",
const=True,
default=True,
help=("Whether try colocating gradients with "
"corresponding op"))
parser.add_argument("--label_smoothing", type=float, default=0.1,
help=("If nonzero, smooth the labels towards "
"1/num_classes."))
# initializer
parser.add_argument("--init_op", type=str, default="uniform",
help="uniform | glorot_normal | glorot_uniform")
parser.add_argument("--init_weight", type=float, default=0.1,
help=("for uniform init_op, initialize weights "
"between [-this, this]."))
# data
parser.add_argument(
"--src", type=str, default="en", help="Source suffix, e.g., en.")
parser.add_argument(
"--tgt", type=str, default="de", help="Target suffix, e.g., de.")
parser.add_argument(
"--data_dir", type=str, default="", help="Training/eval data directory.")
parser.add_argument(
"--train_prefix",
type=str,
default="train.tok.clean.bpe.32000",
help="Train prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--test_prefix",
type=str,
default="newstest2014.tok.bpe.32000.padded",
help="Test prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--out_dir", type=str, default=None, help="Store log/model files.")
# Vocab
parser.add_argument(
"--vocab_prefix",
type=str,
default="vocab.bpe.32000",
help="""\
Vocab prefix, expect files with src/tgt suffixes.\
""")
parser.add_argument(
"--embed_prefix",
type=str,
default=None,
help="""\
Pretrained embedding prefix, expect files with src/tgt suffixes.
The embedding files should be Glove formatted txt files.\
""")
parser.add_argument("--sos", type=str, default="<s>",
help="Start-of-sentence symbol.")
parser.add_argument("--eos", type=str, default="</s>",
help="End-of-sentence symbol.")
parser.add_argument(
"--share_vocab",
type="bool",
nargs="?",
const=True,
default=True,
help="""\
Whether to use the source vocab and embeddings for both source and
target.\
""")
parser.add_argument("--check_special_token", type="bool", default=True,
help="""\
Whether check special sos, eos, unk tokens exist in the
vocab files.\
""")
# Sequence lengths
parser.add_argument(
"--src_max_len",
type=int,
default=128,
help="Max length of src sequences during training.")
parser.add_argument(
"--tgt_max_len",
type=int,
default=128,
help="Max length of tgt sequences during training.")
parser.add_argument("--src_max_len_infer", type=int, default=None,
help="Max length of src sequences during inference.")
parser.add_argument("--tgt_max_len_infer", type=int, default=80,
help="""\
Max length of tgt sequences during inference. Also use to restrict the
maximum decoding length.\
""")
# Default settings works well (rarely need to change)
parser.add_argument("--unit_type", type=str, default="lstm",
help="lstm | gru | layer_norm_lstm | nas")
parser.add_argument("--forget_bias", type=float, default=1.0,
help="Forget bias for BasicLSTMCell.")
parser.add_argument("--dropout", type=float, default=0.2,
help="Dropout rate (not keep_prob)")
parser.add_argument("--max_gradient_norm", type=float, default=5.0,
help="Clip gradients to this norm.")
parser.add_argument("--batch_size", type=int, default=512, help="Batch size.")
parser.add_argument("--steps_per_stats", type=int, default=5,
help=("How many training steps to do per stats logging."
"Save checkpoint every 10x steps_per_stats"))
parser.add_argument(
"--num_buckets",
type=int,
default=5,
help="Put data into similar-length buckets.")
parser.add_argument(
"--choose_buckets",
type=int,
default=None,
help="Choose from this number of length buckets per training step.")
parser.add_argument("--num_sampled_softmax", type=int, default=0,
help=("Use sampled_softmax_loss if > 0."
"Otherwise, use full softmax loss."))
# SPM
parser.add_argument("--subword_option", type=str, default="bpe",
choices=["", "bpe", "spm"],
help="""\
Set to bpe or spm to activate subword desegmentation.\
""")
# Experimental encoding feature.
parser.add_argument("--use_char_encode", type="bool", default=False,
help="""\
Whether to split each word or bpe into character, and then
generate the word-level representation from the character
                      representation.
""")
# Misc
parser.add_argument(
"--num_gpus", type=int, default=4, help="Number of gpus in each worker.")
parser.add_argument(
"--num_tpu_workers",
type=int,
default=None,
help="Number of TPU workers; if set, uses the distributed-sync pipeline.")
parser.add_argument(
"--log_device_placement",
type="bool",
nargs="?",
const=True,
default=True,
help="Debug GPU allocation.")
parser.add_argument("--scope", type=str, default=None,
help="scope to put variables under")
parser.add_argument("--hparams_path", type=str, default=None,
help=("Path to standard hparams json file that overrides"
"hparams values from FLAGS."))
parser.add_argument(
"--random_seed",
type=int,
default=1,
help="Random seed (>0, set a specific seed).")
parser.add_argument("--override_loaded_hparams", type="bool", nargs="?",
const=True, default=False,
help="Override loaded hparams with values specified")
parser.add_argument("--language_model", type="bool", nargs="?",
const=True, default=False,
help="True to train a language model, ignoring encoder")
# Inference
parser.add_argument("--ckpt", type=str, default="",
help="Checkpoint file to load a model for inference.")
parser.add_argument(
"--infer_batch_size",
type=int,
default=512,
help="Batch size for inference mode.")
parser.add_argument(
"--examples_to_infer",
type=int,
default=3003,
help="Number of examples to infer.")
parser.add_argument("--detokenizer_file", type=str,
default="mosesdecoder/scripts/tokenizer/detokenizer.perl",
help=("""Detokenizer script file."""))
parser.add_argument("--use_borg", type=bool, default=False)
parser.add_argument("--target_bleu", type=float, default=22.0,
help="Target accuracy.")
# Advanced inference arguments
parser.add_argument("--infer_mode", type=str, default="beam_search",
choices=["greedy", "sample", "beam_search"],
help="Which type of decoder to use during inference.")
parser.add_argument("--beam_width", type=int, default=5,
help=("""\
beam width when using beam search decoder. If 0 (default), use standard
decoder with greedy helper.\
"""))
parser.add_argument(
"--length_penalty_weight",
type=float,
default=0.6,
help="Length penalty for beam search.")
parser.add_argument(
"--coverage_penalty_weight",
type=float,
default=0.1,
help="Coverage penalty for beam search.")
parser.add_argument("--sampling_temperature", type=float,
default=0.0,
help=("""\
Softmax sampling temperature for inference decoding, 0.0 means greedy
decoding. This option is ignored when using beam search.\
"""))
# Job info
parser.add_argument("--jobid", type=int, default=0,
help="Task id of the worker.")
# TPU
parser.add_argument("--use_tpu", type=bool, default=True)
parser.add_argument("--master", type=str, default="",
help=("Address of the master. Either --master or "
"--tpu_name must be specified."))
parser.add_argument("--tpu_name", type=str, default=None,
help=("Name of the TPU for Cluster Resolvers. Either "
"--tpu_name or --master must be specified."))
parser.add_argument("--use_dynamic_rnn", type=bool, default=False)
parser.add_argument("--use_synthetic_data", type=bool, default=False)
parser.add_argument(
"--mode", type=str, default="train_and_eval",
choices=["train", "train_and_eval", "infer"])
parser.add_argument("--activation_dtype", type=str, default="float32",
choices=["float32", "bfloat16"])
parser.add_argument("--use_async_checkpoint", type=bool, default=True)
def create_hparams(flags):
"""Create training hparams."""
return tf.contrib.training.HParams(
# Data
src=flags.src,
tgt=flags.tgt,
train_prefix=flags.data_dir + flags.train_prefix,
test_prefix=flags.data_dir + flags.test_prefix,
vocab_prefix=flags.data_dir + flags.vocab_prefix,
embed_prefix=flags.embed_prefix,
out_dir=flags.out_dir,
# Networks
num_units=flags.num_units,
num_encoder_layers=(flags.num_encoder_layers or flags.num_layers),
num_decoder_layers=(flags.num_decoder_layers or flags.num_layers),
dropout=flags.dropout,
unit_type=flags.unit_type,
encoder_type=flags.encoder_type,
residual=flags.residual,
time_major=flags.time_major,
num_embeddings_partitions=flags.num_embeddings_partitions,
# Attention mechanisms
attention=flags.attention,
attention_architecture=flags.attention_architecture,
output_attention=flags.output_attention,
pass_hidden_state=flags.pass_hidden_state,
# Train
optimizer=flags.optimizer,
max_train_epochs=flags.max_train_epochs,
num_examples_per_epoch=flags.num_examples_per_epoch,
batch_size=flags.batch_size,
num_train_steps=int(flags.num_examples_per_epoch / flags.batch_size *
flags.max_train_epochs),
init_op=flags.init_op,
init_weight=flags.init_weight,
max_gradient_norm=flags.max_gradient_norm,
learning_rate=flags.learning_rate,
label_smoothing=flags.label_smoothing,
warmup_steps=flags.warmup_steps,
warmup_scheme=flags.warmup_scheme,
decay_scheme=flags.decay_scheme,
colocate_gradients_with_ops=flags.colocate_gradients_with_ops,
num_sampled_softmax=flags.num_sampled_softmax,
# Data constraints
num_buckets=flags.num_buckets,
choose_buckets=flags.choose_buckets,
src_max_len=flags.src_max_len,
tgt_max_len=flags.tgt_max_len,
# Inference
src_max_len_infer=flags.src_max_len_infer,
tgt_max_len_infer=flags.tgt_max_len_infer,
infer_batch_size=flags.infer_batch_size,
examples_to_infer=flags.examples_to_infer,
detokenizer_file=flags.data_dir + flags.detokenizer_file,
use_borg=flags.use_borg,
target_bleu=flags.target_bleu,
# Advanced inference arguments
infer_mode=flags.infer_mode,
beam_width=flags.beam_width,
length_penalty_weight=flags.length_penalty_weight,
coverage_penalty_weight=flags.coverage_penalty_weight,
sampling_temperature=flags.sampling_temperature,
# Vocab
sos=flags.sos if flags.sos else vocab_utils.SOS,
eos=flags.eos if flags.eos else vocab_utils.EOS,
subword_option=flags.subword_option,
check_special_token=flags.check_special_token,
use_char_encode=flags.use_char_encode,
# Misc
forget_bias=flags.forget_bias,
num_gpus=flags.num_gpus,
epoch_step=0, # record where we were within an epoch.
steps_per_stats=flags.steps_per_stats,
share_vocab=flags.share_vocab,
log_device_placement=flags.log_device_placement,
random_seed=flags.random_seed,
override_loaded_hparams=flags.override_loaded_hparams,
language_model=flags.language_model,
# TPU
use_tpu=flags.use_tpu,
master=flags.master,
tpu_name=flags.tpu_name,
use_dynamic_rnn=flags.use_dynamic_rnn,
use_synthetic_data=flags.use_synthetic_data,
mode=flags.mode,
activation_dtype=flags.activation_dtype,
use_async_checkpoint=flags.use_async_checkpoint)
def _add_argument(hparams, key, value, update=True):
"""Add an argument to hparams; if exists, change the value if update==True."""
if hasattr(hparams, key):
if update:
setattr(hparams, key, value)
else:
hparams.add_hparam(key, value)
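# Minimal illustration of _add_argument (values assumed, not from the original
# file):
#   _add_argument(hparams, "num_residual_layers", 3)                 # added if missing
#   _add_argument(hparams, "num_residual_layers", 4, update=False)   # existing value kept at 3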
def extend_hparams(hparams):
"""Add new arguments to hparams."""
# Sanity checks
if hparams.encoder_type == "bi" and hparams.num_encoder_layers % 2 != 0:
raise ValueError("For bi, num_encoder_layers %d should be even" %
hparams.num_encoder_layers)
if (hparams.attention_architecture in ["gnmt"] and
hparams.num_encoder_layers < 2):
raise ValueError("For gnmt attention architecture, "
"num_encoder_layers %d should be >= 2" %
hparams.num_encoder_layers)
if hparams.subword_option and hparams.subword_option not in ["spm", "bpe"]:
raise ValueError("subword option must be either spm, or bpe")
if hparams.infer_mode == "beam_search" and hparams.beam_width <= 0:
raise ValueError("beam_width must greater than 0 when using beam_search"
"decoder.")
if hparams.infer_mode == "sample" and hparams.sampling_temperature <= 0.0:
raise ValueError("sampling_temperature must greater than 0.0 when using"
"sample decoder.")
# Different number of encoder / decoder layers
assert hparams.num_encoder_layers and hparams.num_decoder_layers
if hparams.num_encoder_layers != hparams.num_decoder_layers:
hparams.pass_hidden_state = False
utils.print_out("Num encoder layer %d is different from num decoder layer"
" %d, so set pass_hidden_state to False" % (
hparams.num_encoder_layers,
hparams.num_decoder_layers))
# Set residual layers
num_encoder_residual_layers = 0
num_decoder_residual_layers = 0
if hparams.residual:
if hparams.num_encoder_layers > 1:
num_encoder_residual_layers = hparams.num_encoder_layers - 1
if hparams.num_decoder_layers > 1:
num_decoder_residual_layers = hparams.num_decoder_layers - 1
if hparams.encoder_type == "gnmt":
# The first unidirectional layer (after the bi-directional layer) in
# the GNMT encoder can't have residual connection due to the input is
# the concatenation of fw_cell and bw_cell's outputs.
num_encoder_residual_layers = hparams.num_encoder_layers - 2
# Compatible for GNMT models
if hparams.num_encoder_layers == hparams.num_decoder_layers:
num_decoder_residual_layers = num_encoder_residual_layers
_add_argument(hparams, "num_encoder_residual_layers",
num_encoder_residual_layers)
_add_argument(hparams, "num_decoder_residual_layers",
num_decoder_residual_layers)
# Language modeling
if hparams.language_model:
hparams.attention = ""
hparams.attention_architecture = ""
hparams.pass_hidden_state = False
hparams.share_vocab = True
hparams.src = hparams.tgt
utils.print_out("For language modeling, we turn off attention and "
"pass_hidden_state; turn on share_vocab; set src to tgt.")
## Vocab
# Get vocab file names first
if hparams.vocab_prefix:
src_vocab_file = hparams.vocab_prefix + "." + hparams.src
tgt_vocab_file = hparams.vocab_prefix + "." + hparams.tgt
else:
raise ValueError("hparams.vocab_prefix must be provided.")
# Source vocab
src_vocab_size, src_vocab_file = vocab_utils.check_vocab(
src_vocab_file,
hparams.out_dir,
check_special_token=hparams.check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
# Target vocab
if hparams.share_vocab:
utils.print_out(" using source vocab for target")
tgt_vocab_file = src_vocab_file
tgt_vocab_size = src_vocab_size
else:
tgt_vocab_size, tgt_vocab_file = vocab_utils.check_vocab(
tgt_vocab_file,
hparams.out_dir,
check_special_token=hparams.check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
mlperf_log.gnmt_print(key=mlperf_log.PREPROC_VOCAB_SIZE,
value={"src": src_vocab_size, "tgt": tgt_vocab_size})
_add_argument(hparams, "src_vocab_size", src_vocab_size)
_add_argument(hparams, "tgt_vocab_size", tgt_vocab_size)
_add_argument(hparams, "src_vocab_file", src_vocab_file)
_add_argument(hparams, "tgt_vocab_file", tgt_vocab_file)
# Num embedding partitions
_add_argument(
hparams, "num_enc_emb_partitions", hparams.num_embeddings_partitions)
_add_argument(
hparams, "num_dec_emb_partitions", hparams.num_embeddings_partitions)
# Pretrained Embeddings
_add_argument(hparams, "src_embed_file", "")
_add_argument(hparams, "tgt_embed_file", "")
if hparams.embed_prefix:
src_embed_file = hparams.embed_prefix + "." + hparams.src
tgt_embed_file = hparams.embed_prefix + "." + hparams.tgt
if tf.gfile.Exists(src_embed_file):
utils.print_out(" src_embed_file %s exist" % src_embed_file)
hparams.src_embed_file = src_embed_file
utils.print_out(
"For pretrained embeddings, set num_enc_emb_partitions to 1")
hparams.num_enc_emb_partitions = 1
else:
utils.print_out(" src_embed_file %s doesn't exist" % src_embed_file)
if tf.gfile.Exists(tgt_embed_file):
utils.print_out(" tgt_embed_file %s exist" % tgt_embed_file)
hparams.tgt_embed_file = tgt_embed_file
utils.print_out(
"For pretrained embeddings, set num_dec_emb_partitions to 1")
hparams.num_dec_emb_partitions = 1
else:
utils.print_out(" tgt_embed_file %s doesn't exist" % tgt_embed_file)
return hparams
def create_or_load_hparams(default_hparams, hparams_path):
"""Create hparams or load hparams from out_dir."""
hparams = utils.maybe_parse_standard_hparams(default_hparams, hparams_path)
hparams = extend_hparams(hparams)
# Print HParams
utils.print_hparams(hparams)
return hparams
def run_main(flags, default_hparams, estimator_fn):
"""Run main."""
# Job
jobid = flags.jobid
utils.print_out("# Job id %d" % jobid)
# Random
random_seed = flags.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed + jobid)
np.random.seed(random_seed + jobid)
tf.set_random_seed(random_seed)
# Model output directory
out_dir = flags.out_dir
if out_dir and not tf.gfile.Exists(out_dir):
utils.print_out("# Creating output directory %s ..." % out_dir)
tf.gfile.MakeDirs(out_dir)
# Load hparams.
hparams = create_or_load_hparams(default_hparams, flags.hparams_path)
# Train or Evaluation
return estimator_fn(hparams)
def main(unused_argv):
# pylint: disable=g-long-lambda
if FLAGS.mode == "train":
print("Running training mode.")
mlperf_log.gnmt_print(key=mlperf_log.RUN_START)
default_hparams = create_hparams(FLAGS)
if FLAGS.num_tpu_workers:
_ = run_main(
FLAGS, default_hparams,
lambda hparams: distributed_iterator_utils.train_fn(
hparams, FLAGS.num_tpu_workers))
else:
_ = run_main(FLAGS, default_hparams, estimator.train_fn)
elif FLAGS.mode == "train_and_eval":
print("Running training and evaluation mode.")
mlperf_log.gnmt_print(key=mlperf_log.RUN_START)
default_hparams = create_hparams(FLAGS)
if FLAGS.num_tpu_workers:
_ = run_main(
FLAGS, default_hparams,
lambda hparams: distributed_iterator_utils.train_and_eval_fn(
hparams, FLAGS.num_tpu_workers))
else:
_ = run_main(FLAGS, default_hparams, estimator.train_and_eval_fn)
mlperf_log.gnmt_print(key=mlperf_log.RUN_FINAL)
else:
print("Running inference mode.")
default_hparams = create_hparams(FLAGS)
current_epoch = 0
mlperf_log.gnmt_print(key=mlperf_log.EVAL_TARGET,
value=default_hparams.target_bleu)
# Run evaluation when there's a new checkpoint
for ckpt in evaluation.checkpoints_iterator(FLAGS.out_dir):
# Terminate eval job once target score is reached
current_step = int(os.path.basename(ckpt).split("-")[1])
if current_step == 0:
current_epoch = 0
else:
current_epoch += 1
tf.logging.info("Starting to evaluate...%s", ckpt)
try:
mlperf_log.gnmt_print(
key=mlperf_log.TRAIN_CHECKPOINT, value=("Under " + ckpt))
mlperf_log.gnmt_print(key=mlperf_log.EVAL_START)
score = run_main(FLAGS, default_hparams, estimator.eval_fn)
mlperf_log.gnmt_print(key=mlperf_log.EVAL_ACCURACY,
value={"value": score, "epoch": current_epoch})
mlperf_log.gnmt_print(key=mlperf_log.EVAL_STOP, value=current_epoch)
if score > FLAGS.target_bleu:
tf.logging.info(
"Evaluation finished after training step %d" % current_step)
mlperf_log.gnmt_print(mlperf_log.RUN_STOP, {"success": True})
break
# Terminate eval job when final checkpoint is reached
max_steps = default_hparams.num_train_steps
if current_step >= max_steps:
tf.logging.info(
"Evaluation finished but failed to reach target score.")
mlperf_log.gnmt_print(mlperf_log.RUN_STOP, {"success": False})
break
except tf.errors.NotFoundError:
tf.logging.info(
"Checkpoint %s no longer exists, skipping checkpoint" % ckpt)
mlperf_log.gnmt_print(key=mlperf_log.RUN_FINAL)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
nmt_parser = argparse.ArgumentParser()
add_arguments(nmt_parser)
FLAGS, unparsed = nmt_parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
kyleabeauchamp/EnsemblePaper | code/src/ALA3.py | 1 | 2174 | import pandas as pd
ff_map = { # Used to rename forcefields from 'amber96' to 'ff96' without having to re-name the raw data files.
"""#amber99""":"""#ff99""", # Some messed up values of cross validation that were discarded
"amber96":"ff96",
"amber99":"ff99",
"amber99sbnmr-ildn":"ff99sbnmr-ildn",
"oplsaa":"oplsaa",
"charmm27":"charmm27"
}
#model = "combined_shifts"
model = "maxent"
ff_list = ["amber96","amber99","amber99sbnmr-ildn","charmm27","oplsaa"]
mapped_ff_list = [ff_map[x] for x in ff_list]
prior_list = ["maxent", "dirichlet", "MVN"]
#train_keys = ['JC_2_J3_HN_Cprime', 'CS_2_CA', 'CS_2_H', 'JC_3_J2_N_CA', 'CS_2_CB', 'JC_2_J3_HN_CB']
#test_keys = ["JC_2_J3_HN_HA" , "JC_2_J3_HA_Cprime", "JC_2_J1_N_CA"]
tuples = [("JC", 2, "J3_HN_Cprime"), ("JC", 3, "J2_N_CA"), ("JC", 2, "J3_HN_CB"), ("CS", 2, "CA"), ("CS", 2, "H"), ("CS", 2, "CB")]
train_keys = pd.MultiIndex.from_tuples(tuples, names=("experiment", "resid", "name"))
tuples = [("JC", 2, "J3_HN_HA"), ("JC", 2, "J3_HA_Cprime"), ("JC", 2, "J1_N_CA"), ("CS", 2, "HA")]
test_keys = pd.MultiIndex.from_tuples(tuples, names=("experiment", "resid", "name"))
#all_keys = []
#all_keys.extend(train_keys)
#all_keys.extend(test_keys)
bw_num_samples = 1000000
#num_samples = 5000000
num_samples = 10000000
thin = 100
burn = 5000
kfold = 2
num_blocks = 10
stride = 1
cross_val_stride = 20
regularization_strength_dict = {"maxent":
{
"amber96":10,
"amber99":4,
"amber99sbnmr-ildn":100,
"charmm27":6,
"oplsaa":15
}
,
"MVN":
{
"amber96":6,
"amber99":1,
"amber99sbnmr-ildn":100,
"charmm27":4,
"oplsaa":12
},
"dirichlet":
{
"amber96":7,
"amber99":1.2,
"amber99sbnmr-ildn":100,
"charmm27":4,
"oplsaa":13
}
}
old_regularization_strength_dict = {"maxent":
{
"amber96":5,
"amber99":1,
"amber99sbnmr-ildn":1,
"charmm27":1,
"oplsaa":7
}
,
"MVN":
{
"amber96":8,
"amber99":1,
"amber99sbnmr-ildn":2,
"charmm27":1,
"oplsaa":14
}
}
data_directory = "/home/kyleb/dat/ala_lvbp/"
outdir = "/home/kyleb/src/kyleabeauchamp/EnsemblePaper/paper/figures/"
cross_val_filename = outdir + "../../data/cross_val.dat"
chi2_filename = outdir + "../../data/chi2.dat"
experiment_filename = outdir + "../../data/experimental_data.csv"
| gpl-3.0 |
eayoungs/tinynumpy | docs/ext/docscrape_sphinx.py | 9 | 7751 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
self.use_plots = config.get('use_plots', False)
NumpyDocString.__init__(self, docstring, config=config)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' ' * indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
if not self._obj or hasattr(self._obj, param):
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if False and autosum:
out += ['.. autosummary::', ' :toctree:', '']
out += autosum
if others:
maxlen_0 = max([len(x[0]) for x in others])
maxlen_1 = max([len(x[1]) for x in others])
hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
n_indent = maxlen_0 + maxlen_1 + 4
out += [hdr]
for param, param_type, desc in others:
out += [fmt % (param.strip(), param_type)]
out += self._str_indent(desc, n_indent)
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default', '')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex', '']
else:
out += ['.. latexonly::', '']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Returns', 'Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out, indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.use_plots = config.get('use_plots', False)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.use_plots = config.get('use_plots', False)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
| mit |
aspiringguru/sentexTuts | PracMachLrng/sentex_ML_demo10.py | 1 | 4185 | '''
Working exercise from the sentex tutorials, with mods for clarification and API doc references.
Classification w/ K Nearest Neighbors Intro - Practical Machine Learning Tutorial with Python p.13
https://youtu.be/44jq6ano5n0?list=PLQVvvaa0QuDfKTOs3Keq_kaG2P55YRn5v
linear regression model: y = m*x + b
    m = (mean(x)*mean(y) - mean(x*y)) / (mean(x)^2 - mean(x^2))
    b = mean(y) - m*mean(x)
R^2, the coefficient of determination, ranges from 0 to 1.
An R^2 of 0 means that the dependent variable cannot be predicted from the independent variable.
No correlation: if there is no linear correlation, or only a weak one, r is
close to 0. A value near zero means that there is a random, nonlinear relationship
between the two variables.
An R^2 of 1 means the dependent variable can be predicted without error from the independent variable.
A perfect correlation of +/- 1 occurs only when the data points all lie exactly on a
straight line. If r = +1, the slope of this line is positive; if r = -1, the slope of this
line is negative.
An R^2 between 0 and 1 indicates the extent to which the dependent variable is predictable.
An R^2 of 0.10 means that 10 percent of the variance in Y is predictable from X;
an R^2 of 0.20 means that 20 percent is predictable; and so on.
The remaining percentage of the variance is unexplained by the model.
A correlation greater than 0.8 is generally described as strong, whereas a correlation less than 0.5 is
generally described as weak. These thresholds can vary with the "type" of data being examined:
a study using scientific data may require a stronger correlation than a study using social science data.
    r^2 = 1 - (squared error of the regression line y_hat) / (squared error of the mean of y)
'''
from statistics import mean
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import random
style.use('fivethirtyeight')
xs = [1,2,3,4,5,6]
ys = [5,4,6,5,6,7]
#plt.scatter(xs, ys)
#plt.show()
def create_dataset(hm, variance, step=2, correlation=False):
#correlation may be 'pos' (positive), 'neg' (negative), or False for none
# if correlation is 'pos', val steps up by `step` each point (positive trend)
# if correlation is 'neg', val steps down by `step` each point (negative trend)
val = 1
ys = []
for i in range(hm):
y = val + random.randrange(-variance, variance)
ys.append(y)
if correlation and correlation == 'pos':
val += step
elif correlation and correlation == 'neg':
val -= step
#so far we have generated ys: values within +/- variance of val (val only drifts away from 1 when correlation is set)
xs = [i for i in range(len(ys))]
return np.array(xs, dtype=np.float64), np.array(ys, dtype=np.float64)
def best_fit_slope_and_intercept(xs, ys):
m = (mean(xs) * mean(ys) - mean(xs*ys)) / ( mean(xs)*mean(xs) - mean(xs*xs) )
b = mean(ys) - m * mean(xs)
return m, b
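# Quick illustrative check of the closed-form slope/intercept formula above,
# using the small xs/ys lists defined earlier (converted to numpy arrays so
# that the elementwise products inside the function work). The expected
# result is roughly m ~= 0.43 and b ~= 4.0 (values approximate).
xs_check = np.array(xs, dtype=np.float64)
ys_check = np.array(ys, dtype=np.float64)
m_check, b_check = best_fit_slope_and_intercept(xs_check, ys_check)
print("formula check: m={:.4f}, b={:.4f}".format(m_check, b_check))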
def squared_error(ys_orig, ys_line):
#error between the predicted y line and the actual points.
return sum((ys_line-ys_orig)**2)
def coefficient_of_determination(ys_orig, ys_line):
#returns r squared = 1 - SE(y_hat) / SE(mean(y))
y_mean_line = [mean(ys_orig) for y in ys_orig]
#create an array filled with the mean of the original y values.
squared_error_regr = squared_error(ys_orig, ys_line)
squared_error_y_mean = squared_error(ys_orig, y_mean_line)
return 1- (squared_error_regr/squared_error_y_mean)
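# Minimal sanity check of the r squared definition above: a "regression line"
# that passes exactly through every point has zero squared error, so the
# coefficient of determination should come out as exactly 1.0.
_perfect = np.array([1.0, 2.0, 3.0, 4.0])
assert coefficient_of_determination(_perfect, _perfect) == 1.0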
#create data using new function
#create_dataset(hm, variance, step=2, correlation=False),
# returns np.array(xs, dtype=np.float64), np.array(ys, dtype=np.float64)
xs, ys = create_dataset(40, 5, 2, correlation='pos')
print ("xs=", xs)
print ("ys=", ys)
m,b = best_fit_slope_and_intercept(xs, ys)
#regression_line = xs*m+b
regression_line = [m*x+b for x in xs]
print ( "m={}".format(m), ", b={}".format(b) )
predict_x = 8
predict_y = (m*predict_x) + b
r_squared = coefficient_of_determination(ys, regression_line)
print ("r_squared=", r_squared)
plt.scatter(xs, ys)
#plt.plot(xs, xs*m+b)
plt.plot(xs, regression_line)
plt.scatter(predict_x, predict_y, color = 'g', marker='s', s=100)
plt.xlabel('xs')
plt.ylabel('ys')
plt.title("plot mx+b using linear regression fit")
plt.show()
'''
http://matplotlib.org/examples/style_sheets/plot_fivethirtyeight.html
''' | mit |
newville/scikit-image | doc/examples/plot_shapes.py | 22 | 1913 | """
======
Shapes
======
This example shows how to draw several different shapes:
- line
- Bezier curve
- polygon
- circle
- ellipse
Anti-aliased drawing for:
- line
- circle
"""
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage.draw import (line, polygon, circle,
circle_perimeter,
ellipse, ellipse_perimeter,
bezier_curve)
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(10, 6))
img = np.zeros((500, 500, 3), dtype=np.double)
# draw line
rr, cc = line(120, 123, 20, 400)
img[rr, cc, 0] = 255
# fill polygon
poly = np.array((
(300, 300),
(480, 320),
(380, 430),
(220, 590),
(300, 300),
))
rr, cc = polygon(poly[:, 0], poly[:, 1], img.shape)
img[rr, cc, 1] = 1
# fill circle
rr, cc = circle(200, 200, 100, img.shape)
img[rr, cc, :] = (1, 1, 0)
# fill ellipse
rr, cc = ellipse(300, 300, 100, 200, img.shape)
img[rr, cc, 2] = 1
# circle
rr, cc = circle_perimeter(120, 400, 15)
img[rr, cc, :] = (1, 0, 0)
# Bezier curve
rr, cc = bezier_curve(70, 100, 10, 10, 150, 100, 1)
img[rr, cc, :] = (1, 0, 0)
# ellipses
rr, cc = ellipse_perimeter(120, 400, 60, 20, orientation=math.pi / 4.)
img[rr, cc, :] = (1, 0, 1)
rr, cc = ellipse_perimeter(120, 400, 60, 20, orientation=-math.pi / 4.)
img[rr, cc, :] = (0, 0, 1)
rr, cc = ellipse_perimeter(120, 400, 60, 20, orientation=math.pi / 2.)
img[rr, cc, :] = (1, 1, 1)
ax1.imshow(img)
ax1.set_title('No anti-aliasing')
ax1.axis('off')
from skimage.draw import line_aa, circle_perimeter_aa
img = np.zeros((100, 100), dtype=np.double)
# anti-aliased line
rr, cc, val = line_aa(12, 12, 20, 50)
img[rr, cc] = val
# anti-aliased circle
rr, cc, val = circle_perimeter_aa(60, 40, 30)
img[rr, cc] = val
ax2.imshow(img, cmap=plt.cm.gray, interpolation='nearest')
ax2.set_title('Anti-aliasing')
ax2.axis('off')
plt.show()
| bsd-3-clause |
joernhees/scikit-learn | examples/tree/plot_unveil_tree_structure.py | 47 | 4852 | """
=========================================
Understanding the decision tree structure
=========================================
The decision tree structure can be analysed to gain further insight into the
relation between the features and the target to predict. In this example, we
show how to retrieve:
- the binary tree structure;
- the depth of each node and whether or not it's a leaf;
- the nodes that were reached by a sample using the ``decision_path`` method;
- the leaf that was reached by a sample using the apply method;
- the rules that were used to predict a sample;
- the decision path shared by a group of samples.
"""
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
estimator = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0)
estimator.fit(X_train, y_train)
# The decision estimator has an attribute called tree_ which stores the entire
# tree structure and allows access to low level attributes. The binary tree
# tree_ is represented as a number of parallel arrays. The i-th element of each
# array holds information about the node `i`. Node 0 is the tree's root. NOTE:
# Some of the arrays only apply to either leaves or split nodes, resp. In this
# case the values of nodes of the other type are arbitrary!
#
# Among those arrays, we have:
# - left_child, id of the left child of the node
# - right_child, id of the right child of the node
# - feature, feature used for splitting the node
# - threshold, threshold value at the node
#
# Using those arrays, we can parse the tree structure:
n_nodes = estimator.tree_.node_count
children_left = estimator.tree_.children_left
children_right = estimator.tree_.children_right
feature = estimator.tree_.feature
threshold = estimator.tree_.threshold
# The tree structure can be traversed to compute various properties such
# as the depth of each node and whether or not it is a leaf.
node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
is_leaves = np.zeros(shape=n_nodes, dtype=bool)
stack = [(0, -1)] # seed is the root node id and its parent depth
while len(stack) > 0:
node_id, parent_depth = stack.pop()
node_depth[node_id] = parent_depth + 1
# If we have a test node
if (children_left[node_id] != children_right[node_id]):
stack.append((children_left[node_id], parent_depth + 1))
stack.append((children_right[node_id], parent_depth + 1))
else:
is_leaves[node_id] = True
print("The binary tree structure has %s nodes and has "
"the following tree structure:"
% n_nodes)
for i in range(n_nodes):
if is_leaves[i]:
print("%snode=%s leaf node." % (node_depth[i] * "\t", i))
else:
print("%snode=%s test node: go to node %s if X[:, %s] <= %s else to "
"node %s."
% (node_depth[i] * "\t",
i,
children_left[i],
feature[i],
threshold[i],
children_right[i],
))
print()
# First let's retrieve the decision path of each sample. The decision_path
# method allows us to retrieve the node indicator functions. A non-zero element
# of the indicator matrix at position (i, j) indicates that sample i goes
# through node j.
node_indicator = estimator.decision_path(X_test)
# Similarly, we can also have the leaves ids reached by each sample.
leave_id = estimator.apply(X_test)
# Now, it's possible to get the tests that were used to predict a sample or
# a group of samples. First, let's do it for a single sample.
sample_id = 0
node_index = node_indicator.indices[node_indicator.indptr[sample_id]:
node_indicator.indptr[sample_id + 1]]
print('Rules used to predict sample %s: ' % sample_id)
for node_id in node_index:
if leave_id[sample_id] != node_id:
continue
if (X_test[sample_id, feature[node_id]] <= threshold[node_id]):
threshold_sign = "<="
else:
threshold_sign = ">"
print("decision id node %s : (X_test[%s, %s] (= %s) %s %s)"
% (node_id,
sample_id,
feature[node_id],
X_test[sample_id, feature[node_id]],
threshold_sign,
threshold[node_id]))
# For a group of samples, we have the following common node.
sample_ids = [0, 1]
common_nodes = (node_indicator.toarray()[sample_ids].sum(axis=0) ==
len(sample_ids))
common_node_id = np.arange(n_nodes)[common_nodes]
print("\nThe following samples %s share the node %s in the tree"
% (sample_ids, common_node_id))
print("It is %s %% of all nodes." % (100 * len(common_node_id) / n_nodes,))
| bsd-3-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/scipy/signal/fir_filter_design.py | 40 | 20637 | """Functions for FIR filter design."""
from __future__ import division, print_function, absolute_import
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
from . import sigtools
__all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord',
'firwin', 'firwin2', 'remez']
# Some notes on function parameters:
#
# `cutoff` and `width` are given as numbers between 0 and 1. These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2KHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
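# Illustrative values for the three branches above (numbers are approximate):
#
#     >>> kaiser_beta(65)    # a > 50
#     6.20426
#     >>> kaiser_beta(30)    # 21 < a <= 50
#     2.1168...
#     >>> kaiser_beta(12)    # a <= 21
#     0.0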
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and
stopband (or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
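# Worked example of the formula above (value approximate): a 101-tap filter
# with a transition width of 0.05, expressed as a fraction of the Nyquist
# rate, gives roughly 2.285 * 100 * pi * 0.05 + 7.95 dB of attenuation:
#
#     >>> kaiser_atten(101, 0.05)
#     43.84...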
def kaiserord(ripple, width):
"""
Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
Positive number specifying maximum ripple in passband (dB) and minimum
ripple in stopband.
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
The length of the kaiser window.
beta : float
The beta parameter for the kaiser window.
See Also
--------
kaiser_beta, kaiser_atten
Notes
-----
There are several ways to obtain the Kaiser window:
- ``signal.kaiser(numtaps, beta, sym=0)``
- ``signal.get_window(beta, numtaps)``
- ``signal.get_window(('kaiser', beta), numtaps)``
The empirical equations discovered by Kaiser are used.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
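# Illustrative design sketch (numbers approximate): for 65 dB of ripple
# suppression and a transition width of 5% of the Nyquist rate,
#
#     >>> numtaps, beta = kaiserord(ripple=65, width=0.05)
#     >>> numtaps, round(beta, 3)
#     (160, 6.204)
#
# The resulting pair can then be passed to `firwin` below via its `numtaps`
# argument and a window of ('kaiser', beta).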
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response
filter. The filter will have linear phase; it will be Type I if
`numtaps` is odd and Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None, optional
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values, optional
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool, optional
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool, optional
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
- 0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True)
- `nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e the filter is a single band highpass filter);
center of first passband otherwise
nyq : float, optional
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : (numtaps,) ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
See also
--------
scipy.signal.firwin2
Examples
--------
Low-pass from 0 to f:
>>> from scipy import signal
>>> numtaps = 3
>>> f = 0.1
>>> signal.firwin(numtaps, f)
array([ 0.06799017, 0.86401967, 0.06799017])
Use a specific window function:
>>> signal.firwin(numtaps, f, window='nuttall')
array([ 3.56607041e-04, 9.99286786e-01, 3.56607041e-04])
High-pass ('stop' from 0 to f):
>>> signal.firwin(numtaps, f, pass_zero=False)
array([-0.00859313, 0.98281375, -0.00859313])
Band-pass:
>>> f1, f2 = 0.1, 0.2
>>> signal.firwin(numtaps, [f1, f2], pass_zero=False)
array([ 0.06301614, 0.88770441, 0.06301614])
Band-stop:
>>> signal.firwin(numtaps, [f1, f2])
array([-0.00801395, 1.0160279 , -0.00801395])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1]):
>>> f3, f4 = 0.3, 0.4
>>> signal.firwin(numtaps, [f1, f2, f3, f4])
array([-0.01376344, 1.02752689, -0.01376344])
Multi-band (passbands are [f1, f2] and [f3,f4]):
>>> signal.firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
array([ 0.04890915, 0.91284326, 0.04890915])
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most "
"one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be "
"greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies "
"must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width) / nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
# is even, and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of
# a passband.
bands = cutoff.reshape(-1, 2)
# Build up the coefficients.
alpha = 0.5 * (numtaps - 1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from .signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0,
antisymmetric=False):
"""
FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`.
freq : array_like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array_like
The filter gains at the frequency sampling points. Certain
constraints to gain values, depending on the filter type, are applied,
see Notes for details.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float, optional
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
antisymmetric : bool, optional
Whether resulting impulse response is symmetric/antisymmetric.
See Notes for more details.
Returns
-------
taps : ndarray
The filter coefficients of the FIR filter, as a 1-D array of length
`numtaps`.
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The type of filter is determined by
the value of `numtaps` and the `antisymmetric` flag.
There are four possible combinations:
- odd `numtaps`, `antisymmetric` is False, type I filter is produced
- even `numtaps`, `antisymmetric` is False, type II filter is produced
- odd `numtaps`, `antisymmetric` is True, type III filter is produced
- even `numtaps`, `antisymmetric` is True, type IV filter is produced
Magnitude response of all but type I filters are subjects to following
constraints:
- type II -- zero at the Nyquist frequency
- type III -- zero at zero and Nyquist frequencies
- type IV -- zero at zero frequency
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> from scipy import signal
>>> taps = signal.firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError(('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s') %
(numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if antisymmetric:
if numtaps % 2 == 0:
ftype = 4
else:
ftype = 3
else:
if numtaps % 2 == 0:
ftype = 2
else:
ftype = 1
if ftype == 2 and gain[-1] != 0.0:
raise ValueError("A Type II filter must have zero gain at the "
"Nyquist rate.")
elif ftype == 3 and (gain[0] != 0.0 or gain[-1] != 0.0):
raise ValueError("A Type III filter must have zero gain at zero "
"and Nyquist rates.")
elif ftype == 4 and gain[0] != 0.0:
raise ValueError("A Type IV filter must have zero gain at zero rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
if ftype > 2:
shift *= 1j
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from .signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
if ftype == 3:
out[out.size // 2] = 0.0
return out
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> from scipy import signal
>>> bpass = signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass': 1, 'differentiator': 2, 'hilbert': 3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', "
"or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
| mit |
BenjaminBossan/nolearn | nolearn/lasagne/tests/test_base.py | 1 | 31838 | import pickle
import sys
from lasagne.layers import get_output
from lasagne.layers import BatchNormLayer
from lasagne.layers import ConcatLayer
from lasagne.layers import Conv2DLayer
from lasagne.layers import DenseLayer
from lasagne.layers import InputLayer
from lasagne.layers import Layer
from lasagne.nonlinearities import identity
from lasagne.nonlinearities import softmax
from lasagne.nonlinearities import sigmoid
from lasagne.objectives import categorical_crossentropy
from lasagne.objectives import aggregate
from lasagne.updates import nesterov_momentum
from mock import Mock
from mock import patch
import numpy as np
import pytest
from sklearn.base import clone
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
import theano
import theano.tensor as T
floatX = theano.config.floatX
class TestLayers:
@pytest.fixture
def layers(self):
from nolearn.lasagne.base import Layers
return Layers([('one', 1), ('two', 2), ('three', 3)])
def test_getitem_with_key(self, layers):
assert layers['one'] == 1
def test_getitem_with_index(self, layers):
assert layers[0] == 1
def test_getitem_with_slice(self, layers):
from nolearn.lasagne.base import Layers
sliced = layers[:2]
assert isinstance(sliced, Layers)
assert sliced.keys() == ['one', 'two']
assert sliced.values() == [1, 2]
def test_keys_returns_list(self, layers):
assert layers.keys() == ['one', 'two', 'three']
def test_values_returns_list(self, layers):
assert layers.values() == [1, 2, 3]
class TestFunctionalToy:
def classif(self, NeuralNet, X, y):
l = InputLayer(shape=(None, X.shape[1]))
l = DenseLayer(l, num_units=len(np.unique(y)), nonlinearity=softmax)
net = NeuralNet(l, update_learning_rate=0.01)
return net.fit(X, y)
def classif_no_valid(self, NeuralNet, X, y):
from nolearn.lasagne import TrainSplit
l = InputLayer(shape=(None, X.shape[1]))
l = DenseLayer(l, num_units=len(np.unique(y)), nonlinearity=softmax)
net = NeuralNet(
l, update_learning_rate=0.01, train_split=TrainSplit(0))
return net.fit(X, y)
def regr(self, NeuralNet, X, y):
l = InputLayer(shape=(None, X.shape[1]))
l = DenseLayer(l, num_units=y.shape[1], nonlinearity=None)
net = NeuralNet(l, regression=True, update_learning_rate=0.01)
return net.fit(X, y)
def test_classif_two_classes(self, NeuralNet):
X, y = make_classification()
X = X.astype(floatX)
y = y.astype(np.int32)
self.classif(NeuralNet, X, y)
def test_classif_ten_classes(self, NeuralNet):
X, y = make_classification(n_classes=10, n_informative=10)
X = X.astype(floatX)
y = y.astype(np.int32)
self.classif(NeuralNet, X, y)
def test_classif_no_valid_two_classes(self, NeuralNet):
X, y = make_classification()
X = X.astype(floatX)
y = y.astype(np.int32)
self.classif_no_valid(NeuralNet, X, y)
def test_regr_one_target(self, NeuralNet):
X, y = make_regression()
X = X.astype(floatX)
y = y.reshape(-1, 1).astype(np.float32)
self.regr(NeuralNet, X, y)
def test_regr_ten_targets(self, NeuralNet):
X, y = make_regression(n_targets=10)
X = X.astype(floatX)
y = y.astype(floatX)
self.regr(NeuralNet, X, y)
class TestFunctionalMNIST:
def test_accuracy(self, net_fitted, mnist, X_test, y_pred):
X, y = mnist
y_test = y[60000:]
acc = accuracy_score(y_pred, y_test)
assert acc > 0.85
assert net_fitted.score(X_test, y_test) == acc
def test_train_history(self, net_fitted):
history = net_fitted.train_history_
assert len(history) == 2 # due to early stopping
assert history[1]['valid_accuracy'] > 0.85
assert history[1]['valid_accuracy'] > history[0]['valid_accuracy']
assert set(history[0].keys()) == set([
'dur', 'epoch', 'train_loss', 'train_loss_best',
'valid_loss', 'valid_loss_best', 'valid_accuracy',
])
def test_early_stopping(self, net_fitted):
early_stopping = net_fitted.on_epoch_finished[0]
assert early_stopping.train_history == net_fitted.train_history_
def test_pickle(self, net_fitted, X_test, y_pred):
recursionlimit = sys.getrecursionlimit()
sys.setrecursionlimit(10000)
pickled = pickle.dumps(net_fitted, -1)
net_loaded = pickle.loads(pickled)
assert np.array_equal(net_loaded.predict(X_test), y_pred)
sys.setrecursionlimit(recursionlimit)
def test_load_params_from_net(self, net, net_fitted, X_test, y_pred):
net_loaded = clone(net)
net_loaded.load_params_from(net_fitted)
assert np.array_equal(net_loaded.predict(X_test), y_pred)
def test_load_params_from_params_values(self, net, net_fitted,
X_test, y_pred):
net_loaded = clone(net)
net_loaded.load_params_from(net_fitted.get_all_params_values())
assert np.array_equal(net_loaded.predict(X_test), y_pred)
def test_save_params_to_path(self, net_fitted, X_test, y_pred):
path = '/tmp/test_lasagne_functional_mnist.params'
net_fitted.save_params_to(path)
net_loaded = clone(net_fitted)
net_loaded.load_params_from(path)
assert np.array_equal(net_loaded.predict(X_test), y_pred)
def test_load_params_from_message(self, net, net_fitted, capsys):
net2 = clone(net)
net2.verbose = 1
net2.load_params_from(net_fitted)
out = capsys.readouterr()[0]
message = """\
Loaded parameters to layer 'conv1' (shape 8x1x5x5).
Loaded parameters to layer 'conv1' (shape 8).
Loaded parameters to layer 'conv2' (shape 8x8x5x5).
Loaded parameters to layer 'conv2' (shape 8).
Loaded parameters to layer 'hidden1' (shape 128x128).
Loaded parameters to layer 'hidden1' (shape 128).
Loaded parameters to layer 'output' (shape 128x10).
Loaded parameters to layer 'output' (shape 10).
"""
assert out == message
def test_partial_fit(self, net, X_train, y_train):
net2 = clone(net)
assert net2.partial_fit(X_train, y_train) is net2
net2.partial_fit(X_train, y_train)
history = net2.train_history_
assert len(history) == 2
assert history[1]['valid_accuracy'] > 0.85
def test_lasagne_functional_grid_search(mnist, monkeypatch):
# Make sure that we can satisfy the grid search interface.
from nolearn.lasagne import NeuralNet
nn = NeuralNet(
layers=[],
)
param_grid = {
'more_params': [{'hidden_num_units': 100}, {'hidden_num_units': 200}],
'update_momentum': [0.9, 0.98],
}
X, y = mnist
vars_hist = []
def fit(self, X, y):
vars_hist.append(vars(self).copy())
return self
with patch.object(NeuralNet, 'fit', autospec=True) as mock_fit:
mock_fit.side_effect = fit
with patch('nolearn.lasagne.NeuralNet.score') as score:
score.return_value = 0.3
gs = GridSearchCV(nn, param_grid, cv=2, refit=False, verbose=4)
gs.fit(X, y)
assert [entry['update_momentum'] for entry in vars_hist] == [
0.9, 0.9, 0.98, 0.98] * 2
assert [entry['more_params'] for entry in vars_hist] == (
[{'hidden_num_units': 100}] * 4 +
[{'hidden_num_units': 200}] * 4
)
def test_clone():
from nolearn.lasagne import NeuralNet
from nolearn.lasagne import BatchIterator
from nolearn.lasagne import objective
params = dict(
layers=[
('input', InputLayer),
('hidden', DenseLayer),
('output', DenseLayer),
],
input_shape=(100, 784),
output_num_units=10,
output_nonlinearity=softmax,
more_params={
'hidden_num_units': 100,
},
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
regression=False,
objective=objective,
objective_loss_function=categorical_crossentropy,
batch_iterator_train=BatchIterator(batch_size=100),
y_tensor_type=T.ivector,
use_label_encoder=False,
on_epoch_finished=None,
on_training_finished=None,
max_epochs=100,
eval_size=0.1, # BBB
check_input=True,
verbose=0,
)
nn = NeuralNet(**params)
nn2 = clone(nn)
params1 = nn.get_params()
params2 = nn2.get_params()
for ignore in (
'batch_iterator_train',
'batch_iterator_test',
'output_nonlinearity',
'loss',
'objective',
'train_split',
'eval_size',
'X_tensor_type',
'on_epoch_finished',
'on_batch_finished',
'on_training_started',
'on_training_finished',
'custom_scores',
'scores_train',
'scores_valid',
):
for par in (params, params1, params2):
par.pop(ignore, None)
assert params == params1 == params2
def test_lasagne_functional_regression(boston):
from nolearn.lasagne import NeuralNet
X, y = boston
layer1 = InputLayer(shape=(128, 13))
layer2 = DenseLayer(layer1, num_units=100)
output = DenseLayer(layer2, num_units=1, nonlinearity=identity)
nn = NeuralNet(
layers=output,
update_learning_rate=0.01,
update_momentum=0.1,
regression=True,
max_epochs=50,
)
nn.fit(X[:300], y[:300])
assert mean_absolute_error(nn.predict(X[300:]), y[300:]) < 3.0
assert r2_score(nn.predict(X[300:]), y[300:]) == nn.score(X[300:], y[300:])
class TestDefaultObjective:
@pytest.fixture
def get_output(self, monkeypatch):
from nolearn.lasagne import base
get_output_mock = Mock()
monkeypatch.setattr(base, 'get_output', get_output_mock)
return get_output_mock
@pytest.fixture
def objective(self):
from nolearn.lasagne.base import objective
return objective
def test_with_defaults(self, objective, get_output):
loss_function, target = Mock(), Mock()
loss_function.return_value = np.array([1, 2, 3])
result = objective(
[1, 2, 3], loss_function=loss_function, target=target)
assert result == 2.0
get_output.assert_called_with(3, deterministic=False)
loss_function.assert_called_with(get_output.return_value, target)
def test_with_get_output_kw(self, objective, get_output):
loss_function, target = Mock(), Mock()
loss_function.return_value = np.array([1, 2, 3])
objective(
[1, 2, 3], loss_function=loss_function, target=target,
get_output_kw={'i_was': 'here'},
)
get_output.assert_called_with(3, deterministic=False, i_was='here')
class TestTrainSplit:
@pytest.fixture
def TrainSplit(self):
from nolearn.lasagne import TrainSplit
return TrainSplit
def test_reproducable(self, TrainSplit, nn):
X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
X_train1, X_valid1, y_train1, y_valid1 = TrainSplit(0.2)(
X, y, nn)
X_train2, X_valid2, y_train2, y_valid2 = TrainSplit(0.2)(
X, y, nn)
assert np.all(X_train1 == X_train2)
assert np.all(y_valid1 == y_valid2)
def test_eval_size_zero(self, TrainSplit, nn):
X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
X_train, X_valid, y_train, y_valid = TrainSplit(0.0)(
X, y, nn)
assert len(X_train) == len(X)
assert len(y_train) == len(y)
assert len(X_valid) == 0
assert len(y_valid) == 0
def test_eval_size_half(self, TrainSplit, nn):
X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
X_train, X_valid, y_train, y_valid = TrainSplit(0.51)(
X, y, nn)
assert len(X_train) + len(X_valid) == 100
assert len(y_train) + len(y_valid) == 100
assert len(X_train) > 45
def test_regression(self, TrainSplit, nn):
X = np.random.random((100, 10))
y = np.random.random((100))
nn.regression = True
X_train, X_valid, y_train, y_valid = TrainSplit(0.2)(
X, y, nn)
assert len(X_train) == len(y_train) == 80
assert len(X_valid) == len(y_valid) == 20
def test_stratified(self, TrainSplit, nn):
X = np.random.random((100, 10))
y = np.hstack([np.repeat([0, 0, 0], 25), np.repeat([1], 25)])
X_train, X_valid, y_train, y_valid = TrainSplit(0.2)(
X, y, nn)
assert y_train.sum() == 0.8 * 25
assert y_valid.sum() == 0.2 * 25
def test_not_stratified(self, TrainSplit, nn):
X = np.random.random((100, 10))
y = np.hstack([np.repeat([0, 0, 0], 25), np.repeat([1], 25)])
X_train, X_valid, y_train, y_valid = TrainSplit(0.2, stratify=False)(
X, y, nn)
assert y_train.sum() == 25
assert y_valid.sum() == 0
def test_X_is_dict(self, TrainSplit, nn):
X = {
'1': np.random.random((100, 10)),
'2': np.random.random((100, 10)),
}
y = np.repeat([0, 1, 2, 3], 25)
X_train, X_valid, y_train, y_valid = TrainSplit(0.2)(
X, y, nn)
assert len(X_train['1']) == len(X_train['2']) == len(y_train) == 80
assert len(X_valid['1']) == len(X_valid['2']) == len(y_valid) == 20
def test_X_is_dict_eval_size_0(self, TrainSplit, nn):
X = {
'1': np.random.random((100, 10)),
'2': np.random.random((100, 10)),
}
y = np.repeat([0, 1, 2, 3], 25)
X_train, X_valid, y_train, y_valid = TrainSplit(0)(
X, y, nn)
assert len(X_train['1']) == len(X_train['2']) == len(y_train) == 100
assert len(X_valid['1']) == len(X_valid['2']) == len(y_valid) == 0
class TestTrainTestSplitBackwardCompatibility:
@pytest.fixture
def LegacyNet(self, NeuralNet):
class LegacyNet(NeuralNet):
def train_test_split(self, X, y, eval_size):
self.__call_args__ = (X, y, eval_size)
split = int(X.shape[0] * eval_size)
return X[:split], X[split:], y[:split], y[split:]
return LegacyNet
def test_legacy_eval_size(self, NeuralNet):
net = NeuralNet([], eval_size=0.3, max_epochs=0)
assert net.train_split.eval_size == 0.3
def test_legacy_method_default_eval_size(self, LegacyNet):
net = LegacyNet([], max_epochs=0)
X, y = np.ones((10, 3)), np.zeros(10)
net.train_loop(X, y)
assert net.__call_args__ == (X, y, 0.2)
def test_legacy_method_given_eval_size(self, LegacyNet):
net = LegacyNet([], eval_size=0.3, max_epochs=0)
X, y = np.ones((10, 3)), np.zeros(10)
net.train_loop(X, y)
assert net.__call_args__ == (X, y, 0.3)
class TestBatchIterator:
@pytest.fixture
def BatchIterator(self):
from nolearn.lasagne import BatchIterator
return BatchIterator
@pytest.fixture
def X(self):
return np.arange(200).reshape((10, 20)).T.astype('float')
@pytest.fixture
def X_dict(self):
return {
'one': np.arange(200).reshape((10, 20)).T.astype('float'),
'two': np.arange(200).reshape((20, 10)).astype('float'),
}
@pytest.fixture
def y(self):
return np.arange(20)
@pytest.mark.parametrize("shuffle", [True, False])
def test_simple_x_and_y(self, BatchIterator, X, y, shuffle):
bi = BatchIterator(2, shuffle=shuffle)(X, y)
batches = list(bi)
assert len(batches) == 10
X0, y0 = batches[0]
assert X0.shape == (2, 10)
assert y0.shape == (2,)
Xt = np.vstack(b[0] for b in batches)
yt = np.hstack(b[1] for b in batches)
assert Xt.shape == X.shape
assert yt.shape == y.shape
np.testing.assert_equal(Xt[:, 0], yt)
if shuffle is False:
np.testing.assert_equal(X[:2], X0)
np.testing.assert_equal(y[:2], y0)
@pytest.mark.parametrize("shuffle", [True, False])
def test_simple_x_no_y(self, BatchIterator, X, shuffle):
bi = BatchIterator(2, shuffle=shuffle)(X)
batches = list(bi)
assert len(batches) == 10
X0, y0 = batches[0]
assert X0.shape == (2, 10)
assert y0 is None
if shuffle is False:
np.testing.assert_equal(X[:2], X0)
@pytest.mark.parametrize("shuffle", [True, False])
def test_X_is_dict(self, BatchIterator, X_dict, shuffle):
bi = BatchIterator(2, shuffle=shuffle)(X_dict)
batches = list(bi)
assert len(batches) == 10
X0, y0 = batches[0]
assert X0['one'].shape == (2, 10)
assert X0['two'].shape == (2, 10)
assert y0 is None
Xt1 = np.vstack(b[0]['one'] for b in batches)
Xt2 = np.vstack(b[0]['two'] for b in batches)
assert Xt1.shape == X_dict['one'].shape
assert Xt2.shape == X_dict['two'].shape
np.testing.assert_equal(Xt1[:, 0], Xt2[:, 0] / 10)
if shuffle is False:
np.testing.assert_equal(X_dict['one'][:2], X0['one'])
np.testing.assert_equal(X_dict['two'][:2], X0['two'])
def test_shuffle_no_copy(self, BatchIterator, X, y):
bi = BatchIterator(2, shuffle=True)(X, y)
X0, y0 = list(bi)[0]
assert X0.base is X # make sure X0 is a view
class TestCheckForUnusedKwargs:
def test_okay(self, NeuralNet):
net = NeuralNet(
layers=[('input', Mock), ('mylayer', Mock)],
input_shape=(10, 10),
mylayer_hey='hey',
update_foo=1,
update_bar=2,
)
net._create_iter_funcs = lambda *args: (1, 2, 3)
net.initialize()
def test_unused(self, NeuralNet):
net = NeuralNet(
layers=[('input', Mock), ('mylayer', Mock)],
input_shape=(10, 10),
mylayer_hey='hey',
yourlayer_ho='ho',
update_foo=1,
update_bar=2,
)
net._create_iter_funcs = lambda *args: (1, 2, 3)
with pytest.raises(ValueError) as err:
net.initialize()
assert str(err.value) == 'Unused kwarg: yourlayer_ho'
class TestInitializeLayers:
def test_initialization_with_layer_instance(self, NeuralNet):
layer1 = InputLayer(shape=(128, 13)) # name will be assigned
layer2 = DenseLayer(layer1, name='output', num_units=2) # has name
nn = NeuralNet(layers=layer2)
out = nn.initialize_layers()
assert nn.layers_['output'] == layer2 == out[0]
assert nn.layers_['input0'] == layer1
def test_initialization_with_layer_instance_bad_params(self, NeuralNet):
layer = DenseLayer(InputLayer(shape=(128, 13)), num_units=2)
nn = NeuralNet(layers=layer, dense1_num_units=3)
with pytest.raises(ValueError):
nn.initialize_layers()
def test_initialization_with_tuples(self, NeuralNet):
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
nn = NeuralNet(
layers=[
(input, {'shape': (10, 10), 'name': 'input'}),
(hidden1, {'some': 'param', 'another': 'param'}),
(hidden2, {}),
(output, {'name': 'output'}),
],
input_shape=(10, 10),
mock1_some='iwin',
)
out = nn.initialize_layers(nn.layers)
input.assert_called_with(
name='input', shape=(10, 10))
assert nn.layers_['input'] is input.return_value
hidden1.assert_called_with(
incoming=input.return_value, name='mock1',
some='iwin', another='param')
assert nn.layers_['mock1'] is hidden1.return_value
hidden2.assert_called_with(
incoming=hidden1.return_value, name='mock2')
assert nn.layers_['mock2'] is hidden2.return_value
output.assert_called_with(
incoming=hidden2.return_value, name='output')
assert out[0] is nn.layers_['output']
def test_initializtion_with_tuples_resolve_layers(self, NeuralNet):
nn = NeuralNet(
layers=[
('lasagne.layers.InputLayer', {'shape': (None, 10)}),
('lasagne.layers.DenseLayer', {'num_units': 33}),
],
)
out, = nn.initialize_layers(nn.layers)
assert out.num_units == 33
def test_initialization_legacy(self, NeuralNet):
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
nn = NeuralNet(
layers=[
('input', input),
('hidden1', hidden1),
('hidden2', hidden2),
('output', output),
],
input_shape=(10, 10),
hidden1_some='param',
)
out = nn.initialize_layers(nn.layers)
input.assert_called_with(
name='input', shape=(10, 10))
assert nn.layers_['input'] is input.return_value
hidden1.assert_called_with(
incoming=input.return_value, name='hidden1', some='param')
assert nn.layers_['hidden1'] is hidden1.return_value
hidden2.assert_called_with(
incoming=hidden1.return_value, name='hidden2')
assert nn.layers_['hidden2'] is hidden2.return_value
output.assert_called_with(
incoming=hidden2.return_value, name='output')
assert out[0] is nn.layers_['output']
def test_initializtion_legacy_resolve_layers(self, NeuralNet):
nn = NeuralNet(
layers=[
('input', 'lasagne.layers.InputLayer'),
('output', 'lasagne.layers.DenseLayer'),
],
input_shape=(None, 10),
output_num_units=33,
)
out, = nn.initialize_layers(nn.layers)
assert out.num_units == 33
def test_initialization_legacy_with_unicode_names(self, NeuralNet):
# Test whether legacy initialization is triggered; if not,
# raises error.
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
nn = NeuralNet(
layers=[
(u'input', input),
(u'hidden1', hidden1),
(u'hidden2', hidden2),
(u'output', output),
],
input_shape=(10, 10),
hidden1_some='param',
)
nn.initialize_layers()
def test_diamond(self, NeuralNet):
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, concat, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(4)]
nn = NeuralNet(
layers=[
('input', input),
('hidden1', hidden1),
('hidden2', hidden2),
('concat', concat),
('output', output),
],
input_shape=(10, 10),
hidden2_incoming='input',
concat_incomings=['hidden1', 'hidden2'],
)
nn.initialize_layers(nn.layers)
input.assert_called_with(name='input', shape=(10, 10))
hidden1.assert_called_with(incoming=input.return_value, name='hidden1')
hidden2.assert_called_with(incoming=input.return_value, name='hidden2')
concat.assert_called_with(
incomings=[hidden1.return_value, hidden2.return_value],
name='concat'
)
output.assert_called_with(incoming=concat.return_value, name='output')
class TestCheckGoodInput:
@pytest.fixture
def check_good_input(self, nn):
return nn._check_good_input
@pytest.fixture
def X(self):
return np.arange(100).reshape(10, 10).astype(floatX)
@pytest.fixture
def y(self):
return np.arange(10).astype(np.int32)
@pytest.fixture
def y_regr(self):
return np.arange(10).reshape(-1, 1).astype(floatX)
def test_X_OK(self, check_good_input, X):
assert check_good_input(X) == (X, None)
def test_X_and_y_OK(self, check_good_input, X, y):
assert check_good_input(X, y) == (X, y)
def test_X_and_y_OK_regression(self, nn, check_good_input, X, y_regr):
nn.regression = True
assert check_good_input(X, y_regr) == (X, y_regr)
def test_X_and_y_length_mismatch(self, check_good_input, X, y):
with pytest.raises(ValueError):
check_good_input(
X[:9],
y
)
def test_X_dict_and_y_length_mismatch(self, check_good_input, X, y):
with pytest.raises(ValueError):
check_good_input(
{'one': X, 'two': X},
y[:9],
)
def test_X_dict_length_mismatch(self, check_good_input, X):
with pytest.raises(ValueError):
check_good_input({
'one': X,
'two': X[:9],
})
def test_y_regression_1dim(self, nn, check_good_input, X, y_regr):
y = y_regr.reshape(-1)
nn.regression = True
X1, y1 = check_good_input(X, y)
assert (X1 == X).all()
assert (y1 == y.reshape(-1, 1)).all()
def test_y_regression_2dim(self, nn, check_good_input, X, y_regr):
y = y_regr
nn.regression = True
X1, y1 = check_good_input(X, y)
assert (X1 == X).all()
assert (y1 == y).all()
class TestGetOutput:
def test_layer_object(self, net_fitted, X_train):
layer = net_fitted.layers_['conv2']
output = net_fitted.get_output(layer, X_train[:3])
assert output.shape == (3, 8, 8, 8)
def test_layer_name(self, net_fitted, X_train):
output = net_fitted.get_output('conv2', X_train[:3])
assert output.shape == (3, 8, 8, 8)
def test_get_output_last_layer(self, net_fitted, X_train):
result = net_fitted.get_output(net_fitted.layers_[-1], X_train[:129])
expected = net_fitted.predict_proba(X_train[:129])
np.testing.assert_equal(result, expected)
def test_no_conv(self, net_no_conv):
net_no_conv.initialize()
X = np.random.random((10, 100)).astype(floatX)
result = net_no_conv.get_output('output', X)
expected = net_no_conv.predict_proba(X)
np.testing.assert_equal(result, expected)
class TestMultiInputFunctional:
@pytest.fixture(scope='session')
def net(self, NeuralNet):
return NeuralNet(
layers=[
(InputLayer,
{'name': 'input1', 'shape': (None, 392)}),
(DenseLayer,
{'name': 'hidden1', 'num_units': 98}),
(InputLayer,
{'name': 'input2', 'shape': (None, 392)}),
(DenseLayer,
{'name': 'hidden2', 'num_units': 98}),
(ConcatLayer,
{'incomings': ['hidden1', 'hidden2']}),
(DenseLayer,
{'name': 'hidden3', 'num_units': 98}),
(DenseLayer,
{'name': 'output', 'num_units': 10, 'nonlinearity': softmax}),
],
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
max_epochs=2,
verbose=4,
)
@pytest.fixture(scope='session')
def net_fitted(self, net, mnist):
X, y = mnist
X_train, y_train = X[:10000], y[:10000]
X_train1, X_train2 = X_train[:, :392], X_train[:, 392:]
return net.fit({'input1': X_train1, 'input2': X_train2}, y_train)
@pytest.fixture(scope='session')
def y_pred(self, net_fitted, mnist):
X, y = mnist
X_test = X[60000:]
X_test1, X_test2 = X_test[:, :392], X_test[:, 392:]
return net_fitted.predict({'input1': X_test1, 'input2': X_test2})
def test_accuracy(self, net_fitted, mnist, y_pred):
X, y = mnist
y_test = y[60000:]
assert accuracy_score(y_pred, y_test) > 0.85
class TestGradScale:
@pytest.fixture
def grad_scale(self):
from nolearn.lasagne import grad_scale
return grad_scale
@pytest.mark.parametrize("layer", [
BatchNormLayer(InputLayer((None, 16))),
Conv2DLayer(InputLayer((None, 1, 28, 28)), 2, 3),
DenseLayer(InputLayer((None, 16)), 16),
])
def test_it(self, grad_scale, layer):
layer2 = grad_scale(layer, 0.33)
assert layer2 is layer
for param in layer.get_params(trainable=True):
np.testing.assert_almost_equal(param.tag.grad_scale, 0.33)
for param in layer.get_params(trainable=False):
assert hasattr(param.tag, 'grad_scale') is False
class TestMultiOutput:
@pytest.fixture(scope='class')
def mo_net(self, NeuralNet):
def objective(layers_, target, **kwargs):
out_a_layer = layers_['output_a']
out_b_layer = layers_['output_b']
# Get the outputs
out_a, out_b = get_output([out_a_layer, out_b_layer])
# Get the targets
gt_a = T.cast(target[:, 0], 'int32')
gt_b = target[:, 1].reshape((-1, 1))
# Calculate the multi task loss
cls_loss = aggregate(categorical_crossentropy(out_a, gt_a))
reg_loss = aggregate(categorical_crossentropy(out_b, gt_b))
loss = cls_loss + reg_loss
return loss
# test that both branches of the multi output network are included,
# and also that a single layer isn't included multiple times.
l = InputLayer(shape=(None, 1, 28, 28), name="input")
l = Conv2DLayer(l, name='conv1', filter_size=(5, 5), num_filters=8)
l = Conv2DLayer(l, name='conv2', filter_size=(5, 5), num_filters=8)
la = DenseLayer(l, name='hidden_a', num_units=128)
la = DenseLayer(la, name='output_a', nonlinearity=softmax,
num_units=10)
lb = DenseLayer(l, name='hidden_b', num_units=128)
lb = DenseLayer(lb, name='output_b', nonlinearity=sigmoid, num_units=1)
net = NeuralNet(layers=[la, lb],
update_learning_rate=0.5,
y_tensor_type=None,
regression=True,
objective=objective)
net.initialize()
return net
def test_layers_included(self, mo_net):
expected_names = sorted(["input", "conv1", "conv2",
"hidden_a", "output_a",
"hidden_b", "output_b"])
network_names = sorted(list(mo_net.layers_.keys()))
assert (expected_names == network_names)
def test_predict(self, mo_net):
dummy_data = np.zeros((2, 1, 28, 28), np.float32)
p_cls, p_reg = mo_net.predict(dummy_data)
assert(p_cls.shape == (2, 10))
assert(p_reg.shape == (2, 1))
| mit |
bundgus/python-playground | matplotlib-playground/examples/pylab_examples/trigradient_demo.py | 1 | 3079 | """
Demonstrates computation of gradient with matplotlib.tri.CubicTriInterpolator.
"""
from matplotlib.tri import Triangulation, UniformTriRefiner,\
CubicTriInterpolator
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import math
#-----------------------------------------------------------------------------
# Electrical potential of a dipole
#-----------------------------------------------------------------------------
def dipole_potential(x, y):
""" The electric dipole potential V """
r_sq = x**2 + y**2
theta = np.arctan2(y, x)
z = np.cos(theta)/r_sq
return (np.max(z) - z) / (np.max(z) - np.min(z))
#-----------------------------------------------------------------------------
# Creating a Triangulation
#-----------------------------------------------------------------------------
# First create the x and y coordinates of the points.
n_angles = 30
n_radii = 10
min_radius = 0.2
radii = np.linspace(min_radius, 0.95, n_radii)
angles = np.linspace(0, 2*math.pi, n_angles, endpoint=False)
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
angles[:, 1::2] += math.pi/n_angles
x = (radii*np.cos(angles)).flatten()
y = (radii*np.sin(angles)).flatten()
V = dipole_potential(x, y)
# Create the Triangulation; no triangles specified so Delaunay triangulation
# created.
triang = Triangulation(x, y)
# Mask off unwanted triangles.
xmid = x[triang.triangles].mean(axis=1)
ymid = y[triang.triangles].mean(axis=1)
mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0)
triang.set_mask(mask)
#-----------------------------------------------------------------------------
# Refine data - interpolates the electrical potential V
#-----------------------------------------------------------------------------
refiner = UniformTriRefiner(triang)
tri_refi, z_test_refi = refiner.refine_field(V, subdiv=3)
#-----------------------------------------------------------------------------
# Computes the electrical field (Ex, Ey) as gradient of electrical potential
#-----------------------------------------------------------------------------
tci = CubicTriInterpolator(triang, -V)
# Gradient requested here at the mesh nodes but could be anywhere else:
(Ex, Ey) = tci.gradient(triang.x, triang.y)
E_norm = np.sqrt(Ex**2 + Ey**2)
#-----------------------------------------------------------------------------
# Plot the triangulation, the potential iso-contours and the vector field
#-----------------------------------------------------------------------------
plt.figure()
plt.gca().set_aspect('equal')
plt.triplot(triang, color='0.8')
levels = np.arange(0., 1., 0.01)
cmap = cm.get_cmap(name='hot', lut=None)
plt.tricontour(tri_refi, z_test_refi, levels=levels, cmap=cmap,
linewidths=[2.0, 1.0, 1.0, 1.0])
# Plots direction of the electrical vector field
plt.quiver(triang.x, triang.y, Ex/E_norm, Ey/E_norm,
units='xy', scale=10., zorder=3, color='blue',
width=0.007, headwidth=3., headlength=4.)
plt.title('Gradient plot: an electrical dipole')
plt.show()
| mit |
sunzhxjs/JobGIS | lib/python2.7/site-packages/numpy/core/tests/test_multiarray.py | 12 | 221093 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and suboffsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
        # sizes that are not a power of two are accessed bytewise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
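        # (e.g. a (2, 2, 3) value maps several source elements onto each element of
        # the (2, 3) destination, which only a reduction could resolve, so it raises)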
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
        # test big arrays as they might be allocated differently by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
        raising a KeyError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
        # Check that broadcasting Fortran-style arrays with a subarray works
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
        # Check that incompatible sub-array shapes don't result in broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
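        # (byteswap() swaps the raw bytes of each element while newbyteorder() only
        # flips the dtype's byte order, so together they express the same values as '>i4')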
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
        # check that the broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
        # necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
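        # (nan sorts to the end, so the ascending order of [nan, 1, 0] is its reverse)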
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
        # algorithm because quick and merge sort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
        # but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
        # using None is known to fail at this point
#assert_equal(a.copy().argsort(axis=None, c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
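        # (the keys decrease on purpose, so the search cannot simply continue forward
        # from the previous key's insertion point and has to reset its bounds)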
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
np.random.shuffle(s)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
                # from np.intp on all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
                # from np.intp on all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
assert_equal(np.partition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones((1))
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones((50))
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange((49))
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange((47))[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median of 3 killer, O(n^2) on pure median 3 pivot quickselect
# exercises the median of median of 5 code used to keep O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
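            # (np.roll places the larger half of the values ahead of the smaller half,
            # producing the rotated-sorted pattern described above)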
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange((47)) % 7
tgt = np.sort(np.arange((47)) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
map(np.random.shuffle, d1)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i,:], np.array([i] * d1.shape[0],
dtype=dt))
# array_less does not seem to work right
at((p[:i,:] <= p[i,:]).all(),
msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
at((p[i + 1:,:] > p[i,:]).all(),
msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None,:]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
for row in d1:  # map() is lazy on Python 3, so shuffle each row explicitly
    np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6,7,3,2,9,0]
p = np.argpartition(d,1)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
def test_trace_subclass(self):
# The class would need to override trace to ensure that single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert isinstance(t, MyArray)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
# Not contiguous and 1-sized axis with non matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# contiguous and 1-sized axis with non matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = a.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
# the returned original should not have been changed by an in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
# checking whether the refcount == 1 object is on the Python stack (to
# verify that we are called directly from Python) is flawed, as the object
# may still be above the stack pointer and we have no access to the top of it
#
# def incref_elide_l(l):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
# padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
# the original input should not have been changed by an in-place operation
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(np.ndarray):
pass
class OtherNdarraySubclassWithOverride(np.ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (np.ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
# Check behavior against both bare ndarray objects and a
# ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
# Check that ufunc call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
r = func(*inputs, **kw)
if 'out' in kw:
return r
else:
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
# obj is first, so should get to define outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
# obj2 is an ndarray subclass, so CPython takes care of the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
# obj3 is a subclass that defines __rsub__. CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
# obj3 is a subclass that defines __rmul__. CPython calls it.
assert_equal(arr * obj3, 321)
# But not here, since obj3.__rmul__ is obj2.__rmul__.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
# Since obj3 is a subclass, it should have precedence, like CPython
# would give, even though obj2 has __numpy_ufunc__ and __radd__.
# See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
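# unpickle raw bytes; Python 3 needs encoding='latin1' to read pickles
# that were written by Python 2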
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
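# protocol 0 pickle of a structured array whose 'b' subarray shape was
# stored as a plain int instead of a tuple; loading it should still work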
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
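# each entry is (array-like, expected argmax index); covers nan, complex,
# datetime64/timedelta64 (including NaT) and bool cases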
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 3),
#(['zz', 'a', 'aa', 'a'], 0),
#(['aa', 'z', 'zz', 'a'], 2),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
# these could possibly be relaxed (the previous shapes used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
#check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
#check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestArgmin(TestCase):
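# same (array-like, expected argmin index) format as TestArgmax.nan_arr above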
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 0),
#(['zz', 'a', 'aa', 'a'], 1),
#(['aa', 'z', 'zz', 'a'], 3),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
# these could possibly be relaxed (the previous shapes used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2,3))
#check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
#check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
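# helper: clip random data of every dtype in type_group (native and
# swapped byte order) and check the result stays within the expected range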
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_(np.all(x[mask] == T(val)))
assert_(x.dtype == T)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
def test_masked_array(self):
## x = np.array([1,2,3])
## z = np.ma.array(x,mask=[True,False,False])
## np.putmask(z,[True,True,True],3)
pass
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
# NB. str gives less precision than repr
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
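# helper: parse s both via np.fromstring and, after writing it to a temp
# file, via np.fromfile, and compare both results to value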
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls on Windows, which would normally hang when doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.0,3.51,4.0')
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
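# a is a read-only view of a0 (same data); b and b0 are strided slices of
# them; the tests below check .flat writes and the UPDATEIFCOPY flag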
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.a.flat[12] == 12.0
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.b.flat[4] == 12.0
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert c.flags.writeable is False
assert d.flags.writeable is False
assert e.flags.writeable is True
assert f.flags.writeable is True
assert c.flags.updateifcopy is False
assert d.flags.updateifcopy is False
assert e.flags.updateifcopy is False
assert f.flags.updateifcopy is True
assert f.base is self.b0
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple Subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
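# call f and return the categories of the warnings it raised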
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error, and
# 'a' is not modified:
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
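# Thin wrappers around the corresponding ndarray methods, so the TestStats
# class below can loop over mean/var/std uniformly.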
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
# this needs definition, as there are lots of places along the line
# where type casting may take place.
#for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
def test_ddof(self):
for f in [_var]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * dim
res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
for f in [_std]:
for ddof in range(3):
dim = self.rmat.shape[1]
tgt = f(self.rmat, axis=1) * np.sqrt(dim)
res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
assert_almost_equal(res, tgt)
assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_dot_override(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
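# For reference, the typecodes above are: '?' = bool, 'bhilq' = signed ints,
# 'BHILQ' = unsigned ints, 'efdg' = float16/float32/float64/longdouble,
# 'FDG' = complex64/complex128/clongdouble.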
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
((), (1)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip following tests for now, cblas does not allow non-contiguous
# outputs and consistency with dot would require same type,
# dimensions, subtype, and c_contiguous.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
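# The '@' operator (PEP 465) only exists on Python 3.5+, so the operator-based
# variant of these tests is defined conditionally below.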
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
class TestInner(TestCase):
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself used to segfault or give a
# meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(B, C), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
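# Maps padding-mode names to the integer codes expected by the C-level
# neighborhood-iterator test helpers (test_neighborhood_iterator*) used below.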
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
# Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking circular on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert memoryview(c).strides == (800, 80, 8)
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert memoryview(fortran).strides == (8, 80, 800)
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
"""ticket #2046, should not seqfault, raise AttributeError"""
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
# needs to be larger than the limit of the small-memory cache in ctors.c
a = np.zeros(1000)
del a
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
# Unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
# minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
if sys.version_info[0] < 3:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
if __name__ == "__main__":
run_module_suite()
| mit |
LiaoPan/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
  space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
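# The small eps keeps every edge weight strictly positive, so the similarity
# graph stays fully connected for the spectral embedding.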
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
        plt.contour(labels == l, levels=[0.5],
                    colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
paulray/NICERsoft | scripts/ni_Htest_sortgti.py | 1 | 25149 | #!/usr/bin/env python
# Version: 1.0
# Author: M. Kerr (updated by S. Guillot)
from __future__ import division, print_function
import argparse
from collections import deque
import glob
import os
import sys
from subprocess import check_call
import astropy.units as u
from astropy import log
from astropy.io import fits
from astropy.time import Time
import numpy as np
from pint.fits_utils import read_fits_event_mjds
from pint.eventstats import h2sig,hm,sig2sigma
import scipy.stats
from scipy.stats import chi2
from nicer.values import datadir,KEV_TO_PI
desc= """
Read one or more event files
to sort GTI by background rate
and evaluate H-test
"""
parser = argparse.ArgumentParser(description = desc)
parser.add_argument("infile", help="file or text file with list of event file", nargs='+')
parser.add_argument("outfile", help="name for output files")
parser.add_argument("--emin", help="Minimum energy to include (keV, default=0.25)", type=float, default=0.25)
parser.add_argument("--emax", help="Maximum energy to include (keV, default=2.00)", type=float, default=2.00)
parser.add_argument("--maxemin", help="Maximum emin to use in grid search (keV, default=1.00)", type=float, default=1.00)
parser.add_argument("--minemax", help="Minimum emax to use in grid search (keV, default=1.00)", type=float, default=1.00)
parser.add_argument("--delta_emin", help="Step size of the lower bound of the search grid (keV, default=0.01)", type=float, default=0.01)
parser.add_argument("--delta_emax", help="Step size of the lower bound of the search grid (keV, default=0.02)", type=float, default=0.02)
parser.add_argument("--gridsearch", help="Search over energies to find max H-test", action="store_true",default=False)
parser.add_argument("--minbw", help="Minimum fractional bandwidth used during energy grid searching. E.g., --minbw=0.5 would allow a 1.0 to 1.5 (50%%) keV energy range, but not a 2.0 to 2.2 (10%%) range.", type=float,default=None)
parser.add_argument("--minexp", help="Minimum exposure allowed for a candidate cut, expressed as a fraction of the total. E.g., --minexp=0.2 would allow a cut that throws away 80%% of the GTI.", type=float,default=None)
parser.add_argument("--mingti", help="Minimum GTI length to allow -- short GTIs don't give a reliable count rate. (seconds, default=10.0)", type=float, default=10.0)
parser.add_argument("--nopulsetest", help="Only use the predicted S/N to determine the GTIs to use.", action="store_true",default=False)
parser.add_argument("--verbosity", help="Verbosity (0=quiet,1=default,2=verbose,3=very verbose).", type=int, default=1)
parser.add_argument("--writegti", help="Write out the GTI corresponding to the event selection.", action="store_true",default=False)
parser.add_argument("--writeevents", help="Write out the corresponding event file", action="store_true",default=False)
parser.add_argument("--remote", help="Disable interactive plotting backend", action="store_true",default=False)
parser.add_argument("--usez", help="Use Z^2_2 test instead of H test.", action="store_true",default=False)
parser.add_argument("--nbins", help="Number of bins for plotting pulse profile (default=16)", type=int, default=16)
parser.add_argument("--name", help="Pulsar name for output figure", type=str, default='')
args = parser.parse_args()
import matplotlib
if (args.remote):
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.rc('font', size=14) # controls default text sizes
plt.rc('axes', labelsize=13) # fontsize of the x and y labels
plt.rc('xtick', labelsize=13) # fontsize of the tick labels
plt.rc('ytick', labelsize=13) # fontsize of the tick labels
plt.rc('legend', fontsize=14) # legend fontsize
plt.rc('axes', linewidth=1.5)
plt.rc('xtick.major', size=4, width=1.5)
plt.rc('ytick.major', size=4, width=1.5)
def local_h2sig(h):
h = np.atleast_1d(h)
rvals = np.zeros_like(h)
for ix,x in enumerate(h):
if x > 0:
rvals[ix] = h2sig(x)
return rvals
def get_sigma(hs,usez=False):
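    """ Convert an array of test statistics to equivalent Gaussian sigmas.

    If usez is True the statistic is treated as Z^2_2, which is chi-squared
    distributed with 4 degrees of freedom under the null hypothesis;
    otherwise the H-test calibration (h2sig) is used.
    """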
if usez:
return np.atleast_1d(sig2sigma(chi2.sf(hs,4)))
else:
return local_h2sig(hs)
class Data(object):
""" Encapsulate data from one or more event files.
This is simply intended to replace the tuple that's being used
with something a little more descriptive."""
def __init__(self,data_tuple):
""" data_tuple = [times,phases,PIs,gti_starts,gti_stops]"""
self.times = data_tuple[0]
self.phases = data_tuple[1]
self.pis = data_tuple[2]
a = np.argsort(data_tuple[3])
self.gti_t0s = data_tuple[3][a]
self.gti_t1s = data_tuple[4][a]
def get_contiguous_gtis(self):
""" Return a Data object with any contiguous GTIs merged."""
# gti starts and stops
gti_start = self.gti_t0s
gti_stop = self.gti_t1s
# quick and dirty loop -- array
out_gtis = deque()
out_gtis.append([gti_start[0],gti_stop[0]])
for start,stop in zip(gti_start[1:],gti_stop[1:]):
            # if this start equals the previous stop, just extend the last GTI
if start == out_gtis[-1][1]:
out_gtis[-1][1] = stop
else:
out_gtis.append([start,stop])
t0s,t1s = np.asarray(out_gtis).transpose()
return Data((self.times,self.phases,self.pis,t0s,t1s))
def dice_gtis(self,tmax=100):
""" Break GTIs into small pieces to handle rate variations.
tmax -- target longest GTI (s)"""
new_t0s = deque()
new_t1s = deque()
for t0,t1 in zip(self.gti_t0s,self.gti_t1s):
dt = t1-t0
if dt < tmax:
new_t0s.append(t0)
new_t1s.append(t1)
else:
# break up GTI in such a way to avoid losing time (to tmin)
# and to avoid having pieces longer than tmax
npiece = int(np.floor(dt/tmax))+1
new_edges = np.linspace(t0,t1,npiece+1)
for it0,it1 in zip(new_edges[:-1],new_edges[1:]):
new_t0s.append(it0)
new_t1s.append(it1)
return Data((self.times,self.phases,self.pis,
np.asarray(new_t0s),np.asarray(new_t1s)))
def apply_min_gti(self,min_gti=10):
""" Return a new data object with all short GTIs removed.
All events lying in deleted GTIs are likewise removed."""
# determine which GTI each event belongs to
gti_idx = np.searchsorted(self.gti_t1s,self.times)
# length of GTI for each event
gti_len = (self.gti_t1s-self.gti_t0s)[gti_idx]
mask = gti_len >= min_gti
gti_mask = (self.gti_t1s-self.gti_t0s) >= min_gti
return Data((self.times[mask],self.phases[mask],self.pis[mask],
self.gti_t0s[gti_mask],self.gti_t1s[gti_mask]))
def apply_pi_mask(self,emin,emax):
""" Return a mask selecting only events satisfying emin <= E < emax.
"""
mask = (self.pis >= int(round(emin*KEV_TO_PI))) & \
(self.pis < int(round(emax*KEV_TO_PI)))
return Data((self.times[mask],self.phases[mask],self.pis[mask],
self.gti_t0s,self.gti_t1s))
def get_gti_data(self):
""" Return ..."""
pass
def check_gti(self):
""" Sanity check method to verify all times lie within a GTI.
"""
gti_idx = np.searchsorted(self.gti_t1s,self.times)
m1 = self.times >= self.gti_t0s[gti_idx]
m2 = self.times < self.gti_t1s[gti_idx]
return np.all(m1 & m2)
def runcmd(cmd):
# CMD should be a list of strings since it is not processed by a shell
log.info('CMD: '+" ".join(cmd))
os.system(" ".join(cmd))
## Some ftools calls don't work properly with check_call...not sure why!
## so I am using os.system instead of check_call
#check_call(cmd,env=os.environ)
def load_files(fnames):
""" Load in time stamps, PIs, GTIs, etc. from all files."""
gtis = deque()
times = deque()
pis = deque()
phases = deque()
for fname in fnames:
f = fits.open(fname)
try:
tzero = f['gti']._header['TIMEZERO']
except KeyError:
tzero = 0
t0s = f['gti'].data.field('start') + tzero
t1s = f['gti'].data.field('stop') + tzero
for t0,t1 in zip(t0s,t1s):
gtis.append([t0,t1])
try:
tzero = f['events']._header['TIMEZERO']
except KeyError:
tzero = 0
times.append(f['events'].data.field('time') + tzero )
pis.append(f['events'].data.field('pi'))
try:
phases.append(f['events'].data.field('pulse_phase'))
except:
pass
f.close()
times = np.concatenate(times)
pis = np.concatenate(pis)
if len(phases) > 0:
phases = np.concatenate(phases)
else:
phases = None
t0s,t1s = np.asarray(list(gtis)).transpose()
#return times,phases,pis,t0s,t1s
return Data((times,phases,pis,t0s,t1s))
def dice_gtis(data,tmax=100):
""" Break larger GTIs into small pieces to handle rate variations."""
times,phases,pis,t0s,t1s = data
new_t0s = deque()
new_t1s = deque()
for t0,t1 in zip(t0s,t1s):
dt = t1-t0
if dt < tmax:
new_t0s.append(t0)
new_t1s.append(t1)
else:
# break up GTI in such a way to avoid losing time (to tmin) and
# to avoid having pieces longer than tmax
npiece = int(np.floor(dt/tmax))+1
new_edges = np.linspace(t0,t1,npiece+1)
for it0,it1 in zip(new_edges[:-1],new_edges[1:]):
new_t0s.append(it0)
new_t1s.append(it1)
return times,phases,pis,np.asarray(new_t0s),np.asarray(new_t1s)
def write_gtis(gti_start, gti_stop, outfile, merge_gti=False):
# write out GTIs -- sort them by time rather than bkg
a = np.argsort(gti_start)
gti_start = gti_start[a]
gti_stop = gti_stop[a]
# merge adjacent GTI -- quick and dirty loop
out_gtis = deque()
out_gtis.append([gti_start[0],gti_stop[0]])
for start,stop in zip(gti_start[1:],gti_stop[1:]):
        # if this start equals the previous stop, just merge into the last GTI
if merge_gti and (start == out_gtis[-1][1]):
out_gtis[-1][1] = stop
else:
out_gtis.append([start,stop])
out_gtis = np.asarray(out_gtis)
np.savetxt("{}_OptimalGTI.txt".format(outfile),out_gtis)
# Checking the presence of gti header and columns in data/
gticolumns = os.path.join(datadir,'gti_columns.txt')
gtiheader = os.path.join(datadir,'gti_header.txt')
# make sure TIMEZERO == 0
lines = open(gtiheader).readlines()
for line in lines:
toks = line.split('=')
        if (len(toks) > 1) and (toks[0].strip()=='TIMEZERO'):
if float(toks[1].strip().split()[0]) != 0:
print('WARNING! TIMEZERO in output GTI not consistent.')
break
## Making the GTI file from the text file
log.info("Making the GTI file gti.fits from the GTI data textfile")
cmd = ['ftcreate', '{}'.format(gticolumns), '{}_OptimalGTI.txt'.format(outfile), '{}_OptimalGTI.fits'.format(outfile), 'headfile={}'.format(gtiheader), 'extname="GTI"', 'clobber=yes']
runcmd(cmd)
## Extracting the new event file using the new GTI file created
if args.writeevents:
if len(args.infile)==1:
eventfile = args.infile[0]
outevtfile = "{}_OptimalEvents.fits".format(outfile)
cmd = ['niextract-events', '{0}'.format(eventfile), '{0}'.format(outevtfile), 'timefile={}_OptimalGTI.fits'.format(outfile), 'clobber=yes']
runcmd(cmd)
else:
log.warning("Cannot create events file. niextract-events needs a single file or a list of events files (@list.txt)")
def ensemble_htest(phases,indices,m=20,c=4):
""" Calculate H-test statistic for subsets of a set of phases.
Cache intermediate products to avoid O(N^2) complexity!
"""
phases = np.asarray(phases)*(2*np.pi) # in radians and copy
cache = np.empty((2*m,len(phases)))
for i in range(m):
cache[2*i] = np.cos((i+1)*phases)
cache[2*i+1] = np.sin((i+1)*phases)
rvals = np.zeros(len(indices))
penalty = c*np.arange(0,m)
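    # H statistic: the maximum over m of (Z^2_m - c*(m-1)), i.e. the standard
    # H-test definition with penalty c per additional harmonic.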
idx_mask = indices>0
np.cumsum(cache,axis=1,out=cache)
    t = cache[:,indices[idx_mask]-1]**2
t = np.cumsum(t[::2,...] + t[1::2,...],axis=0)*(2./indices[idx_mask])
rvals[idx_mask] = np.max(t-penalty[:,None],axis=0)
return rvals
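# Illustrative usage sketch (hypothetical values): with phases ordered GTI by
# GTI and nph holding the cumulative photon count after each GTI, e.g.
#   phases = np.array([0.12, 0.34, 0.71, 0.93]); nph = np.array([2, 4])
#   ensemble_htest(phases, nph)
# returns the H statistic evaluated over the first 2 and first 4 photons.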
def ensemble_ztest(phases,indices,m=2):
""" Calculate H-test statistic for subsets of a set of phases.
Cache intermediate products to avoid O(N^2) complexity!
"""
phases = np.asarray(phases)*(2*np.pi) # in radians and copy
cache = np.empty((2*m,len(phases)))
for i in range(m):
cache[2*i] = np.cos((i+1)*phases)
cache[2*i+1] = np.sin((i+1)*phases)
rvals = np.zeros(len(indices))
idx_mask = indices>0
np.cumsum(cache,axis=1,out=cache)
    t = cache[:,indices[idx_mask]-1]**2
t = np.sum(t[::2,...] + t[1::2,...],axis=0)*(2./indices[idx_mask])
rvals[idx_mask] = t
return rvals
def make_sn(data,rate=0.1,usez=False,snonly=False,minexp=None):
""" data -- output of load_local
mask -- optional mask to select events (e.g. on PI)
rate -- assumed rate for S/N calculation in ct/sec
min_gti -- minimum GTI length in seconds
usez -- use Z^2 test instead of H test
snonly -- skip computation of pulsed statistic, only do S/N
"""
#times,phases,pis,t0s,t1s = data
times = data.times
phases = data.phases
pis = data.pis
t0s = data.gti_t0s
t1s = data.gti_t1s
# determine which gti each event belongs to
gti_idx = np.searchsorted(t1s,times)
# count events in each gti
gti_cts = np.bincount(gti_idx,minlength=len(t1s))
gti_len = t1s-t0s
rates = (gti_cts / gti_len)
a = np.argsort(rates)
gti_t0_s = t0s[a]
gti_t1_s = t1s[a]
gti_len_s = gti_len[a]
gti_cts_s = gti_cts[a]
gti_rts_s = gti_cts_s/gti_len_s
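    # Predicted S/N as GTIs are accumulated from lowest to highest background
    # rate: the signal grows as the assumed rate times exposure, while the
    # noise term combines the observed counts with a source-rate contribution.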
sn = rate*np.cumsum(gti_len_s)/np.sqrt(np.cumsum(gti_cts_s)+rate*gti_len_s)
rate = 0
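    # sn0: S/N in the limit of a negligible source rate, i.e. accumulated
    # exposure over the square root of the accumulated counts.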
zero_mask = gti_cts_s > 0
sn0 = np.empty(len(gti_len_s))
sn0[zero_mask] = np.cumsum(gti_len_s[zero_mask])/np.sqrt(np.cumsum(gti_cts_s[zero_mask]))
sn0[~zero_mask] = np.inf
counter = 0
pi_gti = deque()
for i,ct in enumerate(gti_cts):
pi_gti.append(pis[counter:counter+ct])
counter += ct
# apply sorting
pi_gti = [pi_gti[i] for i in a]
if (not snonly) and (phases is not None):
counter = 0
ph_gti = deque()
for i,ct in enumerate(gti_cts):
ph_gti.append(phases[counter:counter+ct])
counter += ct
# apply sorting
ph_gti = [ph_gti[i] for i in a]
# make a set of slices
nph = np.cumsum([len(phg) for phg in ph_gti])
# calculate H test
if usez:
hs = ensemble_ztest(np.concatenate(ph_gti),nph)
else:
hs = ensemble_htest(np.concatenate(ph_gti),nph)
else:
hs = None
ph_gti = None
if minexp is not None:
exposure = np.cumsum(gti_len_s)
        # depress S/N for values that do not satisfy exposure cuts
mask = exposure/exposure[-1] < minexp
sn[mask] = 0
sn0[mask] = 0
        if hs is not None:
            hs[mask] = 0
return sn,sn0,hs,ph_gti,list(pi_gti),gti_rts_s,gti_len_s,gti_t0_s,gti_t1_s
if len(args.infile)==1:
if args.infile[0].startswith('@'):
inputfile = args.infile[0].split('@')[1]
log.info('Reading input ObsID list: {}'.format(inputfile))
all_files = np.loadtxt(inputfile,dtype=str)
else:
all_files = args.infile
else:
all_files = args.infile
data = load_files(all_files)
data_diced = data.dice_gtis(tmax=100)
data_diced = data_diced.apply_min_gti(args.mingti)
assert(data_diced.check_gti())
if args.writeevents:
args.writegti=True
if args.writegti:
# Checking the presence of HEASOFT
try:
check_call('nicerversion',env=os.environ)
except:
print("You need to initialize FTOOLS/HEASOFT first (e.g., type 'heainit')!", file=sys.stderr)
exit()
# Checking the presence of gti header and columns in data/
gticolumns = os.path.join(datadir,'gti_columns.txt')
gtiheader = os.path.join(datadir,'gti_header.txt')
if not os.path.isfile(gtiheader) or not os.path.isfile(gticolumns):
log.error('The files gti_header.txt or gti_columns.txt are missing. Check the {} directory'.format(os.path.abspath(datadir)))
exit()
if args.gridsearch:
all_emin = np.arange(args.emin,args.maxemin+0.005,args.delta_emin)
else:
all_emin = np.array([args.emin])
hbest = 0.0
eminbest = 0.0
emaxbest = 100.0
eminlist = []
emaxlist = []
hgrid = []
for emin in all_emin:
if args.gridsearch:
all_emax = np.arange(args.minemax,args.emax+0.005,args.delta_emax)
else:
args.delta_emax = 0
all_emax = np.array([args.emax])
if len(all_emax) == 0:
break
if (args.verbosity >= 1):
print("emin={:0.2f}, emax ranging from {:0.2f}-{:0.2f} by {:0.2f} keV".format(emin,all_emax[0],all_emax[-1],args.delta_emax))
for emax in all_emax:
if (args.verbosity >= 3):
print(" emin={:0.2f}, emax={:0.2f}".format(emin,emax))
# test for energy bandwidth
if args.gridsearch and (args.minbw is not None):
if emax/emin-1 < args.minbw:
if (args.verbosity >= 2):
print(' excluding emin={:0.2f}, emax={:0.2f} because smaller than specified minbw'.format(emin,emax))
continue
local_data = data_diced.apply_pi_mask(emin,emax)
pred_rate = 0.05/10.0 # 2241
sn,sn0,hs,ph_gti,pi_gti,gti_rts_s,gti_len_s,gti_t0_s,gti_t1_s = \
make_sn(local_data,rate=pred_rate,usez=args.usez,
minexp=args.minexp)
exposure = np.cumsum(gti_len_s)
# scale exposure to the expected S/N
amax = np.argmax(sn)
exposure_scale = sn[amax]/exposure[amax]**0.5
if args.nopulsetest:
Hmax = amax
else:
Hmax = np.argmax(hs)
if not args.gridsearch:
# Make output plots after single iteration.
plt.figure(5); plt.clf()
hsig = get_sigma(hs,usez=args.usez)
if args.usez:
plt.plot(gti_rts_s,hsig,label='Z-test significance')
else:
plt.plot(gti_rts_s,hsig,label='H-test significance')
plt.axvline(gti_rts_s[amax],color='k',ls='--',label='No H-test (sig={:0.3f})'.format(hsig[amax]))
if not args.nopulsetest:
plt.axvline(gti_rts_s[Hmax],color='r',ls='--',label='Max H-test (sig={:0.3f})'.format(hsig[Hmax]))
plt.xlabel('Background Rate (ct/s)')
plt.ylabel('Significance (sigma)')
plt.title('{} - [{:0.2f},{:0.2f}]'.format(args.name,emin,emax))
plt.legend(loc='lower right')
plt.savefig('{}_sig.png'.format(args.outfile))
plt.clf()
nbins=args.nbins
select_ph = np.concatenate(ph_gti[:Hmax+1]).ravel()
profbins = np.linspace(0.0,1.0,nbins+1,endpoint=True)
profile, edges = np.histogram(select_ph,bins=profbins)
bbins = np.concatenate((profbins, profbins[1:]+1.0, profbins[1:]+2.0))
fullprof = np.concatenate((profile,profile,profile,np.array([profile[0]])))
plt.errorbar(bbins-(0.5/nbins),fullprof,
yerr=fullprof**0.5,
marker ='',
drawstyle='steps-mid',
linewidth=1.5,
color='k',
label= '{:0.2f}-{:0.2f} keV'.format(emin,emax)
)
#plt.subplots_adjust(left=0.15, right=0.93) #, bottom=0.1)
plt.tight_layout()
plt.xlim((0.0,2.0))
plt.ylabel('Photons')
plt.xlabel('Phase')
plt.title(args.name)
plt.legend(loc='upper left')
plt.savefig('{}_profile.png'.format(args.outfile))
plt.clf()
if args.writegti:
write_gtis(gti_t0_s[:Hmax+1],gti_t1_s[:Hmax+1],args.outfile)
eminbest = emin
emaxbest = emax
else:
# store data for future comparison
eminlist.append(emin)
emaxlist.append(emax)
hsig = get_sigma(hs[Hmax],usez=args.usez)[0]
hgrid.append(hsig)
if hsig>=hbest:
hbest=hsig
eminbest=emin
emaxbest=emax
if args.gridsearch:
# recreate data optimization -- really need to encapsulate this!
local_data = data_diced.apply_pi_mask(eminbest,emaxbest)
pred_rate = 0.05/10.0 # 2241
sn,sn0,hs,ph_gti,pi_gti,gti_rts_s,gti_len_s,gti_t0_s,gti_t1_s = \
make_sn(local_data,rate=pred_rate,usez=args.usez,
minexp=args.minexp)
exposure = np.cumsum(gti_len_s)
hsig = get_sigma(hs,usez=args.usez)
# scale exposure to the expected S/N
amax = np.argmax(sn)
exposure_scale = sn[amax]/exposure[amax]**0.5
if args.nopulsetest:
Hmax = amax
else:
Hmax = np.argmax(hsig)
plt.figure(5); plt.clf()
if args.usez:
plt.plot(gti_rts_s,hsig,label='Z-test significance')
else:
plt.plot(gti_rts_s,hsig,label='H-test significance')
plt.axvline(gti_rts_s[amax],color='k',ls='--',label='No H-test (sig={:0.3f})'.format(hsig[amax]))
if not args.nopulsetest:
plt.axvline(gti_rts_s[Hmax],color='r',ls='--',label='Max H-test (sig={:0.3f})'.format(hsig[Hmax]))
plt.xlabel('Background Rate (ct/s)')
plt.ylabel('Significance (sigma)')
plt.title('{} - [{:0.2f},{:0.2f}]'.format(args.name,eminbest,emaxbest))
plt.legend(loc='lower right')
plt.savefig('{}_sig_bestrange.png'.format(args.outfile))
plt.clf()
plt.scatter(eminlist,emaxlist, c=hgrid, s=10, edgecolor='')
cbar = plt.colorbar()
if args.usez:
cbar.set_label('Z-test')
else:
cbar.set_label('H-test')
plt.xlabel('Low Energy Cut (keV)')
plt.ylabel('High Energy Cut (keV)')
plt.savefig('{}_grid.png'.format(args.outfile))
plt.clf()
nbins=args.nbins
select_ph = np.concatenate(ph_gti[:Hmax+1]).ravel()
profbins = np.linspace(0.0,1.0,nbins+1,endpoint=True)
profile, edges = np.histogram(select_ph,bins=profbins)
bbins = np.concatenate((profbins, profbins[1:]+1.0, profbins[1:]+2.0))
fullprof = np.concatenate((profile,profile,profile,np.array([profile[0]])))
plt.errorbar(bbins-(0.5/nbins),fullprof,
yerr=fullprof**0.5,
marker ='',
drawstyle='steps-mid',
linewidth=1.5,
color='k',
label= '{:0.2f}-{:0.2f} keV'.format(eminbest,emaxbest)
)
plt.xlim((0.0,2.0))
plt.ylabel('Photons')
plt.xlabel('Phase')
plt.title(args.name)
plt.savefig('{}_profile.png'.format(args.outfile))
print("Maximum significance: {:0.3f} sigma".format(hsig[Hmax]))
print(" obtained in {:0.2f} (out of {:0.2f} ksec)".format(
exposure[Hmax]/1000,exposure[-1]/1000))
print(" between {:0.2f} and {:0.2f} keV".format(eminbest,emaxbest))
print(" for {} events".format(len(select_ph)))
if args.writegti:
write_gtis(gti_t0_s[:Hmax+1],gti_t1_s[:Hmax+1],args.outfile)
else:
print("Maximum significance: {:0.3f} sigma".format(hsig[Hmax]))
print(" obtained in {:0.2f} ksec (out of {:0.2f} ksec)".format(exposure[Hmax]/1000,exposure[-1]/1000))
print(" for {} events".format(len(select_ph)))
# output summary results to text file
a50 = int(round(len(gti_rts_s)*0.5))
a90 = int(round(len(gti_rts_s)*0.9))
output = open('%s_stats.txt'%args.outfile,'w')
output.write('ni_Htest_sortGTI.py invoked as follows: \n')
output.write(' '.join(sys.argv) + '\n')
output.write('Optimal TS (%s-test): %.2f (%.2f sigma).\n'%('Z' if args.usez else 'H',hs[Hmax],hsig[Hmax]))
output.write('Optimal energy range: %.2f to %.2f keV.\n'%(eminbest,emaxbest))
output.write('Total exposure : {:0.2f} kiloseconds.\n'.format(exposure[-1]/1000))
output.write('Optimal exposure: {:0.2f} kiloseconds.\n'.format(exposure[Hmax]/1000))
output.write('Optimal GTI count rate: %.3f cps.\n'%(gti_rts_s[Hmax]))
output.write('-----------------------\n')
output.write('Median (50%%) GTI count rate: %.3f cps.\n'%(gti_rts_s[a50]))
output.write('Exposure for GTIs <= 50%% count rate: %.2f kiloseconds.\n'%(exposure[a50]/1000))
output.write('-----------------------\n')
output.write('90th Percentile GTI count rate: %.3f cps.\n'%(gti_rts_s[a90]))
output.write('Exposure for GTIs <= 90%% count rate: %.2f kiloseconds.\n'%(exposure[a90]/1000))
output.close()
| mit |
soulmachine/scikit-learn | examples/plot_multilabel.py | 25 | 4261 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
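    # solving w0*x + w1*y + b = 0 for y gives y = -(w0/w1)*x - b/w1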
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
pl.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
        raise ValueError("transform must be 'pca' or 'cca'")
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
pl.subplot(2, 2, subplot)
pl.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
pl.scatter(X[:, 0], X[:, 1], s=40, c='gray')
pl.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
pl.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
pl.xticks(())
pl.yticks(())
pl.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
pl.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
pl.xlabel('First principal component')
pl.ylabel('Second principal component')
pl.legend(loc="upper left")
pl.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
pl.subplots_adjust(.04, .02, .97, .94, .09, .2)
pl.show()
| bsd-3-clause |
kai5263499/networkx | examples/drawing/lanl_routes.py | 10 | 2009 | #!/usr/bin/env python
"""
Routes to LANL from 186 sites on the Internet.
This uses Graphviz for layout so you need PyGraphviz or Pydot.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2004-2008
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
def lanl_graph():
""" Return the lanl internet view graph from lanl.edges
"""
import networkx as nx
try:
fh=open('lanl_routes.edgelist','r')
except IOError:
print "lanl.edges not found"
raise
G=nx.Graph()
time={}
time[0]=0 # assign 0 to center node
for line in fh.readlines():
(head,tail,rtt)=line.split()
G.add_edge(int(head),int(tail))
time[int(head)]=float(rtt)
# get largest component and assign ping times to G0time dictionary
G0=nx.connected_component_subgraphs(G)[0]
G0.rtt={}
for n in G0:
G0.rtt[n]=time[n]
return G0
if __name__ == '__main__':
import networkx as nx
import math
try:
from networkx import graphviz_layout
except ImportError:
raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot")
G=lanl_graph()
print "graph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G))
print nx.number_connected_components(G),"connected components"
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
# use graphviz to find radial layout
pos=nx.graphviz_layout(G,prog="twopi",root=0)
# draw nodes, coloring by rtt ping time
nx.draw(G,pos,
node_color=[G.rtt[v] for v in G],
with_labels=False,
alpha=0.5,
node_size=15)
# adjust the plot limits
xmax=1.02*max(xx for xx,yy in pos.values())
ymax=1.02*max(yy for xx,yy in pos.values())
plt.xlim(0,xmax)
plt.ylim(0,ymax)
plt.savefig("lanl_routes.png")
| bsd-3-clause |
thomasdouenne/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/examples/all_consumptions/plot_logement_decile.py | 4 | 2329 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 29 11:00:08 2015
@author: Etienne
"""
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
from pandas import concat
from openfisca_france_indirect_taxation.examples.utils_example import simulate, df_weighted_average_grouped, \
graph_builder_line_percent
if __name__ == '__main__':
import logging
log = logging.getLogger(__name__)
import sys
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
list_coicop12 = []
for coicop12_index in range(1, 13):
list_coicop12.append('coicop12_{}'.format(coicop12_index))
simulated_variables = [
'pondmen',
'niveau_vie_decile',
'somme_coicop12',
]
simulated_variables += list_coicop12
p = dict()
df_to_graph = None
for year in [2000, 2005, 2011]:
simulation_data_frame = simulate(simulated_variables = simulated_variables, year = year)
aggregates_data_frame = df_weighted_average_grouped(dataframe = simulation_data_frame,
groupe = 'niveau_vie_decile', varlist = simulated_variables)
aggregates_data_frame[year] = aggregates_data_frame['coicop12_4'] / aggregates_data_frame['somme_coicop12']
appendable = aggregates_data_frame[year]
if df_to_graph is not None:
df_to_graph = concat([df_to_graph, appendable], axis = 1)
else:
df_to_graph = appendable
graph_builder_line_percent(df_to_graph, 1, 1)
| agpl-3.0 |
Unidata/MetPy | v0.4/_downloads/Station_Plot_with_Layout.py | 2 | 8567 | # Copyright (c) 2008-2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Station Plot with Layout
========================
Make a station plot, complete with sky cover and weather symbols, using a
station plot layout built into MetPy.
The station plot itself is straightforward, but there is a bit of code to perform the
data-wrangling (hopefully that situation will improve in the future). Certainly, if you have
existing point data in a format you can work with trivially, the station plot will be simple.
The `StationPlotLayout` class is used to standardize the plotting various parameters
(i.e. temperature), keeping track of the location, formatting, and even the units for use in
the station plot. This makes it easy (if using standardized names) to re-use a given layout
of a station plot.
"""
import cartopy.crs as ccrs
import cartopy.feature as feat
import matplotlib.pyplot as plt
import numpy as np
from metpy.calc import get_wind_components
from metpy.cbook import get_test_data
from metpy.plots import simple_layout, StationPlot, StationPlotLayout
from metpy.units import units
###########################################
# The setup
# ---------
#
# First read in the data. We use `numpy.loadtxt` to read in the data and use a structured
# `numpy.dtype` to allow different types for the various columns. This allows us to handle
# the columns with string data.
f = get_test_data('station_data.txt')
all_data = np.loadtxt(f, skiprows=1, delimiter=',',
usecols=(1, 2, 3, 4, 5, 6, 7, 17, 18, 19),
dtype=np.dtype([('stid', '3S'), ('lat', 'f'), ('lon', 'f'),
('slp', 'f'), ('air_temperature', 'f'),
('cloud_fraction', 'f'), ('dewpoint', 'f'),
('weather', '16S'),
('wind_dir', 'f'), ('wind_speed', 'f')]))
###########################################
# This sample data has *way* too many stations to plot all of them. Instead, we just select
# a few from around the U.S. and pull those out of the data file.
# Get the full list of stations in the data
all_stids = [s.decode('ascii') for s in all_data['stid']]
# Pull out these specific stations
whitelist = ['OKC', 'ICT', 'GLD', 'MEM', 'BOS', 'MIA', 'MOB', 'ABQ', 'PHX', 'TTF',
'ORD', 'BIL', 'BIS', 'CPR', 'LAX', 'ATL', 'MSP', 'SLC', 'DFW', 'NYC', 'PHL',
'PIT', 'IND', 'OLY', 'SYR', 'LEX', 'CHS', 'TLH', 'HOU', 'GJT', 'LBB', 'LSV',
'GRB', 'CLT', 'LNK', 'DSM', 'BOI', 'FSD', 'RAP', 'RIC', 'JAN', 'HSV', 'CRW',
'SAT', 'BUY', '0CO', 'ZPC', 'VIH']
# Loop over all the whitelisted sites, grab the first data, and concatenate them
data_arr = np.concatenate([all_data[all_stids.index(site)].reshape(1,) for site in whitelist])
# First, look at the names of variables that the layout is expecting:
simple_layout.names()
###########################################
# Next grab the simple variables out of the data we have (attaching correct units), and
# put them into a dictionary that we will hand the plotting function later:
# This is our container for the data
data = dict()
# Copy out to stage everything together. In an ideal world, this would happen on
# the data reading side of things, but we're not there yet.
data['longitude'] = data_arr['lon']
data['latitude'] = data_arr['lat']
data['air_temperature'] = data_arr['air_temperature'] * units.degC
data['dew_point_temperature'] = data_arr['dewpoint'] * units.degC
data['air_pressure_at_sea_level'] = data_arr['slp'] * units('mbar')
###########################################
# Notice that the names (the keys) in the dictionary are the same as those that the
# layout is expecting.
#
# Now perform a few conversions:
#
# - Get wind components from speed and direction
# - Convert cloud fraction values to integer codes [0 - 8]
# - Map METAR weather codes to WMO codes for weather symbols
# Get the wind components, converting from m/s to knots as will be appropriate
# for the station plot
u, v = get_wind_components(data_arr['wind_speed'] * units('m/s'),
data_arr['wind_dir'] * units.degree)
data['eastward_wind'], data['northward_wind'] = u, v
# Convert the fraction value into a code of 0-8, which can be used to pull out
# the appropriate symbol
data['cloud_coverage'] = (8 * data_arr['cloud_fraction']).astype(int)
# Map weather strings to WMO codes, which we can use to convert to symbols
# Only use the first symbol if there are multiple
wx_text = [s.decode('ascii') for s in data_arr['weather']]
wx_codes = {'': 0, 'HZ': 5, 'BR': 10, '-DZ': 51, 'DZ': 53, '+DZ': 55,
'-RA': 61, 'RA': 63, '+RA': 65, '-SN': 71, 'SN': 73, '+SN': 75}
data['present_weather'] = [wx_codes[s.split()[0] if ' ' in s else s] for s in wx_text]
###########################################
# All the data wrangling is finished, just need to set up plotting and go:
# Set up the map projection and set up a cartopy feature for state borders
proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=35,
standard_parallels=[35])
state_boundaries = feat.NaturalEarthFeature(category='cultural',
name='admin_1_states_provinces_lines',
scale='110m', facecolor='none')
###########################################
# The payoff
# ----------
# Change the DPI of the resulting figure. Higher DPI drastically improves the
# look of the text rendering
plt.rcParams['savefig.dpi'] = 255
# Create the figure and an axes set to the projection
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(1, 1, 1, projection=proj)
# Add some various map elements to the plot to make it recognizable
ax.add_feature(feat.LAND, zorder=-1)
ax.add_feature(feat.OCEAN, zorder=-1)
ax.add_feature(feat.LAKES, zorder=-1)
ax.coastlines(resolution='110m', zorder=2, color='black')
ax.add_feature(state_boundaries)
ax.add_feature(feat.BORDERS, linewidth='2', edgecolor='black')
# Set plot bounds
ax.set_extent((-118, -73, 23, 50))
#
# Here's the actual station plot
#
# Start the station plot by specifying the axes to draw on, as well as the
# lon/lat of the stations (with transform). We also set the fontsize to 12 pt.
stationplot = StationPlot(ax, data['longitude'], data['latitude'],
transform=ccrs.PlateCarree(), fontsize=12)
# The layout knows where everything should go, and things are standardized using
# the names of variables. So the layout pulls arrays out of `data` and plots them
# using `stationplot`.
simple_layout.plot(stationplot, data)
plt.show()
###########################################
# or instead, a custom layout can be used:
# Just winds, temps, and dewpoint, with colors. Dewpoint and temp will be plotted
# out to Fahrenheit tenths. Extra data will be ignored
custom_layout = StationPlotLayout()
custom_layout.add_barb('eastward_wind', 'northward_wind', units='knots')
custom_layout.add_value('NW', 'air_temperature', fmt='.1f', units='degF', color='darkred')
custom_layout.add_value('SW', 'dew_point_temperature', fmt='.1f', units='degF',
color='darkgreen')
# Also, we'll add a field that we don't have in our dataset. This will be ignored
custom_layout.add_value('E', 'precipitation', fmt='0.2f', units='inch', color='blue')
# Create the figure and an axes set to the projection
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(1, 1, 1, projection=proj)
# Add some various map elements to the plot to make it recognizable
ax.add_feature(feat.LAND, zorder=-1)
ax.add_feature(feat.OCEAN, zorder=-1)
ax.add_feature(feat.LAKES, zorder=-1)
ax.coastlines(resolution='110m', zorder=2, color='black')
ax.add_feature(state_boundaries)
ax.add_feature(feat.BORDERS, linewidth='2', edgecolor='black')
# Set plot bounds
ax.set_extent((-118, -73, 23, 50))
#
# Here's the actual station plot
#
# Start the station plot by specifying the axes to draw on, as well as the
# lon/lat of the stations (with transform). We also set the fontsize to 12 pt.
stationplot = StationPlot(ax, data['longitude'], data['latitude'],
transform=ccrs.PlateCarree(), fontsize=12)
# The layout knows where everything should go, and things are standardized using
# the names of variables. So the layout pulls arrays out of `data` and plots them
# using `stationplot`.
custom_layout.plot(stationplot, data)
plt.show()
| bsd-3-clause |
crawfordsm/pysalt | slottools/PhotometryConfigWidget.py | 2 | 22957 | ################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
"""
Module containing generic graphical user interface widgets.
"""
# Ensure python 2.5 compatibility
from __future__ import with_statement
import matplotlib.cm
# General imports
import pyfits
import numpy as np
# Gui library imports
from PyQt4 import QtGui, QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.patches import CirclePolygon, Rectangle
# Salt imports
import saltsafeio
from salterror import SaltError, SaltIOError
from saltimagetools import find_object, zscale
class PhotometryConfigWidget(QtGui.QWidget):
"""Configure dialog for photometry.
Has settings for:
* target position, size
* target background
        * type (annulus/region)
* parameters
* comparison position, size
* comparison background
        * type (annulus/region)
* parameters
"""
def __init__(self, imdisplay, config, imlist=None, number=1, parent=None):
"""Setup widget.
*imdisplay* a `FitsDisplay` derived fits display widget,
*imlist* a list of fits image filenames,
*config* filename used for output configuration file,
*number* image number to load on startup,
*parent* parent widget.
"""
# Set default parameters
self.imlist=imlist
self.number=number
self.config=config
self.amp={'target' : 1, 'comparison' : 1 }
# Set default marker
self.mark_with='circle'
# Set default search distance for recentering
self.distance=5
# Default line style parameters
self.line={ 'target' : { 'color' : 'g', 'width' : 2 },
'comparison' : { 'color' : 'g', 'width' : 2 }}
# Import gui
from ui_photometryconfigwidget import Ui_PhotometryConfigWidget
# Setup widget
QtGui.QWidget.__init__(self, parent)
# Bind gui to widget
self.ui = Ui_PhotometryConfigWidget()
self.ui.setupUi(self)
# Destroy widget on close
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
# Connect to display window
self.imdisplay=imdisplay
# Connect position selected signal from display to event handler
self.connect(self.imdisplay, QtCore.SIGNAL('positionSelected(float, float)'), self.selectionHandler)
# Set current display widget for positionSelected signal
self.xdisplay=[]
self.ydisplay=[]
self.rdisplay=[]
# Keep track of currently displayed objects
self.display={'target' : {'position' : False,
'annulus' : False,
'region' : False },
'comparison' : {'position' : False,
'annulus' : False,
'region' : False }}
# Keep track of input widgets
self.parameters=['x','y','r','r1','r2','x1','y1','x2','y2']
self.input={'target' : { 'x' : self.ui.tgtXLineEdit,
'y' : self.ui.tgtYLineEdit,
'r' : self.ui.tgtRLineEdit,
'r1' : self.ui.tgtR1LineEdit,
'r2' : self.ui.tgtR2LineEdit,
'x1' : self.ui.tgtX1LineEdit,
'y1' : self.ui.tgtY1LineEdit,
'x2' : self.ui.tgtX2LineEdit,
'y2' : self.ui.tgtY2LineEdit},
'comparison' : { 'x' : self.ui.cmpXLineEdit,
'y' : self.ui.cmpYLineEdit,
'r' : self.ui.cmpRLineEdit,
'r1' : self.ui.cmpR1LineEdit,
'r2' : self.ui.cmpR2LineEdit,
'x1' : self.ui.cmpX1LineEdit,
'y1' : self.ui.cmpY1LineEdit,
'x2' : self.ui.cmpX2LineEdit,
'y2' : self.ui.cmpY2LineEdit}}
# Keep track of capture buttons
self.buttons=['position','radius','annulus','region']
self.capture={'target' \
: {'position' : self.ui.captureTgt,
'radius' : self.ui.captureTgtRadius,
'annulus' : self.ui.captureTgtAnulusBackground,
'region' : self.ui.captureTgtRegionBackground},
'comparison' \
: {'position' : self.ui.captureCmp,
'radius' : self.ui.captureCmpRadius,
'annulus' : self.ui.captureCmpAnulusBackground,
'region' : self.ui.captureCmpRegionBackground}}
# Keep track of checkbox recenter widgets
self.recenter={'target' : self.ui.tgtRecenterCheckBox,
'comparison' : self.ui.cmpRecenterCheckBox}
self.centered={'target' : False,
'comparison' : False}
# Enable blocking of redraws
self.block={'target' : { 'x' : False,
'y' : False,
'r' : False,
'r1' : False,
'r2' : False,
'x1' : False,
'y1' : False,
'x2' : False,
'y2' : False},
'comparison' : { 'x' : False,
'y' : False,
'r' : False,
'r1' : False,
'r2' : False,
'x1' : False,
'y1' : False,
'x2' : False,
'y2' : False}}
# Set validator to ensure valid input on lineEdit input widgets
self.validator = QtGui.QDoubleValidator(self)
for object in ['target','comparison']:
for key in self.parameters:
self.input[object][key].setValidator(self.validator)
# Set signal mapper for lineEdit updates
self.drawMapper = QtCore.QSignalMapper(self)
# Connect lineEdit updates to signal mapper
for object in ['target','comparison']:
for key in self.parameters:
# Add signal map entry
self.drawMapper.setMapping(self.input[object][key],
QtCore.QString(object+','+key))
# Connect to signal mapper
self.connect(self.input[object][key], QtCore.SIGNAL('textChanged(QString)'), self.drawMapper, QtCore.SLOT('map()'))
# Connect signal mapper to draw handler
self.connect(self.drawMapper, QtCore.SIGNAL('mapped(QString)'),
self.textUpdated)
# Set signal mapper for capture buttons
self.captureMapper = QtCore.QSignalMapper(self)
# Connect capture button signals to signal mapper
for object in ['target','comparison']:
for key in self.buttons:
# Add signal map entry
self.captureMapper.setMapping(self.capture[object][key],
QtCore.QString(object+','+key))
# Connect to signal mapper
self.connect(self.capture[object][key], QtCore.SIGNAL('clicked()'), self.captureMapper, QtCore.SLOT('map()'))
# Connect signal mapper to capture handler
self.connect(self.captureMapper, QtCore.SIGNAL('mapped(QString)'),
self.captureHandler)
# Connect save button
self.connect(self.ui.saveButton, QtCore.SIGNAL('clicked()'), self.save)
# If an image list is given
if self.imlist is not None:
# Connect image selection spinBox to event handlers
self.connect(self.ui.imageSpinBox, QtCore.SIGNAL('valueChanged(int)'), self.loadImage)
self.connect(self.ui.imageSpinBox, QtCore.SIGNAL('valueChanged(int)'), self.redraw)
# Load first image
self.setImageNumber(self.number)
# Hide end selection widgets (not implemented here)
self.ui.tgtEndPosLabel.hide()
self.ui.tgtEndXLabel.hide()
self.ui.tgtEndYLabel.hide()
self.ui.cmpEndPosLabel.hide()
self.ui.cmpEndXLabel.hide()
self.ui.cmpEndYLabel.hide()
self.ui.tgtXEndLineEdit.hide()
self.ui.tgtYEndLineEdit.hide()
self.ui.cmpXEndLineEdit.hide()
self.ui.cmpYEndLineEdit.hide()
self.ui.captureTgtEnd.hide()
self.ui.captureCmpEnd.hide()
def setImageNumber(self,number):
"""Set the image number."""
self.ui.imageSpinBox.setValue(number)
def loadImage(self, number):
"""Loads a new image.
*number* is the image number to be loaded.
        This function uses `saltsafeio.get_exposure` to get the correct
exposure from a list of fits files containing an arbitrary number
of extensions.
"""
# Emit signal
self.emit(QtCore.SIGNAL("imageNumberUpdated(int)"), number)
# Load image from file
self.img=saltsafeio.get_exposure(self.imlist,number)
# Display image
self.imdisplay.loadImage(self.img)
# Redraw canvas
self.imdisplay.redraw_canvas()
def mark(self,*args,**kwargs):
if self.mark_with=='square':
self.imdisplay.addSquare(*args,**kwargs)
elif self.mark_with=='circle':
self.imdisplay.addCircle(*args,**kwargs)
def textUpdated(self,key):
# Get object and parameter from key
obj,par=str(key).split(',')
# Check block
if self.block[obj][par]:
return
# Set block to prevent infinite repeat
self.block[obj][par]=True
# Recenter on object if requested
if par=='x' and self.recenter[obj].isChecked() and not self.centered[obj]:
x=float(self.input[obj]['x'].text())
y=float(self.input[obj]['y'].text())
r=float(self.input[obj]['r'].text())
x,y=find_object(self.img,x,y,self.distance)
self.input[obj]['x'].setText(str(x))
self.input[obj]['y'].setText(str(y))
self.centered[obj]=not(self.centered[obj])
# Check if object region size locking is on
if self.ui.lockObjectSizes.isChecked():
if par=='r':
r=self.input[obj]['r'].text()
if obj=='target':
self.input['comparison']['r'].setText(r)
elif obj=='comparison':
self.input['target']['r'].setText(r)
# Check if background size locking is on
if self.ui.lockBackgroundSize.isChecked():
if par in ['r1','r2']:
r=self.input[obj][par].text()
if obj=='target':
self.ui.cmpAnulusRadioButton.setChecked(True)
self.input['comparison'][par].setText(r)
elif obj=='comparison':
self.ui.tgtAnulusRadioButton.setChecked(True)
self.input['target'][par].setText(r)
elif par in ['x1','y1','x2','y2']:
c=self.input[obj][par].text()
if obj=='target':
self.ui.cmpRegionRadioButton.setChecked(True)
self.input['comparison'][par].setText(c)
elif obj=='comparison':
self.ui.tgtRegionRadioButton.setChecked(True)
self.input['target'][par].setText(c)
# Check if background region centering
if self.ui.allignTgtVerticalCenter.isChecked():
if par in ['y1','y2']:
y=float(self.input[obj][par].text())
center=self.img.shape[0]/2.0
height=abs(y-center)
self.input[obj]['y1'].setText(str(center+height))
self.input[obj]['y2'].setText(str(center-height))
# Draw markers
self.draw(key)
# Unset block
self.block[obj][par]=False
def draw(self,key):
"""Draws markers for object positions, and backgrounds.
To be called when any input widget value changes.
*key* is given by the signal mapper and consists of a string with
the object and parameter separated by a comma.
"""
# Get object and parameter from key
obj,par=str(key).split(',')
try:
# Set amplifier
self.amp[obj]=self.getCurrentAmp()
# Draw markers
if par=='x' or par=='y' or par=='r':
x=float(self.input[obj]['x'].text())
y=float(self.input[obj]['y'].text())
r=float(self.input[obj]['r'].text())
self.display[obj]['position']=True
self.mark(obj,x,y,r,color=self.line[obj]['color'],lw=self.line[obj]['width'])
elif par=='r1' or par=='r2':
# Annulus is selected so remove region marker
self.imdisplay.removePatch(obj+'_region')
x=float(self.input[obj]['x'].text())
y=float(self.input[obj]['y'].text())
r=float(self.input[obj][par].text())
# Keep track of the selected background mode
self.display[obj]['annulus']=True
self.display[obj]['region']=False
self.mark(obj+'_'+par,x,y,r,color=self.line[obj]['color'],lw=self.line[obj]['width'])
elif par=='x1' or par=='y1' or par=='x2' or par=='y2':
# Region is selected so remove annulus markers
self.imdisplay.removePatch(obj+'_r1')
self.imdisplay.removePatch(obj+'_r2')
x1=float(self.input[obj]['x1'].text())
y1=float(self.input[obj]['y1'].text())
x2=float(self.input[obj]['x2'].text())
y2=float(self.input[obj]['y2'].text())
# Keep track of the selected background mode
self.display[obj]['annulus']=False
self.display[obj]['region']=True
self.imdisplay.addRectangle(obj+'_region',x1,y1,x2,y2,
color=self.line[obj]['color'],lw=self.line[obj]['width'])
# Redraw canvas
self.imdisplay.redraw_canvas(keepzoom=True)
except ValueError:
pass
def redraw(self, number):
"""Redraws object and background markers for all objects on the
currently displayed amplifier *number*.
"""
self.imdisplay.reset()
# Find wich amplifier is currently displayed
amp=self.getCurrentAmp()
# (Re)draw markers
for obj in ['target','comparison']:
if self.amp[obj]==amp:
if self.display[obj]['position']:
self.draw(obj+','+'r')
if self.display[obj]['annulus']:
self.draw(obj+','+'r1')
self.draw(obj+','+'r2')
if self.display[obj]['region']:
self.draw(obj+','+'y2')
def getCurrentAmp(self, namps=4):
"""Returns the currently displayed amplifier.
*namps* is the number of amplifiers on the CCD.
"""
# Get exposure number
n=int(self.ui.imageSpinBox.value())
# Convert exposure number to current amplifier number
amp=n%namps
if amp==0:
amp=namps
return amp
def captureHandler(self, key):
"""Called when a capture button is clicked.
*key* is given by the signal mapper and consists of a string with
the object and parameter separated by a comma.
Depending on the *key* input widgets are added to the current
display lists.
Subsequent calls to `self.selectionHandler` get displayed in
the listed widgets.
"""
# Get object and parameter from key
obj,par=str(key).split(',')
# Add input widgets to lists
if par=='position':
self.xdisplay=[self.input[obj]['x']]
self.ydisplay=[self.input[obj]['y']]
self.rdisplay=[]
elif par=='radius':
self.xdisplay=[]
self.ydisplay=[]
self.x=float(self.input[obj]['x'].text())
self.y=float(self.input[obj]['y'].text())
self.rdisplay=[self.input[obj]['r']]
elif par=='annulus':
self.xdisplay=[]
self.ydisplay=[]
self.x=float(self.input[obj]['x'].text())
self.y=float(self.input[obj]['y'].text())
self.rdisplay=[self.input[obj]['r1'], self.input[obj]['r2']]
elif par=='region':
self.xdisplay=[self.input[obj]['x1'], self.input[obj]['x2']]
self.ydisplay=[self.input[obj]['y1'], self.input[obj]['y2']]
self.rdisplay=[]
def selectionHandler(self, x, y):
"""Event handler for click in image display window.
*x*, *y* is the position (in image pixel coordinates) of the click.
These positions are inserted into the first input widgets in the
display lists.
If a radius is requested this is calculated from the position given
in (self.x, self.y) which should be set to the current object.
"""
if len(self.xdisplay)>0:
display=self.xdisplay.pop(0)
display.setText(str(x))
if len(self.ydisplay)>0:
display=self.ydisplay.pop(0)
display.setText(str(y))
if len(self.rdisplay)>0:
r=np.sqrt((x-self.x)**2+(y-self.y)**2)
display=self.rdisplay.pop(0)
display.setText(str(r))
def setSearchDistance(self, distance):
"""Set search distance used for recentering."""
self.distance=int(distance)
def setMarkerType(self, marker):
"""Set marker type to 'circle' or 'square'."""
if marker in ['circle','square']:
self.mark_with=marker
else:
raise SaltIOError('Unknown marker type '+str(marker))
def setLineColor(self, object, color):
"""Changes the default line color used for marking."""
self.line[object]['color']=color
def setLineWidth(self, object, width):
"""Changes the default line width used for marking."""
self.line[object]['width']=width
def save(self):
"""Save configuration.
The format is::
            For objects that use an annulus:
object amp x y r r1 r2
For objects that use a region:
object amp x y r x1 y1 x2 y2
"""
if (self.ui.tgtAnulusRadioButton.isChecked() and self.ui.cmpRegionRadioButton.isChecked()) or \
(self.ui.tgtRegionRadioButton.isChecked() and self.ui.cmpAnulusRadioButton.isChecked()):
msg='SLOTPREVIEW--SLOTPHOT can not handle different background types'
raise SaltError(msg)
# Write values to file
with open(self.config,'w') as f:
for i,obj in enumerate(['target','comparison']):
b_type='region'
if obj=='target':
print obj, self.ui.tgtAnulusRadioButton.isChecked()
if self.ui.tgtAnulusRadioButton.isChecked(): b_type='annulus'
elif obj=='comparison':
if self.ui.cmpAnulusRadioButton.isChecked(): b_type='annulus'
                # The background type was decided above from the annulus/region radio buttons
line='%i\t%i\t' % (i+1, self.amp[obj])
if b_type=='annulus':
line+=''.join('%3.2f\t' % float(self.input[obj][key].text()) for key in ['x', 'y', 'r', 'r1', 'r2'])
else:
                    line+=''.join('%3.2f\t' % float(self.input[obj][key].text()) for key in ['x', 'y', 'r', 'x1', 'y1', 'x2', 'y2'])
# Write string to configfile
f.write(line.rstrip()+'\n')
# Exit program
self.close()
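    # Illustrative example (the values are made up, not taken from the
    # original code): with an annulus background the file written above would
    # contain tab-separated lines of the form
    #   1   2   512.00   480.00   10.00   15.00   20.00
    #   2   2   650.00   473.00   10.00   15.00   20.00
    # i.e. object number, amplifier, x, y, r, r1, r2.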
| bsd-3-clause |
jefffohl/nupic | examples/opf/tools/sp_plotter.py | 34 | 15177 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import sys
import os
import time
import copy
import csv
import numpy as np
from nupic.research.spatial_pooler import SpatialPooler
from nupic.bindings.math import GetNTAReal
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
realDType = GetNTAReal()
def generatePlot(outputs, origData):
""" Generates a table where each cell represent a frequency of pairs
as described below.
x coordinate is the % difference between input records (origData list),
y coordinate is the % difference between corresponding output records.
"""
PLOT_PRECISION = 100
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
outputSize = len(outputs)
for i in range(0,outputSize):
for j in range(i+1,outputSize):
in1 = outputs[i]
in2 = outputs[j]
dist = (abs(in1-in2) > 0.1)
intDist = int(dist.sum()/2+0.1)
orig1 = origData[i]
orig2 = origData[j]
origDist = (abs(orig1-orig2) > 0.1)
intOrigDist = int(origDist.sum()/2+0.1)
if intDist < 2 and intOrigDist > 10:
print 'Elements %d,%d has very small SP distance: %d' % (i, j, intDist)
print 'Input elements distance is %d' % intOrigDist
x = int(PLOT_PRECISION*intDist/40.0)
y = int(PLOT_PRECISION*intOrigDist/42.0)
if distribMatrix[x, y] < 0.1:
distribMatrix[x, y] = 3
else:
if distribMatrix[x, y] < 10:
distribMatrix[x, y] += 1
# Add some elements for the scale drawing
distribMatrix[4, 50] = 3
distribMatrix[4, 52] = 4
distribMatrix[4, 54] = 5
distribMatrix[4, 56] = 6
distribMatrix[4, 58] = 7
distribMatrix[4, 60] = 8
distribMatrix[4, 62] = 9
distribMatrix[4, 64] = 10
return distribMatrix
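# Illustrative note (not part of the original code): distribMatrix is a 2-D
# histogram with PLOT_PRECISION+1 bins per axis.  The first index bins the SP
# output distance and the second index bins the raw input distance (scaled by
# 40.0 and 42.0, matching the 40 active columns and 42 active input bits used
# elsewhere in this script); when shown with plt.imshow(origin='lower') these
# become the y and x axes of the plots below.  The distribMatrix[4, 50..64]
# assignments just draw a small colour-scale legend into the image.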
def generateRandomInput(numRecords, elemSize = 400, numSet = 42):
""" Generates a set of input record
Params:
numRecords - how many records to generate
elemSize - the size of each record (num 0s or 1s)
numSet - how many 1s in each record
Returns: a list of inputs
"""
inputs = []
for _ in xrange(numRecords):
input = np.zeros(elemSize, dtype=realDType)
for _ in range(0,numSet):
ind = np.random.random_integers(0, elemSize-1, 1)[0]
input[ind] = 1
while abs(input.sum() - numSet) > 0.1:
ind = np.random.random_integers(0, elemSize-1, 1)[0]
input[ind] = 1
inputs.append(input)
return inputs
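# Example usage (illustrative sketch, not part of the original script):
#
#   samples = generateRandomInput(5, elemSize=400, numSet=42)
#   assert len(samples) == 5
#   assert all(int(s.sum()) == 42 for s in samples)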
def appendInputWithSimilarValues(inputs):
""" Creates an 'one-off' record for each record in the inputs. Appends new
records to the same inputs list.
"""
numInputs = len(inputs)
for i in xrange(numInputs):
input = inputs[i]
for j in xrange(len(input)-1):
if input[j] == 1 and input[j+1] == 0:
newInput = copy.deepcopy(input)
newInput[j] = 0
newInput[j+1] = 1
inputs.append(newInput)
break
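# Illustrative note (not part of the original code): the first '10' bit pair
# is swapped to '01', so e.g. the record [1, 1, 0, 0, ...] gets the neighbour
# [1, 0, 1, 0, ...] appended, which differs from it in exactly two positions
# (an overlap distance of 1 in the convention used by generatePlot).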
def appendInputWithNSimilarValues(inputs, numNear = 10):
""" Creates a neighboring record for each record in the inputs and adds
new records at the end of the inputs list
"""
numInputs = len(inputs)
skipOne = False
for i in xrange(numInputs):
input = inputs[i]
numChanged = 0
newInput = copy.deepcopy(input)
for j in xrange(len(input)-1):
if skipOne:
skipOne = False
continue
if input[j] == 1 and input[j+1] == 0:
newInput[j] = 0
newInput[j+1] = 1
inputs.append(newInput)
newInput = copy.deepcopy(newInput)
#print input
#print newInput
numChanged += 1
skipOne = True
if numChanged == numNear:
break
def modifyBits(inputVal, maxChanges):
""" Modifies up to maxChanges number of bits in the inputVal
"""
changes = np.random.random_integers(0, maxChanges, 1)[0]
if changes == 0:
return inputVal
inputWidth = len(inputVal)
whatToChange = np.random.random_integers(0, 41, changes)
runningIndex = -1
numModsDone = 0
for i in xrange(inputWidth):
if numModsDone >= changes:
break
if inputVal[i] == 1:
runningIndex += 1
if runningIndex in whatToChange:
if i != 0 and inputVal[i-1] == 0:
inputVal[i-1] = 1
inputVal[i] = 0
return inputVal
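# Illustrative note (not part of the original code): modifyBits shifts some of
# the set bits one position to the left when the neighbouring bit is 0.  For
# example, if inputVal is [0, 1, 0, 1] and the draw selects the first set bit
# (running index 0), the result is [1, 0, 0, 1]; if the random number of
# changes is 0 the record is returned untouched.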
def getRandomWithMods(inputSpace, maxChanges):
""" Returns a random selection from the inputSpace with randomly modified
up to maxChanges number of bits.
"""
size = len(inputSpace)
ind = np.random.random_integers(0, size-1, 1)[0]
value = copy.deepcopy(inputSpace[ind])
if maxChanges == 0:
return value
return modifyBits(value, maxChanges)
def testSP():
""" Run a SP test
"""
elemSize = 400
numSet = 42
addNear = True
numRecords = 2
wantPlot = True
poolPct = 0.5
itr = 1
doLearn = True
while numRecords < 3:
# Setup a SP
sp = SpatialPooler(
columnDimensions=(2048, 1),
inputDimensions=(1, elemSize),
potentialRadius=elemSize/2,
numActiveColumnsPerInhArea=40,
spVerbosity=0,
stimulusThreshold=0,
seed=1,
potentialPct=poolPct,
globalInhibition=True
)
# Generate inputs using rand()
inputs = generateRandomInput(numRecords, elemSize, numSet)
if addNear:
# Append similar entries (distance of 1)
appendInputWithNSimilarValues(inputs, 42)
inputSize = len(inputs)
print 'Num random records = %d, inputs to process %d' % (numRecords, inputSize)
# Run a number of iterations, with learning on or off,
# retrieve results from the last iteration only
outputs = np.zeros((inputSize,2048))
numIter = 1
if doLearn:
numIter = itr
for iter in xrange(numIter):
for i in xrange(inputSize):
time.sleep(0.001)
if iter == numIter - 1:
# TODO: See https://github.com/numenta/nupic/issues/2072
sp.compute(inputs[i], learn=doLearn, activeArray=outputs[i])
#print outputs[i].sum(), outputs[i]
else:
# TODO: See https://github.com/numenta/nupic/issues/2072
output = np.zeros(2048)
sp.compute(inputs[i], learn=doLearn, activeArray=output)
# Build a plot from the generated input and output and display it
distribMatrix = generatePlot(outputs, inputs)
# If we don't want a plot, just continue
if wantPlot:
plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
plt.ylabel('SP (2048/40) distance in %')
plt.xlabel('Input (400/42) distance in %')
title = 'SP distribution'
if doLearn:
        title += ', learning ON'
else:
title += ', learning OFF'
title += ', inputs = %d' % len(inputs)
title += ', iterations = %d' % numIter
title += ', poolPct =%f' % poolPct
plt.suptitle(title, fontsize=12)
plt.show()
#plt.savefig(os.path.join('~/Desktop/ExperimentResults/videos5', '%s' % numRecords))
#plt.clf()
numRecords += 1
return
def testSPNew():
""" New version of the test"""
elemSize = 400
numSet = 42
addNear = True
numRecords = 1000
wantPlot = False
poolPct = 0.5
itr = 5
pattern = [60, 1000]
doLearn = True
start = 1
learnIter = 0
noLearnIter = 0
numLearns = 0
numTests = 0
numIter = 1
numGroups = 1000
PLOT_PRECISION = 100.0
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
inputs = generateRandomInput(numGroups, elemSize, numSet)
# Setup a SP
sp = SpatialPooler(
columnDimensions=(2048, 1),
inputDimensions=(1, elemSize),
potentialRadius=elemSize/2,
numActiveColumnsPerInhArea=40,
spVerbosity=0,
stimulusThreshold=0,
synPermConnected=0.12,
seed=1,
potentialPct=poolPct,
globalInhibition=True
)
  cleanPlot = False
  # Output buffers filled in by sp.compute() below
  output1 = np.zeros(2048)
  output2 = np.zeros(2048)
for i in xrange(numRecords):
input1 = getRandomWithMods(inputs, 4)
if i % 2 == 0:
input2 = getRandomWithMods(inputs, 4)
else:
input2 = input1.copy()
input2 = modifyBits(input2, 21)
inDist = (abs(input1-input2) > 0.1)
intInDist = int(inDist.sum()/2+0.1)
#print intInDist
if start == 0:
doLearn = True
learnIter += 1
if learnIter == pattern[start]:
numLearns += 1
start = 1
noLearnIter = 0
elif start == 1:
doLearn = False
noLearnIter += 1
if noLearnIter == pattern[start]:
numTests += 1
start = 0
learnIter = 0
cleanPlot = True
# TODO: See https://github.com/numenta/nupic/issues/2072
sp.compute(input1, learn=doLearn, activeArray=output1)
sp.compute(input2, learn=doLearn, activeArray=output2)
time.sleep(0.001)
outDist = (abs(output1-output2) > 0.1)
intOutDist = int(outDist.sum()/2+0.1)
if not doLearn and intOutDist < 2 and intInDist > 10:
"""
sp.spVerbosity = 10
# TODO: See https://github.com/numenta/nupic/issues/2072
sp.compute(input1, learn=doLearn, activeArray=output1)
sp.compute(input2, learn=doLearn, activeArray=output2)
sp.spVerbosity = 0
print 'Elements has very small SP distance: %d' % intOutDist
print output1.nonzero()
print output2.nonzero()
print sp._firingBoostFactors[output1.nonzero()[0]]
print sp._synPermBoostFactors[output1.nonzero()[0]]
print 'Input elements distance is %d' % intInDist
print input1.nonzero()
print input2.nonzero()
sys.stdin.readline()
"""
if not doLearn:
x = int(PLOT_PRECISION*intOutDist/40.0)
y = int(PLOT_PRECISION*intInDist/42.0)
if distribMatrix[x, y] < 0.1:
distribMatrix[x, y] = 3
else:
if distribMatrix[x, y] < 10:
distribMatrix[x, y] += 1
#print i
# If we don't want a plot, just continue
if wantPlot and cleanPlot:
plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
plt.ylabel('SP (2048/40) distance in %')
plt.xlabel('Input (400/42) distance in %')
title = 'SP distribution'
#if doLearn:
# title += ', leaning ON'
#else:
# title += ', learning OFF'
title += ', learn sets = %d' % numLearns
title += ', test sets = %d' % numTests
title += ', iter = %d' % numIter
title += ', groups = %d' % numGroups
title += ', Pct =%f' % poolPct
plt.suptitle(title, fontsize=12)
#plt.show()
plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosNew', '%s' % i))
plt.clf()
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
cleanPlot = False
def testSPFile():
""" Run test on the data file - the file has records previously encoded.
"""
spSize = 2048
spSet = 40
poolPct = 0.5
pattern = [50, 1000]
doLearn = True
PLOT_PRECISION = 100.0
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
inputs = []
#file = open('~/Desktop/ExperimentResults/sampleArtificial.csv', 'rb')
#elemSize = 400
#numSet = 42
#file = open('~/Desktop/ExperimentResults/sampleDataBasilOneField.csv', 'rb')
#elemSize = 499
#numSet = 7
  outdir = os.path.expanduser('~/Desktop/ExperimentResults/Basil100x21')
inputFile = outdir+'.csv'
file = open(inputFile, 'rb')
elemSize = 100
numSet = 21
reader = csv.reader(file)
for row in reader:
input = np.array(map(float, row), dtype=realDType)
if len(input.nonzero()[0]) != numSet:
continue
inputs.append(input.copy())
file.close()
# Setup a SP
sp = SpatialPooler(
columnDimensions=(spSize, 1),
inputDimensions=(1, elemSize),
potentialRadius=elemSize/2,
numActiveColumnsPerInhArea=spSet,
spVerbosity=0,
stimulusThreshold=0,
synPermConnected=0.10,
seed=1,
potentialPct=poolPct,
globalInhibition=True
)
cleanPlot = False
doLearn = False
print 'Finished reading file, inputs/outputs to process =', len(inputs)
  size = len(inputs)
  # Output buffers filled in by sp.compute() below
  outputs = np.zeros((size, spSize))
  output1 = np.zeros(spSize)
  output2 = np.zeros(spSize)
for iter in xrange(100):
print 'Iteration', iter
# Learn
if iter != 0:
for learnRecs in xrange(pattern[0]):
# TODO: See https://github.com/numenta/nupic/issues/2072
ind = np.random.random_integers(0, size-1, 1)[0]
sp.compute(inputs[ind], learn=True, activeArray=outputs[ind])
# Test
for _ in xrange(pattern[1]):
rand1 = np.random.random_integers(0, size-1, 1)[0]
rand2 = np.random.random_integers(0, size-1, 1)[0]
sp.compute(inputs[rand1], learn=False, activeArray=output1)
sp.compute(inputs[rand2], learn=False, activeArray=output2)
outDist = (abs(output1-output2) > 0.1)
intOutDist = int(outDist.sum()/2+0.1)
inDist = (abs(inputs[rand1]-inputs[rand2]) > 0.1)
intInDist = int(inDist.sum()/2+0.1)
if intInDist != numSet or intOutDist != spSet:
print rand1, rand2, '-', intInDist, intOutDist
x = int(PLOT_PRECISION*intOutDist/spSet)
y = int(PLOT_PRECISION*intInDist/numSet)
if distribMatrix[x, y] < 0.1:
distribMatrix[x, y] = 3
else:
if distribMatrix[x, y] < 10:
distribMatrix[x, y] += 1
if True:
plt.imshow(distribMatrix, origin='lower', interpolation = "nearest")
plt.ylabel('SP (%d/%d) distance in pct' % (spSize, spSet))
plt.xlabel('Input (%d/%d) distance in pct' % (elemSize, numSet))
title = 'SP distribution'
title += ', iter = %d' % iter
title += ', Pct =%f' % poolPct
plt.suptitle(title, fontsize=12)
#plt.savefig(os.path.join('~/Desktop/ExperimentResults/videosArtData', '%s' % iter))
plt.savefig(os.path.join(outdir, '%s' % iter))
plt.clf()
distribMatrix = np.zeros((PLOT_PRECISION+1,PLOT_PRECISION+1))
if __name__ == '__main__':
np.random.seed(83)
#testSP()
#testSPNew()
testSPFile()
| gpl-3.0 |
dsm054/pandas | pandas/tests/indexes/datetimes/test_scalar_compat.py | 1 | 10783 | # -*- coding: utf-8 -*-
"""
Tests for DatetimeIndex methods behaving like their Timestamp counterparts
"""
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import DatetimeIndex, Timestamp, date_range
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
class TestDatetimeIndexOps(object):
def test_dti_time(self):
rng = date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
assert (result == expected).all()
def test_dti_date(self):
rng = date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
assert (result == expected).all()
def test_dti_date_out_of_range(self):
# GH#1475
pytest.raises(ValueError, DatetimeIndex, ['1400-01-01'])
pytest.raises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
@pytest.mark.parametrize('field', [
'dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter',
'days_in_month', 'is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'])
def test_dti_timestamp_fields(self, field):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
expected = getattr(idx, field)[-1]
if field == 'weekday_name':
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = getattr(Timestamp(idx[-1]), field)
else:
result = getattr(Timestamp(idx[-1]), field)
assert result == expected
def test_dti_timestamp_freq_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
assert idx.freq == Timestamp(idx[-1], idx.freq).freq
assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr
# ----------------------------------------------------------------
# DatetimeIndex.round
def test_round_daily(self):
dti = date_range('20130101 09:10:11', periods=5)
result = dti.round('D')
expected = date_range('20130101', periods=5)
tm.assert_index_equal(result, expected)
dti = dti.tz_localize('UTC').tz_convert('US/Eastern')
result = dti.round('D')
expected = date_range('20130101',
periods=5).tz_localize('US/Eastern')
tm.assert_index_equal(result, expected)
result = dti.round('s')
tm.assert_index_equal(result, dti)
# invalid
for freq in ['Y', 'M', 'foobar']:
pytest.raises(ValueError, lambda: dti.round(freq))
def test_round(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd._libs.tslibs.frequencies.INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
rng.round(freq='foo')
with pytest.raises(ValueError, match=msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
rng.round(freq='M')
with pytest.raises(ValueError, match=msg):
elt.round(freq='M')
# GH#14440 & GH#15578
index = DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(False):
ts = '2016-10-17 12:00:00.001501031'
DatetimeIndex([ts]).round('1010ns')
def test_no_rounding_occurs(self, tz_naive_fixture):
# GH 21262
tz = tz_naive_fixture
rng = date_range(start='2016-01-01', periods=5,
freq='2Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='2T'),
Timestamp('2016-01-01 00:02:00', tz=tz, freq='2T'),
Timestamp('2016-01-01 00:04:00', tz=tz, freq='2T'),
Timestamp('2016-01-01 00:06:00', tz=tz, freq='2T'),
Timestamp('2016-01-01 00:08:00', tz=tz, freq='2T'),
])
tm.assert_index_equal(rng.round(freq='2T'), expected_rng)
@pytest.mark.parametrize('test_input, rounder, freq, expected', [
(['2117-01-01 00:00:45'], 'floor', '15s', ['2117-01-01 00:00:45']),
(['2117-01-01 00:00:45'], 'ceil', '15s', ['2117-01-01 00:00:45']),
(['2117-01-01 00:00:45.000000012'], 'floor', '10ns',
['2117-01-01 00:00:45.000000010']),
(['1823-01-01 00:00:01.000000012'], 'ceil', '10ns',
['1823-01-01 00:00:01.000000020']),
(['1823-01-01 00:00:01'], 'floor', '1s', ['1823-01-01 00:00:01']),
(['1823-01-01 00:00:01'], 'ceil', '1s', ['1823-01-01 00:00:01']),
(['2018-01-01 00:15:00'], 'ceil', '15T', ['2018-01-01 00:15:00']),
(['2018-01-01 00:15:00'], 'floor', '15T', ['2018-01-01 00:15:00']),
(['1823-01-01 03:00:00'], 'ceil', '3H', ['1823-01-01 03:00:00']),
(['1823-01-01 03:00:00'], 'floor', '3H', ['1823-01-01 03:00:00']),
(('NaT', '1823-01-01 00:00:01'), 'floor', '1s',
('NaT', '1823-01-01 00:00:01')),
(('NaT', '1823-01-01 00:00:01'), 'ceil', '1s',
('NaT', '1823-01-01 00:00:01'))
])
def test_ceil_floor_edge(self, test_input, rounder, freq, expected):
dt = DatetimeIndex(list(test_input))
func = getattr(dt, rounder)
result = func(freq)
expected = DatetimeIndex(list(expected))
assert expected.equals(result)
@pytest.mark.parametrize('start, index_freq, periods', [
('2018-01-01', '12H', 25),
('2018-01-01 0:0:0.124999', '1ns', 1000),
])
@pytest.mark.parametrize('round_freq', [
'2ns', '3ns', '4ns', '5ns', '6ns', '7ns',
'250ns', '500ns', '750ns',
'1us', '19us', '250us', '500us', '750us',
'1s', '2s', '3s',
'12H', '1D',
])
def test_round_int64(self, start, index_freq, periods, round_freq):
dt = DatetimeIndex(start=start, freq=index_freq, periods=periods)
unit = to_offset(round_freq).nanos
# test floor
result = dt.floor(round_freq)
diff = dt.asi8 - result.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), "floor not a {} multiple".format(round_freq)
assert (0 <= diff).all() and (diff < unit).all(), "floor error"
# test ceil
result = dt.ceil(round_freq)
diff = result.asi8 - dt.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), "ceil not a {} multiple".format(round_freq)
assert (0 <= diff).all() and (diff < unit).all(), "ceil error"
# test round
result = dt.round(round_freq)
diff = abs(result.asi8 - dt.asi8)
mod = result.asi8 % unit
assert (mod == 0).all(), "round not a {} multiple".format(round_freq)
assert (diff <= unit // 2).all(), "round error"
if unit % 2 == 0:
assert (
result.asi8[diff == unit // 2] % 2 == 0
).all(), "round half to even error"
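        # Illustrative note (not part of the original test): with
        # round_freq='2s' a timestamp such as '2018-01-01 00:00:01' lies
        # exactly halfway between two 2-second multiples; the tie is broken
        # towards the even multiple, here '2018-01-01 00:00:00'.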
# ----------------------------------------------------------------
# DatetimeIndex.normalize
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
tm.assert_index_equal(result, expected)
arr_ns = np.array([1380585623454345752,
1380585612343234312]).astype("datetime64[ns]")
rng_ns = DatetimeIndex(arr_ns)
rng_ns_normalized = rng_ns.normalize()
arr_ns = np.array([1380585600000000000,
1380585600000000000]).astype("datetime64[ns]")
expected = DatetimeIndex(arr_ns)
tm.assert_index_equal(rng_ns_normalized, expected)
assert result.is_normalized
assert not rng.is_normalized
class TestDateTimeIndexToJulianDate(object):
def test_1700(self):
dr = date_range(start=Timestamp('1710-10-01'), periods=5, freq='D')
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
dr = date_range(start=Timestamp('2000-02-27'), periods=5, freq='D')
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
dr = date_range(start=Timestamp('2000-02-27'), periods=5, freq='H')
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_minute(self):
dr = date_range(start=Timestamp('2000-02-27'), periods=5, freq='T')
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_second(self):
dr = date_range(start=Timestamp('2000-02-27'), periods=5, freq='S')
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
| bsd-3-clause |
466152112/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 227 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
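# An equivalent way to build this curve (sketch, not part of the original
# example): staged_predict yields predictions after each boosting iteration,
# and for the least-squares loss the deviance equals the mean squared error:
#
#   for i, y_pred in enumerate(clf.staged_predict(X_test)):
#       test_score[i] = mean_squared_error(y_test, y_pred)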
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |