repo_name | path | copies | size | content | license
---|---|---|---|---|---
DynamicGravitySystems/DGP | tests/test_etc.py | 1 | 5361 | import unittest
import numpy as np
import pandas as pd
from dgp.lib.etc import align_frames
class TestAlignOps(unittest.TestCase):
# TODO: Test with another DatetimeIndex
# TODO: Test with other interpolation methods
# TODO: Tests for interp_only
def test_align_args(self):
frame1 = pd.Series(np.arange(10))
index1 = pd.Timestamp('2018-01-29 15:19:28.000') + \
pd.to_timedelta(np.arange(10), unit='s')
frame1.index = index1
frame2 = pd.Series(np.arange(10, 20))
index2 = pd.Timestamp('2018-01-29 15:00:28.002') + \
pd.to_timedelta(np.arange(10), unit='s')
frame2.index = index2
msg = 'Invalid value for align_to parameter: invalid'
with self.assertRaises(ValueError, msg=msg):
align_frames(frame1, frame2, align_to='invalid')
msg = 'Frames do not overlap'
with self.assertRaises(ValueError, msg=msg):
align_frames(frame1, frame2)
frame1 = pd.Series(np.arange(10))
index1 = pd.Timestamp('2018-01-29 15:00:28.000') + \
pd.to_timedelta(np.arange(10), unit='s')
frame1.index = index1
frame2 = pd.Series(np.arange(10, 20))
index2 = pd.Timestamp('2018-01-29 15:19:28.002') + \
pd.to_timedelta(np.arange(10), unit='s')
frame2.index = index2
msg = 'Frames do not overlap'
with self.assertRaises(ValueError, msg=msg):
align_frames(frame1, frame2)
def test_align_crop(self):
frame1 = pd.Series(np.arange(10))
index1 = pd.Timestamp('2018-01-29 15:19:30.000') + \
pd.to_timedelta(np.arange(10), unit='s')
frame1.index = index1
frame2 = pd.Series(np.arange(10, 20))
index2 = pd.Timestamp('2018-01-29 15:19:28.002') + \
pd.to_timedelta(np.arange(10), unit='s')
frame2.index = index2
# align left
aframe1, aframe2 = align_frames(frame1, frame2, align_to='left')
self.assertTrue(aframe1.index.equals(aframe2.index))
# align right
aframe1, aframe2 = align_frames(frame1, frame2, align_to='right')
self.assertTrue(aframe1.index.equals(aframe2.index))
def test_align_and_crop_series(self):
frame1 = pd.Series(np.arange(10))
index1 = pd.Timestamp('2018-01-29 15:19:28.000') + \
pd.to_timedelta(np.arange(10), unit='s')
frame1.index = index1
frame2 = pd.Series(np.arange(10, 20))
index2 = pd.Timestamp('2018-01-29 15:19:28.002') + \
pd.to_timedelta(np.arange(10), unit='s')
frame2.index = index2
# align left
aframe1, aframe2 = align_frames(frame1, frame2, align_to='left')
self.assertTrue(aframe1.index.equals(aframe2.index))
# align right
aframe1, aframe2 = align_frames(frame1, frame2, align_to='right')
self.assertTrue(aframe1.index.equals(aframe2.index))
def test_align_and_crop_df(self):
frame1 = pd.DataFrame(np.array([np.arange(10), np.arange(10, 20)]).T)
index1 = pd.Timestamp('2018-01-29 15:19:28.000') + \
pd.to_timedelta(np.arange(10), unit='s')
frame1.index = index1
frame2 = pd.DataFrame(np.array([np.arange(20,30), np.arange(30, 40)]).T)
index2 = pd.Timestamp('2018-01-29 15:19:28.002') + \
pd.to_timedelta(np.arange(10), unit='s')
frame2.index = index2
# align left
aframe1, aframe2 = align_frames(frame1, frame2, align_to='left')
self.assertFalse(aframe1.index.empty)
self.assertFalse(aframe2.index.empty)
self.assertTrue(aframe1.index.equals(aframe2.index))
# align right
aframe1, aframe2 = align_frames(frame1, frame2, align_to='right')
self.assertFalse(aframe1.index.empty)
self.assertFalse(aframe2.index.empty)
self.assertTrue(aframe1.index.equals(aframe2.index))
def test_align_and_crop_df_fill(self):
frame1 = pd.DataFrame(np.array([np.arange(10), np.arange(10, 20)]).T)
frame1.columns = ['A', 'B']
index1 = pd.Timestamp('2018-01-29 15:19:28.000') + \
pd.to_timedelta(np.arange(10), unit='s')
frame1.index = index1
frame2 = pd.DataFrame(np.array([np.arange(20, 30), np.arange(30, 40)]).T)
frame2.columns = ['C', 'D']
index2 = pd.Timestamp('2018-01-29 15:19:28.002') + \
pd.to_timedelta(np.arange(10), unit='s')
frame2.index = index2
aframe1, aframe2 = align_frames(frame1, frame2, fill={'B': 'bfill'})
self.assertTrue(aframe1['B'].equals(frame1['B'].iloc[1:].astype(float)))
left, right = frame1.align(frame2, axis=0, copy=True)
left = left.fillna(method='bfill')
left = left.reindex(frame2.index).dropna()
aframe1, aframe2 = align_frames(frame1, frame2, align_to='right',
fill={'B': 'bfill'})
self.assertTrue(aframe1['B'].equals(left['B']))
left, right = frame1.align(frame2, axis=0, copy=True)
left = left.fillna(value=0)
left = left.reindex(frame2.index).dropna()
aframe1, aframe2 = align_frames(frame1, frame2, align_to='right',
fill={'B': 0})
self.assertTrue(aframe1['B'].equals(left['B']))
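# --- Illustrative sketch (added; not part of DGP) -----------------------------------
# One way dgp.lib.etc.align_frames *might* be shaped, inferred only from the
# assertions above. Everything here is an assumption: the real implementation, its
# defaults and its interpolation strategy may differ.
def _align_frames_sketch(left, right, align_to='left', fill=None):
    if align_to not in ('left', 'right'):
        raise ValueError('Invalid value for align_to parameter: %s' % align_to)
    if left.index.max() < right.index.min() or right.index.max() < left.index.min():
        raise ValueError('Frames do not overlap')
    # Union-align the indexes, fill the requested columns, interpolate the rest in
    # time (placeholder for whatever the library really does -- see the TODOs above),
    # then crop both frames to the chosen index.
    a_left, a_right = left.align(right, axis=0, copy=True)
    for col, how in (fill or {}).items():
        a_left[col] = (a_left[col].fillna(method=how) if isinstance(how, str)
                       else a_left[col].fillna(value=how))
    a_left = a_left.interpolate(method='time')
    a_right = a_right.interpolate(method='time')
    target = left.index if align_to == 'left' else right.index
    return a_left.reindex(target).dropna(), a_right.reindex(target).dropna()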
| apache-2.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/tests/frame/test_mutate_columns.py | 7 | 7831 | # -*- coding: utf-8 -*-
from __future__ import print_function
from pandas.compat import range, lrange
import numpy as np
from pandas import DataFrame, Series, Index
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
# Column add, remove, delete.
class TestDataFrameMutateColumns(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_assign(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
original = df.copy()
result = df.assign(C=df.B / df.A)
expected = df.copy()
expected['C'] = [4, 2.5, 2]
assert_frame_equal(result, expected)
# lambda syntax
result = df.assign(C=lambda x: x.B / x.A)
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
# Non-Series array-like
result = df.assign(C=[4, 2.5, 2])
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
result = df.assign(B=df.B / df.A)
expected = expected.drop('B', axis=1).rename(columns={'C': 'B'})
assert_frame_equal(result, expected)
# overwrite
result = df.assign(A=df.A + df.B)
expected = df.copy()
expected['A'] = [5, 7, 9]
assert_frame_equal(result, expected)
# lambda
result = df.assign(A=lambda x: x.A + x.B)
assert_frame_equal(result, expected)
def test_assign_multiple(self):
df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=['A', 'B'])
result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B)
expected = DataFrame([[1, 4, 7, 1, 4], [2, 5, 8, 2, 5],
[3, 6, 9, 3, 6]], columns=list('ABCDE'))
assert_frame_equal(result, expected)
def test_assign_alphabetical(self):
# GH 9818
df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
result = df.assign(D=df.A + df.B, C=df.A - df.B)
expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]],
columns=list('ABCD'))
assert_frame_equal(result, expected)
result = df.assign(C=df.A - df.B, D=df.A + df.B)
assert_frame_equal(result, expected)
def test_assign_bad(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
# non-keyword argument
with tm.assertRaises(TypeError):
df.assign(lambda x: x.A)
with tm.assertRaises(AttributeError):
df.assign(C=df.A, D=df.A + df.C)
with tm.assertRaises(KeyError):
df.assign(C=lambda df: df.A, D=lambda df: df['A'] + df['C'])
with tm.assertRaises(KeyError):
df.assign(C=df.A, D=lambda x: x['A'] + x['C'])
def test_insert_error_msmgs(self):
# GH 7432
df = DataFrame({'foo': ['a', 'b', 'c'], 'bar': [
1, 2, 3], 'baz': ['d', 'e', 'f']}).set_index('foo')
s = DataFrame({'foo': ['a', 'b', 'c', 'a'], 'fiz': [
'g', 'h', 'i', 'j']}).set_index('foo')
msg = 'cannot reindex from a duplicate axis'
with assertRaisesRegexp(ValueError, msg):
df['newcol'] = s
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)),
columns=['a', 'b', 'c', 'd'])
msg = 'incompatible index of inserted column with frame index'
with assertRaisesRegexp(TypeError, msg):
df['gr'] = df.groupby(['b', 'c']).count()
def test_insert_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=lrange(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K),
index=lrange(N))
assert_frame_equal(df, expected)
def test_insert(self):
df = DataFrame(np.random.randn(5, 3), index=np.arange(5),
columns=['c', 'b', 'a'])
df.insert(0, 'foo', df['a'])
self.assert_index_equal(df.columns, Index(['foo', 'c', 'b', 'a']))
tm.assert_series_equal(df['a'], df['foo'], check_names=False)
df.insert(2, 'bar', df['c'])
self.assert_index_equal(df.columns,
Index(['foo', 'c', 'bar', 'b', 'a']))
tm.assert_almost_equal(df['c'], df['bar'], check_names=False)
# diff dtype
# new item
df['x'] = df['a'].astype('float32')
result = Series(dict(float64=5, float32=1))
self.assertTrue((df.get_dtype_counts() == result).all())
# replacing current (in different block)
df['a'] = df['a'].astype('float32')
result = Series(dict(float64=4, float32=2))
self.assertTrue((df.get_dtype_counts() == result).all())
df['y'] = df['a'].astype('int32')
result = Series(dict(float64=4, float32=2, int32=1))
self.assertTrue((df.get_dtype_counts() == result).all())
with assertRaisesRegexp(ValueError, 'already exists'):
df.insert(1, 'a', df['b'])
self.assertRaises(ValueError, df.insert, 1, 'c', df['b'])
df.columns.name = 'some_name'
# preserve columns name field
df.insert(0, 'baz', df['c'])
self.assertEqual(df.columns.name, 'some_name')
# GH 13522
df = DataFrame(index=['A', 'B', 'C'])
df['X'] = df.index
df['X'] = ['x', 'y', 'z']
exp = DataFrame(data={'X': ['x', 'y', 'z']}, index=['A', 'B', 'C'])
assert_frame_equal(df, exp)
def test_delitem(self):
del self.frame['A']
self.assertNotIn('A', self.frame)
def test_pop(self):
self.frame.columns.name = 'baz'
self.frame.pop('A')
self.assertNotIn('A', self.frame)
self.frame['foo'] = 'bar'
self.frame.pop('foo')
self.assertNotIn('foo', self.frame)
# TODO self.assertEqual(self.frame.columns.name, 'baz')
# 10912
# inplace ops cause caching issue
a = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[
'A', 'B', 'C'], index=['X', 'Y'])
b = a.pop('B')
b += 1
# original frame
expected = DataFrame([[1, 3], [4, 6]], columns=[
'A', 'C'], index=['X', 'Y'])
assert_frame_equal(a, expected)
# result
expected = Series([2, 5], index=['X', 'Y'], name='B') + 1
assert_series_equal(b, expected)
def test_pop_non_unique_cols(self):
df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})
df.columns = ["a", "b", "a"]
res = df.pop("a")
self.assertEqual(type(res), DataFrame)
self.assertEqual(len(res), 2)
self.assertEqual(len(df.columns), 1)
self.assertTrue("b" in df.columns)
self.assertFalse("a" in df.columns)
self.assertEqual(len(df.index), 2)
def test_insert_column_bug_4032(self):
# GH4032, inserting a column and renaming causing errors
df = DataFrame({'b': [1.1, 2.2]})
df = df.rename(columns={})
df.insert(0, 'a', [1, 2])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1, 1.1], [2, 2.2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
df.insert(0, 'c', [1.3, 2.3])
result = df.rename(columns={})
str(result)
expected = DataFrame([[1.3, 1, 1.1], [2.3, 2, 2.2]],
columns=['c', 'a', 'b'])
assert_frame_equal(result, expected)
| mit |
kalfasyan/DA224x | code/old code/testing.py | 1 | 10606 | import random
import numpy as np
import itertools
import matplotlib.pylab as plt
from scipy import linalg as la
import time
from progressbar import *
from collections import Counter
import decimal
import math
output = open('matrixExport.txt', 'wb')
widgets = ['Working: ', Percentage(), ' ', Bar(marker='=',
left='[',right=']'), ' ', ETA(), ' ', FileTransferSpeed()]
""" Checks if 2 neurons belong in the same hypercolumn """
def same_hypercolumn(q,w):
for i in hypercolumns:
if q in i and w in i:
return True
return False
""" Checks if 2 neurons belong in the same minicolumn """
def same_minicolumn(q,w):
if same_hypercolumn(q,w):
for mc in minicolumns:
if q in mc and w in mc:
return True
return False
""" Checks if 2 neurons belong in the same layer """
def same_layer(q,w):
if same_hypercolumn(q,w):
if q in layers23 and w in layers23:
return True
elif q in layers4 and w in layers4:
return True
elif q in layers5 and w in layers5:
return True
return False
def next_hypercolumn(q,w):
if same_hypercolumn(q,w):
return False
for i in range(len(split_hc)):
for j in split_hc[i]:
if j < len(split_hc):
if (q in split_hc[i] and w in split_hc[i+1]):
return True
return False
def prev_hypercolumn(q,w):
if same_hypercolumn(q,w):
return False
for i in range(len(split_hc)):
for j in split_hc[i]:
if i >0:
if (q in split_hc[i] and w in split_hc[i-1]):
return True
return False
def diff_hypercolumns(q,w):
if next_hypercolumn(q,w):
if (q in layers5 and w in layers4):
return flip(0.20,q)
elif prev_hypercolumn(q,w):
if (q in layers5 and w in layers23):
return flip(0.20,q)
return 0
def both_exc(q,w):
if same_layer(q,w):
if (q in excitatory_nrns and w in excitatory_nrns):
return True
return False
def both_inh(q,w):
if same_layer(q,w):
if (q in inhibitory_nrns and w in inhibitory_nrns):
return True
return False
""" Returns 1 under probability 'p', else 0 (0<=p<=1)"""
def flipAdj(p,q):
if q in excitatory_nrns:
return 1 if random.random() < p else 0
elif q in inhibitory_nrns:
return -1 if random.random() < p else 0
def flip(p,q):
if q in excitatory_nrns:
return (np.random.normal(0,sigma)+.5) if random.random() < p else 0
elif q in inhibitory_nrns:
return (np.random.normal(0,sigma)-.5) if random.random() < p else 0
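# Illustration (added comment): for an excitatory neuron q, flip(0.35, q) returns a
# weight drawn from N(0.5, sigma**2) with probability 0.35 and 0 otherwise; for an
# inhibitory neuron the draw is centred on -0.5 instead. sigma is set in section 1 below.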
def check_zero(z):
unique, counts = np.unique(z, return_counts=True)
occurence = np.asarray((unique, counts)).T
for i in range(len(z)):
if np.sum(z) != 0:
if len(occurence)==3 and occurence[0][1]>occurence[2][1]:
if z[i] == -1:
z[i] = 0
elif len(occurence)==3 and occurence[2][1]>occurence[0][1]:
if z[i] == 1:
z[i] = 0
elif len(occurence) < 3:
if z[i] == -1:
z[i] += 1
if z[i] == 1:
z[i] -= 1
else:
return z
def balance(l):
N = len(l)
meanP, meanN = 0,0
c1, c2 = 0,0
for i in range(N):
if l[i] > 0:
meanP += l[i]
c1+=1
if l[i] < 0:
meanN += l[i]
c2+=1
diff = abs(meanP)-abs(meanN)
for i in range(N):
if l[i] < 0:
l[i] -= diff/(c2)
return l
def balanceN(mat):
N = len(mat)
sumP,sumN = 0,0
c,c2=0,0
for i in range(N):
for j in range(N):
if mat[j][i] > 0:
sumP += mat[j][i]
c+=1
elif mat[j][i] < 0:
sumN += mat[j][i]
c2+=1
diff = sumP + sumN
for i in range(N):
for j in range(N):
if mat[j][i] < 0:
mat[j][i] -= diff/c2
#########################################################
""" 1. INITIALIZATIONS """
exc_nrns_mc = 32
inh_nrns_mc = 8
lr_mc = 3
mc_hc = 4
hc = 2
nrns = (exc_nrns_mc+inh_nrns_mc)*hc*mc_hc*lr_mc
pbar = ProgressBar(widgets=widgets, maxval=nrns)
q = 1
sigma = math.sqrt(q/decimal.Decimal(nrns))
sigma2 = math.sqrt(1/decimal.Decimal(nrns))
mu = 0
nrns_hc = nrns/hc
nrns_mc = nrns_hc/mc_hc
nrns_l23 = nrns_mc/3
nrns_l4 = nrns_l23
nrns_l5 = nrns_l23
print nrns,"neurons."
print nrns_hc, "per hypercolumn in %s" %hc,"hypercolumns."
print nrns_mc, "per minicolumn in %s" %mc_hc,"minicolumns."
print nrns_l23, "in each layer in %s" %lr_mc,"layers"
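# Worked example of the sizes printed above, using the defaults in this script:
#   nrns     = (32 + 8) * 2 * 4 * 3 = 960 neurons in total
#   nrns_hc  = 960 / 2 = 480 per hypercolumn
#   nrns_mc  = 480 / 4 = 120 per minicolumn
#   nrns_l23 = 120 / 3 = 40 per layer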
##############################################################
""" 2. Creating list of Hypercolumns, list of minicolumns within
hypercolumns, list of layers within minicolumns within
hypercolumns"""
split = [i for i in range(nrns)]
split_hc = zip(*[iter(split)]*nrns_hc)
split_mc = []
split_lr = []
for i in range(len(split_hc)):
split_mc.append(zip(*[iter(split_hc[i])]*nrns_mc))
for j in range(len(split_mc[i])):
split_lr.append(zip(*[iter(split_mc[i][j])]*nrns_l23))
split_exc = []
split_inh = []
for i in range(len(split_lr)):
for j in split_lr[i]:
split_exc.append(j[0:exc_nrns_mc])
split_inh.append(j[exc_nrns_mc:])
##############################################################
""" 3. Creating sets for all minicolumns and all layers """
hypercolumns = set(split_hc)
minitemp = []
for i in range(len(split_mc)):
for j in split_mc[i]:
minitemp.append(j)
minicolumns = set(minitemp)
l23temp = []
l4temp = []
l5temp = []
for i in range(len(split_lr)):
for j in range(len(split_lr[i])):
if j == 0:
l23temp.append(split_lr[i][j])
if j == 1:
l4temp.append(split_lr[i][j])
if j == 2:
l5temp.append(split_lr[i][j])
layers23 = set(list(itertools.chain.from_iterable(l23temp)))
layers4 = set(list(itertools.chain.from_iterable(l4temp)))
layers5 = set(list(itertools.chain.from_iterable(l5temp)))
excitatory_nrns = set(list(itertools.chain.from_iterable(split_exc)))
inhibitory_nrns = set(list(itertools.chain.from_iterable(split_inh)))
""" 4. Connection matrix operations """
##############################################################
#_________________________________________________________________________________________
start_time = time.time()
print "Initializing and creating connection matrix..."
conn_matrix = np.zeros((nrns,nrns))
pbar.start()
for i in range(nrns):
for j in range(nrns):
#conn_matrix[j][i] = flip(13.35,i)
#"""
# SAME HYPERCOLUMN
if same_hypercolumn(i,j):
#if same_minicolumn(i,j) or same_layer(i,j):
conn_matrix[j][i] = flip(.35,i)
# DIFFERENT HYPERCOLUMN
elif next_hypercolumn(i,j):
if (i in layers5 and j in layers4):
conn_matrix[j][i]= flip(0.35,i)
elif prev_hypercolumn(i,j):
if (i in layers5 and j in layers23):
conn_matrix[j][i]= flip(0.35,i)
else:
conn_matrix[j][i] = flip(0.00001,i)
# LAYER 2/3
if i in layers23 and j in layers23:
if both_exc(i,j):
conn_matrix[j][i]= flip(0.35,i)
elif both_inh(i,j):
conn_matrix[j][i]= flip(0.35,i)
elif i in excitatory_nrns and j in inhibitory_nrns:
conn_matrix[j][i]= flip(0.35,i)
else:
conn_matrix[j][i]= flip(0.35,i)
# LAYER 4
elif i in layers4 and j in layers4:
if both_exc(i,j):
conn_matrix[j][i]= flip(0.35,i)
elif both_inh(i,j):
conn_matrix[j][i]= flip(0.35,i)
elif i in excitatory_nrns and j in inhibitory_nrns:
conn_matrix[j][i]= flip(0.35,i)
else:
conn_matrix[j][i]= flip(0.35,i)
# LAYER 5
elif i in layers5 and j in layers5:
if both_exc(i,j):
conn_matrix[j][i]= flip(0.35,i)
elif both_inh(i,j):
conn_matrix[j][i]= flip(0.35,i)
elif i in excitatory_nrns and j in inhibitory_nrns:
conn_matrix[j][i]= flip(0.35,i)
else:
conn_matrix[j][i]= flip(0.35,i)
# FROM LAYER4 -> LAYER2/3
elif i in layers4 and j in layers23:
if both_exc(i,j):
conn_matrix[j][i]= flip(0.35,i)
elif both_inh(i,j):
conn_matrix[j][i]= flip(0.35,i)
elif i in excitatory_nrns and j in inhibitory_nrns:
conn_matrix[j][i]= flip(0.35,i)
else:
conn_matrix[j][i]= flip(0.35,i)
# FROM LAYER2/3 -> LAYER5
elif i in layers23 and j in layers5:
if both_exc(i,j):
conn_matrix[j][i]= flip(0.35,i)
elif both_inh(i,j):
conn_matrix[j][i]= flip(0.35,i)
elif i in excitatory_nrns and j in inhibitory_nrns:
conn_matrix[j][i]= flip(0.35,i)
else:
conn_matrix[j][i]= flip(0.35,i)
#"""
#"""
pbar.update(i)
pbar.finish()
#_________________________________________________________________________________________
noB_var_row = np.var(conn_matrix,1)
noB_var_col = np.var(conn_matrix,0)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
k=[]
for i in range(nrns):
if np.sum(conn_matrix[i,:]) > 1e-5:
k.append(i)
print "Row sums not zero", len(k)
#balanceN(conn_matrix)
#for i in range(len(k)):
# balance(conn_matrix[k[i],:])
delta =0
for i in range(nrns):
if np.sum(conn_matrix[i,:]) > 1e-5:
delta+=1
#print np.sum(conn_matrix[i,:])
#print i
print "sum of all matrix",np.sum(conn_matrix)
print "Row sums not to zero after balance",delta
h,z=0,0
for i in range(nrns):
if i in excitatory_nrns:
for j in conn_matrix[:,i]:
if j < 0:
h+=1
if i in inhibitory_nrns:
for j in conn_matrix[:,i]:
if j > 0:
z+=1
print h,"negatives in exc"
print z,"positives in inh"
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
B_var_row = np.var(conn_matrix,1)
B_var_col = np.var(conn_matrix,0)
#"""
print ("Matrix Created in %.5s seconds." % (time.time() - start_time))
print "Loading plot..."
ed = np.linspace(-4.,1,1e3)
hh,ed= np.histogram(conn_matrix.flatten(),ed)
tt = np.linspace(np.pi,-np.pi,1e2)
sx = np.sin(tt)
sy = np.cos(tt)
ee = la.eigvals(conn_matrix)
ed_ev = np.linspace(-20,20,1e2)
hh_real,ed1 = np.histogram(ee.real,ed_ev)
hh_imag,ed1 = np.histogram(ee.imag,ed_ev)
plt.figure(2)
plt.clf()
plt.subplot(3,2,1)
plt.scatter(ee.real,ee.imag)
plt.plot(sx,sy,'r')
#plt.pcolor(conn_matrix, cmap=plt.cm.Blues)
plt.title("%.8s variance," % sigma**2 +str(mu)+" mean")
#plt.axis('equal')
plt.xlim(min(ed_ev),max(ed_ev))
plt.ylim(min(ed_ev),max(ed_ev))
plt.subplot(3,2,2)
plt.plot(hh_imag,ed_ev[0:-1])
plt.ylim(min(ed_ev),max(ed_ev))
#plt.ylim(0,100)
plt.xlabel("max ee.real %.5s" % np.max(ee.real) + " max ee.imag %.5s" %np.max(ee.imag))
plt.subplot(3,2,3)
plt.plot(ed_ev[0:-1],hh_real)
plt.xlim(min(ed_ev),max(ed_ev))
plt.subplot(3,2,4)
#plt.plot(noB_var_row)#, cmap=plt.cm.RdYlBu)
#plt.plot(noB_var_col)#, cmap=plt.cm.RdYlBu)
#plt.plot(B_var_row)#, cmap=plt.cm.RdYlBu)
plt.plot(B_var_col)#, cmap=plt.cm.RdYlBu)
plt.subplot(3,2,5)
plt.pcolor(conn_matrix)#, cmap=plt.cm.RdYlBu)
plt.subplot(3,2,6)
plt.plot(ed[0:-1],hh)
#plt.ylim(0,800)
plt.show()
#"""
#np.savetxt('matrixExport.txt', conn_matrix, fmt='%.1s')
#print "\nWrote to matrixExport.txt"
"""
cmaps(['indexed','Blues','OrRd','PiYG','PuOr',
'RdYlBu','RdYlGn','afmhot','binary','copper',
'gist_ncar','gist_rainbow','own1','own2'])
"""
| gpl-2.0 |
ky822/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
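# Illustrative example (added; not part of the original module): for
# labels_true = [0, 0, 1, 1] and labels_pred = [0, 1, 1, 1] this returns
#     [[1, 1],
#      [0, 2]]
# i.e. true class 0 is split between clusters 0 and 1, while both samples of true
# class 1 fall into predicted cluster 1.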
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1, even when the label values are permuted
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If class members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
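# Worked example (added for illustration, mirroring the doctest above): for
# labels_true = [0, 0, 1, 2] and labels_pred = [0, 0, 1, 1] the contingency matrix
# is [[2, 0], [0, 1], [0, 1]], so
#   sum_comb   = comb2(2)                       = 1
#   sum_comb_c = comb2(2) + comb2(1) + comb2(1) = 1
#   sum_comb_k = comb2(2) + comb2(2)            = 2
#   prod_comb  = 1 * 2 / comb(4, 2)             = 1/3,   mean_comb = 1.5
#   ARI        = (1 - 1/3) / (1.5 - 1/3)        = 4/7 ≈ 0.57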
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for a
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all class members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If class members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all class members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami: float (upper-bounded by 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average and hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If class members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
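# Quick check (added comment): entropy([0, 0, 1, 1]) has pi = [2, 2] and pi_sum = 4,
# giving -(0.5*log(0.5) + 0.5*log(0.5)) = log(2) ≈ 0.693 nats.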
| bsd-3-clause |
Vijaysai005/KProject | vijay/DBSCAN/temp/Cluster_redit.py | 1 | 4627 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 06 13:15:05 2017
@author: Vijayasai S
"""
# Use python3
from sklearn.cluster import DBSCAN
import csv
import numpy as np
import pandas as pd
class CreateDict(dict):
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
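# Example (added for illustration): CreateDict(eps=0.05, min_samples=4).eps -> 0.05,
# i.e. values are reachable both as attributes and as ordinary dictionary items.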
def loadData(data_file, *args, **kwargs):
dict = {}
for key,value in kwargs.items():
dict[key]=value
df = pd.DataFrame(data_file, columns=[*args])
try:
nrow = len(df[args[0]])
except Exception:
raise Exception("Aleast one argument should be there!")
try:
ncol = len(df.columns)- dict["start_column"]
except KeyError:
raise KeyError("Mention the column number to start (0,1,2,...n) as keyword argument (eg.,start_column=0)")
data = np.empty((nrow, ncol))
for i, j in enumerate(data_file):
data[i] = np.asarray(j[dict["start_column"]:], dtype=np.float)
if "unit_id" in args:
return [CreateDict(data=data).data,df]
else:
raise KeyError("unit_id is missing")
def DictToList(listOfDict):
varName = []
for dict in listOfDict:
for key,value in dict.items():
if key not in varName:
varName.append(key)
df = pd.DataFrame(listOfDict)
if 'unit_id' in varName:
if 'latitude' in varName:
if 'longitude' in varName:
listOflist = [[df['unit_id'][i],df['latitude'][i],df['longitude'][i]] for i in range(len(df['unit_id']))]
else:
raise KeyError("Thera is no key like (\"unit_id\",\"latitude\",\"longitude\")")
return listOflist
def cluster(data, dataframe, eps, min_samples):
db = DBSCAN(eps=eps, min_samples=min_samples).fit(data)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
unique_labels = set(labels)
outlier = [] ; cluster = []
for k in unique_labels:
part_cluster = []
class_member_mask = (labels == k)
xy = data[class_member_mask & core_samples_mask]
if k != -1:
part_cluster.append(xy)
cluster.append(part_cluster)
xy = data[class_member_mask & ~core_samples_mask]
if k == -1:
outlier.append(xy)
main_dict = {} ; lat_dict = {} ; long_dict = {}
lat_id = {} ; long_id = {} ; unit_id = {}
for i in range(n_clusters_):
lat_dict[str(i+1)] = []
long_dict[str(i+1)] = []
unit_id[str(i+1)] = []
for j in range(len(cluster[i])):
m = 0
for k in range(len(cluster[i][j])):
if m == 0:
lat_dict[str(i+1)].append(cluster[i][j][k][0])
long_dict[str(i+1)].append(cluster[i][j][k][1])
else:
lat_dict[str(i+1)].append(cluster[i][j][k][0])
long_dict[str(i+1)].append(cluster[i][j][k][1])
m += 1
for l in range(len(dataframe["unit_id"])):
if float(cluster[i][j][k][0]) == float(dataframe["latitude"][l]) and \
float(cluster[i][j][k][1]) == float(dataframe["longitude"][l]):
unit_id[str(i+1)].append(dataframe["unit_id"][l])
try:
lat_dict["outlier"] = [] ; long_dict["outlier"] = []
for i in range(len(outlier[0])):
lat_dict["outlier"].append(outlier[0][i][0])
long_dict["outlier"].append(outlier[0][i][1])
except Exception:
print ("No outlier datas")
main_dict["latitude"] = lat_dict
main_dict["longitude"] = long_dict
main_dict["unit_id"] = unit_id
return main_dict
def distance(cluster_lat, cluster_long, outlier_lat, outlier_long):
return np.linalg.norm(np.array((cluster_lat, cluster_long))-np.array((outlier_lat, outlier_long)))
# FindCluster function determines the nearest cluster of the outlier
def FindCluster(main_dict, _id):
dist = 10**12 ; dist_i = []
for i in range(len(main_dict["latitude"]) - 1):
for j in range(len(main_dict["latitude"][str(i+1)])):
pair_wise = distance(main_dict["latitude"][str(i+1)][j] , main_dict["longitude"][str(i+1)][j] , \
main_dict["latitude"]["outlier"][_id] ,main_dict["longitude"]["outlier"][_id])
if pair_wise < dist:
dist = pair_wise
dist_i.append(str(i+1))
return dist_i[-1]
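# Hypothetical usage sketch (added; the values below are assumptions, not project data):
#   rows = [[101, 12.971, 77.594], [102, 12.972, 77.596], [103, 13.199, 77.707]]
#   data, df = loadData(rows, 'unit_id', 'latitude', 'longitude', start_column=1)
#   main_dict = cluster(data, df, eps=0.05, min_samples=2)
#   if main_dict['latitude'].get('outlier'):          # any noise points found?
#       nearest = FindCluster(main_dict, 0)           # cluster id nearest to outlier 0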
| gpl-3.0 |
liberatorqjw/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
rohit21122012/DCASE2013 | runs/2016/dnn2016med_mfcc_51/task1_scene_classification.py | 7 | 37780 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# DCASE 2016::Acoustic Scene Classification / Baseline System
import argparse
import textwrap
import timeit
import skflow
from sklearn import mixture
from sklearn import preprocessing as pp
from sklearn.externals import joblib
from sklearn.metrics import confusion_matrix
from src.dataset import *
from src.evaluation import *
from src.features import *
__version_info__ = ('1', '0', '0')
__version__ = '.'.join(__version_info__)
final_result = {}
train_start = 0.0
train_end = 0.0
test_start = 0.0
test_end = 0.0
def main(argv):
numpy.random.seed(123456) # let's make randomization predictable
tot_start = timeit.default_timer()
parser = argparse.ArgumentParser(
prefix_chars='-+',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
DCASE 2016
Task 1: Acoustic Scene Classification
Baseline system
---------------------------------------------
Tampere University of Technology / Audio Research Group
Author: Toni Heittola ( toni.heittola@tut.fi )
System description
This is a baseline implementation for the D-CASE 2016 challenge acoustic scene classification task.
Features: MFCC (static+delta+acceleration)
Classifier: GMM
'''))
# Setup argument handling
parser.add_argument("-development", help="Use the system in the development mode", action='store_true',
default=False, dest='development')
parser.add_argument("-challenge", help="Use the system in the challenge mode", action='store_true',
default=False, dest='challenge')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
args = parser.parse_args()
# Load parameters from config file
parameter_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.splitext(os.path.basename(__file__))[0] + '.yaml')
params = load_parameters(parameter_file)
params = process_parameters(params)
make_folders(params)
title("DCASE 2016::Acoustic Scene Classification / Baseline System")
# Check if mode is defined
if not (args.development or args.challenge):
args.development = True
args.challenge = False
dataset_evaluation_mode = 'folds'
if args.development and not args.challenge:
print "Running system in development mode"
dataset_evaluation_mode = 'folds'
elif not args.development and args.challenge:
print "Running system in challenge mode"
dataset_evaluation_mode = 'full'
# Get dataset container class
dataset = eval(params['general']['development_dataset'])(data_path=params['path']['data'])
# Fetch data over internet and setup the data
# ==================================================
if params['flow']['initialize']:
dataset.fetch()
# Extract features for all audio files in the dataset
# ==================================================
if params['flow']['extract_features']:
section_header('Feature extraction')
# Collect files in train sets
files = []
for fold in dataset.folds(mode=dataset_evaluation_mode):
for item_id, item in enumerate(dataset.train(fold)):
if item['file'] not in files:
files.append(item['file'])
for item_id, item in enumerate(dataset.test(fold)):
if item['file'] not in files:
files.append(item['file'])
files = sorted(files)
# Go through files and make sure all features are extracted
do_feature_extraction(files=files,
dataset=dataset,
feature_path=params['path']['features'],
params=params['features'],
overwrite=params['general']['overwrite'])
foot()
# Prepare feature normalizers
# ==================================================
if params['flow']['feature_normalizer']:
section_header('Feature normalizer')
do_feature_normalization(dataset=dataset,
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite'])
foot()
# System training
# ==================================================
if params['flow']['train_system']:
section_header('System training')
train_start = timeit.default_timer()
do_system_training(dataset=dataset,
model_path=params['path']['models'],
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
classifier_params=params['classifier']['parameters'],
classifier_method=params['classifier']['method'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite']
)
train_end = timeit.default_timer()
foot()
# System evaluation in development mode
if args.development and not args.challenge:
# System testing
# ==================================================
if params['flow']['test_system']:
section_header('System testing')
test_start = timeit.default_timer()
do_system_testing(dataset=dataset,
feature_path=params['path']['features'],
result_path=params['path']['results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=params['general']['overwrite']
)
test_end = timeit.default_timer()
foot()
# System evaluation
# ==================================================
if params['flow']['evaluate_system']:
section_header('System evaluation')
do_system_evaluation(dataset=dataset,
dataset_evaluation_mode=dataset_evaluation_mode,
result_path=params['path']['results'])
foot()
# System evaluation with challenge data
elif not args.development and args.challenge:
# Fetch data over internet and setup the data
challenge_dataset = eval(params['general']['challenge_dataset'])()
if params['flow']['initialize']:
challenge_dataset.fetch()
# System testing
if params['flow']['test_system']:
section_header('System testing with challenge data')
do_system_testing(dataset=challenge_dataset,
feature_path=params['path']['features'],
result_path=params['path']['challenge_results'],
model_path=params['path']['models'],
feature_params=params['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=True
)
foot()
print " "
print "Your results for the challenge data are stored at [" + params['path']['challenge_results'] + "]"
print " "
tot_end = timeit.default_timer()
print " "
print "Train Time : " + str(train_end - train_start)
print " "
print " "
print "Test Time : " + str(test_end - test_start)
print " "
print " "
print "Total Time : " + str(tot_end - tot_start)
print " "
final_result['train_time'] = train_end - train_start
final_result['test_time'] = test_end - test_start
final_result['tot_time'] = tot_end - tot_start
joblib.dump(final_result, 'result.pkl')
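# For reference, the dictionary dumped to 'result.pkl' carries the timing fields set above
# ('train_time', 'test_time', 'tot_time') plus, when evaluation ran, the entries filled in by
# do_system_evaluation() ('tot_cm', 'tot_cm_acc', 'result').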
return 0
def process_parameters(params):
"""Parameter post-processing.
Parameters
----------
params : dict
parameters in dict
Returns
-------
params : dict
processed parameters
"""
# Convert feature extraction window and hop sizes seconds to samples
params['features']['mfcc']['win_length'] = int(params['features']['win_length_seconds'] * params['features']['fs'])
params['features']['mfcc']['hop_length'] = int(params['features']['hop_length_seconds'] * params['features']['fs'])
# Copy parameters for current classifier method
params['classifier']['parameters'] = params['classifier_parameters'][params['classifier']['method']]
# Hash
params['features']['hash'] = get_parameter_hash(params['features'])
params['classifier']['hash'] = get_parameter_hash(params['classifier'])
# Paths
params['path']['data'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), params['path']['data'])
params['path']['base'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), params['path']['base'])
# Features
params['path']['features_'] = params['path']['features']
params['path']['features'] = os.path.join(params['path']['base'],
params['path']['features'],
params['features']['hash'])
# Feature normalizers
params['path']['feature_normalizers_'] = params['path']['feature_normalizers']
params['path']['feature_normalizers'] = os.path.join(params['path']['base'],
params['path']['feature_normalizers'],
params['features']['hash'])
# Models
params['path']['models_'] = params['path']['models']
params['path']['models'] = os.path.join(params['path']['base'],
params['path']['models'],
params['features']['hash'], params['classifier']['hash'])
# Results
params['path']['results_'] = params['path']['results']
params['path']['results'] = os.path.join(params['path']['base'],
params['path']['results'],
params['features']['hash'], params['classifier']['hash'])
return params
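# Illustrative sketch (hypothetical folder names, not produced by this file) of the layout that
# process_parameters() builds from the two parameter hashes:
#
#   <base>/features/<features_hash>/
#   <base>/feature_normalizers/<features_hash>/
#   <base>/models/<features_hash>/<classifier_hash>/
#   <base>/results/<features_hash>/<classifier_hash>/
#
# so changing either the feature or the classifier parameters automatically points the flow at a
# fresh set of directories.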
def make_folders(params, parameter_filename='parameters.yaml'):
"""Create all needed folders, and saves parameters in yaml-file for easier manual browsing of data.
Parameters
----------
params : dict
parameters in dict
parameter_filename : str
filename to save parameters used to generate the folder name
Returns
-------
nothing
"""
# Check that target path exists, create if not
check_path(params['path']['features'])
check_path(params['path']['feature_normalizers'])
check_path(params['path']['models'])
check_path(params['path']['results'])
# Save parameters into folders to help manual browsing of files.
# Features
feature_parameter_filename = os.path.join(params['path']['features'], parameter_filename)
if not os.path.isfile(feature_parameter_filename):
save_parameters(feature_parameter_filename, params['features'])
# Feature normalizers
feature_normalizer_parameter_filename = os.path.join(params['path']['feature_normalizers'], parameter_filename)
if not os.path.isfile(feature_normalizer_parameter_filename):
save_parameters(feature_normalizer_parameter_filename, params['features'])
# Models
model_features_parameter_filename = os.path.join(params['path']['base'],
params['path']['models_'],
params['features']['hash'],
parameter_filename)
if not os.path.isfile(model_features_parameter_filename):
save_parameters(model_features_parameter_filename, params['features'])
model_models_parameter_filename = os.path.join(params['path']['base'],
params['path']['models_'],
params['features']['hash'],
params['classifier']['hash'],
parameter_filename)
if not os.path.isfile(model_models_parameter_filename):
save_parameters(model_models_parameter_filename, params['classifier'])
# Results
# Save parameters into folders to help manual browsing of files.
result_features_parameter_filename = os.path.join(params['path']['base'],
params['path']['results_'],
params['features']['hash'],
parameter_filename)
if not os.path.isfile(result_features_parameter_filename):
save_parameters(result_features_parameter_filename, params['features'])
result_models_parameter_filename = os.path.join(params['path']['base'],
params['path']['results_'],
params['features']['hash'],
params['classifier']['hash'],
parameter_filename)
if not os.path.isfile(result_models_parameter_filename):
save_parameters(result_models_parameter_filename, params['classifier'])
def get_feature_filename(audio_file, path, extension='cpickle'):
"""Get feature filename
Parameters
----------
audio_file : str
audio file name from which the features are extracted
path : str
feature path
extension : str
file extension
(Default value='cpickle')
Returns
-------
feature_filename : str
full feature filename
"""
audio_filename = os.path.split(audio_file)[1]
return os.path.join(path, os.path.splitext(audio_filename)[0] + '.' + extension)
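# Example with hypothetical arguments:
#   get_feature_filename('audio/a001_10_20.wav', '/data/features')
#   returns '/data/features/a001_10_20.cpickle'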
def get_feature_normalizer_filename(fold, path, extension='cpickle'):
"""Get normalizer filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
normalizer path
extension : str
file extension
(Default value='cpickle')
Returns
-------
normalizer_filename : str
full normalizer filename
"""
return os.path.join(path, 'scale_fold' + str(fold) + '.' + extension)
def get_model_filename(fold, path, extension='cpickle'):
"""Get model filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
model path
extension : str
file extension
(Default value='cpickle')
Returns
-------
model_filename : str
full model filename
"""
return os.path.join(path, 'model_fold' + str(fold) + '.' + extension)
def get_result_filename(fold, path, extension='txt'):
"""Get result filename
Parameters
----------
fold : int >= 0
evaluation fold number
path : str
result path
extension : str
file extension
(Default value='txt')
Returns
-------
result_filename : str
full result filename
"""
if fold == 0:
return os.path.join(path, 'results.' + extension)
else:
return os.path.join(path, 'results_fold' + str(fold) + '.' + extension)
def do_feature_extraction(files, dataset, feature_path, params, overwrite=False):
"""Feature extraction
Parameters
----------
files : list
file list
dataset : class
dataset class
feature_path : str
path where the features are saved
params : dict
parameter dict
overwrite : bool
overwrite existing feature files
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Audio file not found.
"""
# Check that target path exists, create if not
check_path(feature_path)
for file_id, audio_filename in enumerate(files):
# Get feature filename
current_feature_file = get_feature_filename(audio_file=os.path.split(audio_filename)[1], path=feature_path)
progress(title_text='Extracting',
percentage=(float(file_id) / len(files)),
note=os.path.split(audio_filename)[1])
if not os.path.isfile(current_feature_file) or overwrite:
# Load audio data
if os.path.isfile(dataset.relative_to_absolute_path(audio_filename)):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(audio_filename), mono=True,
fs=params['fs'])
else:
raise IOError("Audio file not found [%s]" % audio_filename)
# Extract features
if params['method'] == 'lfcc':
feature_file_txt = get_feature_filename(audio_file=os.path.split(audio_filename)[1],
path=feature_path,
extension='txt')
feature_data = feature_extraction_lfcc(feature_file_txt)
else:
# feature_data['feat'].shape is (1501, 60)
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=params['include_mfcc0'],
include_delta=params['include_delta'],
include_acceleration=params['include_acceleration'],
mfcc_params=params['mfcc'],
delta_params=params['mfcc_delta'],
acceleration_params=params['mfcc_acceleration'])
# Save
save_data(current_feature_file, feature_data)
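# The structure of the saved feature container is not spelled out here, but from how it is
# consumed later in this file it is expected to expose at least:
#   feature_data['feat'] : frame-wise feature matrix (used for training and testing)
#   feature_data['stat'] : per-file statistics fed to FeatureNormalizer.accumulate()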
def do_feature_normalization(dataset, feature_normalizer_path, feature_path, dataset_evaluation_mode='folds',
overwrite=False):
"""Feature normalization
Calculates normalization factors for each evaluation fold based on the training material available.
Parameters
----------
dataset : class
dataset class
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
overwrite : bool
overwrite existing normalizers
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Feature file not found.
"""
# Check that target path exists, create if not
check_path(feature_normalizer_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_normalizer_file = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if not os.path.isfile(current_normalizer_file) or overwrite:
# Initialize statistics
file_count = len(dataset.train(fold))
normalizer = FeatureNormalizer()
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
if os.path.isfile(get_feature_filename(audio_file=item['file'], path=feature_path)):
feature_data = load_data(get_feature_filename(audio_file=item['file'], path=feature_path))['stat']
else:
raise IOError("Feature file not found [%s]" % (item['file']))
# Accumulate statistics
normalizer.accumulate(feature_data)
# Calculate normalization factors
normalizer.finalize()
# Save
save_data(current_normalizer_file, normalizer)
def do_system_training(dataset, model_path, feature_normalizer_path, feature_path, classifier_params,
dataset_evaluation_mode='folds', classifier_method='gmm', overwrite=False):
"""System training
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
dataset : class
dataset class
model_path : str
path where the models are saved.
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
classifier_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
classifier_method : str ['gmm', 'dnn']
classifier method, either 'gmm' or 'dnn'
(Default value='gmm')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Feature normalizer not found.
Feature file not found.
"""
if classifier_method != 'gmm' and classifier_method != 'dnn':
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Check that target path exists, create if not
check_path(model_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_model_file = get_model_filename(fold=fold, path=model_path)
if not os.path.isfile(current_model_file) or overwrite:
# Load normalizer
feature_normalizer_filename = get_feature_normalizer_filename(fold=fold, path=feature_normalizer_path)
if os.path.isfile(feature_normalizer_filename):
normalizer = load_data(feature_normalizer_filename)
else:
raise IOError("Feature normalizer not found [%s]" % feature_normalizer_filename)
# Initialize model container
model_container = {'normalizer': normalizer, 'models': {}}
# Collect training examples
file_count = len(dataset.train(fold))
data = {}
for item_id, item in enumerate(dataset.train(fold)):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
raise IOError("Features not found [%s]" % (item['file']))
# Scale features
feature_data = model_container['normalizer'].normalize(feature_data)
# Store features per class label
if item['scene_label'] not in data:
data[item['scene_label']] = feature_data
else:
data[item['scene_label']] = numpy.vstack((data[item['scene_label']], feature_data))
le = pp.LabelEncoder()
tot_data = {}
# Train models for each class
for label in data:
progress(title_text='Train models',
fold=fold,
note=label)
if classifier_method == 'gmm':
model_container['models'][label] = mixture.GMM(**classifier_params).fit(data[label])
elif classifier_method == 'dnn':
if 'x' not in tot_data:
tot_data['x'] = data[label]
tot_data['y'] = numpy.repeat(label, len(data[label]), axis=0)
else:
tot_data['x'] = numpy.vstack((tot_data['x'], data[label]))
tot_data['y'] = numpy.hstack((tot_data['y'], numpy.repeat(label, len(data[label]), axis=0)))
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
if classifier_method == 'dnn':
clf = skflow.TensorFlowDNNClassifier(**classifier_params)
tot_data['y'] = le.fit_transform(tot_data['y'])
clf.fit(tot_data['x'], tot_data['y'])
clf.save('dnn/dnnmodel1')
# Save models
save_data(current_model_file, model_container)
def do_system_testing(dataset, result_path, feature_path, model_path, feature_params,
dataset_evaluation_mode='folds', classifier_method='gmm', overwrite=False):
"""System testing.
If extracted features are not found from disk, they are extracted but not saved.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
feature_path : str
path where the features are saved.
model_path : str
path where the models are saved.
feature_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
classifier_method : str ['gmm', 'dnn']
classifier method, either 'gmm' or 'dnn'
(Default value='gmm')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Model file not found.
Audio file not found.
"""
if classifier_method != 'gmm' and classifier_method != 'dnn':
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Check that target path exists, create if not
check_path(result_path)
for fold in dataset.folds(mode=dataset_evaluation_mode):
current_result_file = get_result_filename(fold=fold, path=result_path)
if not os.path.isfile(current_result_file) or overwrite:
results = []
# Load class model container
model_filename = get_model_filename(fold=fold, path=model_path)
if os.path.isfile(model_filename):
model_container = load_data(model_filename)
else:
raise IOError("Model file not found [%s]" % model_filename)
file_count = len(dataset.test(fold))
for file_id, item in enumerate(dataset.test(fold)):
progress(title_text='Testing',
fold=fold,
percentage=(float(file_id) / file_count),
note=os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
# Load audio
if os.path.isfile(dataset.relative_to_absolute_path(item['file'])):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(item['file']), mono=True,
fs=feature_params['fs'])
else:
raise IOError("Audio file not found [%s]" % (item['file']))
if feature_params['method'] == 'lfcc':
feature_file_txt = get_feature_filename(audio_file=os.path.split(item['file'])[1],
path=feature_path,
extension='txt')
feature_data = feature_extraction_lfcc(feature_file_txt)
else:
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=feature_params['include_mfcc0'],
include_delta=feature_params['include_delta'],
include_acceleration=feature_params['include_acceleration'],
mfcc_params=feature_params['mfcc'],
delta_params=feature_params['mfcc_delta'],
acceleration_params=feature_params['mfcc_acceleration'],
statistics=False)['feat']
# Normalize features
feature_data = model_container['normalizer'].normalize(feature_data)
# Do classification for the block
if classifier_method == 'gmm':
current_result = do_classification_gmm(feature_data, model_container)
current_class = current_result['class']
elif classifier_method == 'dnn':
current_result = do_classification_dnn(feature_data, model_container)
current_class = dataset.scene_labels[current_result['class_id']]
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Store the result
if classifier_method == 'gmm':
results.append((dataset.absolute_to_relative(item['file']),
current_class))
elif classifier_method == 'dnn':
logs_in_tuple = tuple(lo for lo in current_result['logls'])
results.append((dataset.absolute_to_relative(item['file']),
current_class) + logs_in_tuple)
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Save testing results
with open(current_result_file, 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
writer.writerow(result_item)
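# Each row written above is tab-separated; sketch of the expected layout (hypothetical values):
#   gmm:  <relative audio path>\t<predicted scene label>
#   dnn:  <relative audio path>\t<predicted scene label>\t<log-likelihood class 0>\t...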
def do_classification_dnn(feature_data, model_container):
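"""DNN classification for given feature matrix
Restores the DNN saved by do_system_training() from 'dnn/dnnmodel1' and sums the
frame-wise log-probabilities per class. model_container is accepted for API symmetry
with do_classification_gmm() but is not used here.
Parameters
----------
feature_data : numpy.ndarray [shape=(t, feature vector length)]
feature matrix
model_container : dict
model container (unused)
Returns
-------
result : dict
dict with keys 'class_id' (index of the winning class) and 'logls' (per-class summed log-likelihoods)
"""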
# Restore the trained DNN model and accumulate frame-wise log-likelihoods per class
model_clf = skflow.TensorFlowEstimator.restore('dnn/dnnmodel1')
logls = numpy.sum(numpy.log(model_clf.predict_proba(feature_data)), 0)
classification_result_id = numpy.argmax(logls)
return {'class_id': classification_result_id,
'logls': logls}
def do_classification_gmm(feature_data, model_container):
"""GMM classification for give feature matrix
model container format:
{
'normalizer': normalizer class
'models' :
{
'office' : mixture.GMM class
'home' : mixture.GMM class
...
}
}
Parameters
----------
feature_data : numpy.ndarray [shape=(t, feature vector length)]
feature matrix
model_container : dict
model container
Returns
-------
result : dict
dict with keys 'class' (classification result as scene label) and 'logls' (per-class log-likelihoods)
"""
# Initialize log-likelihood matrix to -inf
logls = numpy.empty(len(model_container['models']))
logls.fill(-numpy.inf)
for label_id, label in enumerate(model_container['models']):
logls[label_id] = numpy.sum(model_container['models'][label].score(feature_data))
classification_result_id = numpy.argmax(logls)
return {'class': model_container['models'].keys()[classification_result_id],
'logls': logls}
def do_system_evaluation(dataset, result_path, dataset_evaluation_mode='folds'):
"""System evaluation. Testing outputs are collected and evaluated. Evaluation results are printed.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
dataset_evaluation_mode : str ['folds', 'full']
evaluation mode, 'full' all material available is considered to belong to one fold.
(Default value='folds')
Returns
-------
nothing
Raises
-------
IOError
Result file not found
"""
dcase2016_scene_metric = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results_fold = []
tot_cm = numpy.zeros((dataset.scene_label_count, dataset.scene_label_count))
for fold in dataset.folds(mode=dataset_evaluation_mode):
dcase2016_scene_metric_fold = DCASE2016_SceneClassification_Metrics(class_list=dataset.scene_labels)
results = []
result_filename = get_result_filename(fold=fold, path=result_path)
if os.path.isfile(result_filename):
with open(result_filename, 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
results.append(row)
else:
raise IOError("Result file not found [%s]" % result_filename)
# Rewrite the result file
if os.path.isfile(result_filename):
with open(result_filename+'2', 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
y_true = (dataset.file_meta(result_item[0])[0]['scene_label'],)
print type(y_true)
print type(result_item)
writer.writerow(y_true + tuple(result_item))
y_true = []
y_pred = []
for result in results:
y_true.append(dataset.file_meta(result[0])[0]['scene_label'])
y_pred.append(result[1])
dcase2016_scene_metric.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
dcase2016_scene_metric_fold.evaluate(system_output=y_pred, annotated_ground_truth=y_true)
results_fold.append(dcase2016_scene_metric_fold.results())
tot_cm += confusion_matrix(y_true, y_pred)
final_result['tot_cm'] = tot_cm
final_result['tot_cm_acc'] = numpy.sum(numpy.diag(tot_cm)) / numpy.sum(tot_cm)
results = dcase2016_scene_metric.results()
final_result['result'] = results
print " File-wise evaluation, over %d folds" % dataset.fold_count
fold_labels = ''
separator = ' =====================+======+======+==========+ +'
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_labels += " {:8s} |".format('Fold' + str(fold))
separator += "==========+"
print " {:20s} | {:4s} : {:4s} | {:8s} | |".format('Scene label', 'Nref', 'Nsys', 'Accuracy') + fold_labels
print separator
for label_id, label in enumerate(sorted(results['class_wise_accuracy'])):
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold - 1]['class_wise_accuracy'][label] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format(label,
results['class_wise_data'][label]['Nref'],
results['class_wise_data'][label]['Nsys'],
results['class_wise_accuracy'][
label] * 100) + fold_values
print separator
fold_values = ''
if dataset.fold_count > 1:
for fold in dataset.folds(mode=dataset_evaluation_mode):
fold_values += " {:5.1f} % |".format(results_fold[fold - 1]['overall_accuracy'] * 100)
print " {:20s} | {:4d} : {:4d} | {:5.1f} % | |".format('Overall accuracy',
results['Nref'],
results['Nsys'],
results['overall_accuracy'] * 100) + fold_values
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e)
| mit |
feranick/GES_AT | Archived/TVoc-TJsc_plots/resultsWindow.py | 1 | 29340 | '''
ResultsWindow.py
-------------
Classes for providing a graphical user interface
for the resultsWindow
Copyright (C) 2017-2018 Nicola Ferralis <ferralis@mit.edu>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
'''
import sys, random, math, json, requests, webbrowser
import numpy as np
import pandas as pd
from datetime import datetime
from PyQt5.QtWidgets import (QMainWindow,QPushButton,QVBoxLayout,QFileDialog,QWidget,
QGridLayout,QGraphicsView,QLabel,QComboBox,QLineEdit,
QMenuBar,QStatusBar, QApplication,QTableWidget,
QTableWidgetItem,QAction,QHeaderView,QMenu,QHBoxLayout,
QAbstractItemView)
from PyQt5.QtCore import (QRect,pyqtSlot,Qt)
from PyQt5.QtGui import (QColor,QCursor)
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
from .dataManagement import *
from . import logger
'''
Results Window
'''
class ResultsWindow(QMainWindow):
def __init__(self, parent=None):
super(ResultsWindow, self).__init__(parent)
self.deviceID = np.zeros((0,1))
self.perfData = np.zeros((0,9))
self.JV = np.array([])
self.setupDataFrame()
self.csvFolder = self.parent().config.csvSavingFolder
self.initUI()
self.initPlots(self.perfData)
self.initJVPlot()
self.show()
# Define UI elements
def initUI(self):
self.setGeometry(380, 30, 1150, 950)
self.setWindowTitle('Results Panel')
self.setFixedSize(self.size())
# A figure instance to plot on
self.figureTJsc = plt.figure()
self.figureTVoc = plt.figure()
self.figureMPP = plt.figure()
self.figureJVresp = plt.figure()
self.figurePVresp = plt.figure()
self.figureJVresp.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.15)
self.figurePVresp.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.15)
self.figureTJsc.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.21)
self.figureTVoc.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.21)
self.figureMPP.subplots_adjust(left=0.15, right=0.95, top=0.95, bottom=0.21)
self.centralwidget = QWidget(self)
self.centralwidget.setObjectName("centralwidget")
self.gridLayoutWidget = QWidget(self.centralwidget)
self.gridLayoutWidget.setGeometry(QRect(20, 30, 1100, 710))
self.HLayout = QHBoxLayout(self.gridLayoutWidget)
self.jvVLayout = QVBoxLayout()
self.canvasJVresp = FigureCanvas(self.figureJVresp)
#self.toolbarJVresp = NavigationToolbar(self.canvasJVresp, self)
self.toolbarJVresp = CustomToolbar(self.canvasJVresp, self.figureJVresp, self)
self.toolbarJVresp.setMaximumHeight(30)
self.toolbarJVresp.setStyleSheet("QToolBar { border: 0px }")
self.canvasPVresp = FigureCanvas(self.figurePVresp)
#self.toolbarPVresp = NavigationToolbar(self.canvasPVresp, self)
self.toolbarPVresp = CustomToolbar(self.canvasPVresp, self.figurePVresp, self)
self.toolbarPVresp.setMaximumHeight(30)
self.toolbarPVresp.setStyleSheet("QToolBar { border: 0px }")
self.jvVLayout.addWidget(self.toolbarJVresp)
self.jvVLayout.addWidget(self.canvasJVresp)
self.jvVLayout.addWidget(self.toolbarPVresp)
self.jvVLayout.addWidget(self.canvasPVresp)
self.HLayout.addLayout(self.jvVLayout)
self.VLayout = QVBoxLayout()
self.canvasTJsc = FigureCanvas(self.figureTJsc)
self.toolbarTJsc = NavigationToolbar(self.canvasTJsc, self)
self.toolbarTJsc.setMaximumHeight(30)
self.toolbarTJsc.setStyleSheet("QToolBar { border: 0px }")
self.VLayout.addWidget(self.toolbarTJsc)
self.VLayout.addWidget(self.canvasTJsc)
self.canvasTVoc = FigureCanvas(self.figureTVoc)
self.toolbarTVoc = NavigationToolbar(self.canvasTVoc, self)
self.toolbarTVoc.setMaximumHeight(30)
self.toolbarTVoc.setStyleSheet("QToolBar { border: 0px }")
self.VLayout.addWidget(self.toolbarTVoc)
self.VLayout.addWidget(self.canvasTVoc)
self.canvasMPP = FigureCanvas(self.figureMPP)
self.toolbarMPP = NavigationToolbar(self.canvasMPP, self)
self.toolbarMPP.setMaximumHeight(30)
self.toolbarMPP.setStyleSheet("QToolBar { border: 0px }")
self.VLayout.addWidget(self.toolbarMPP)
self.VLayout.addWidget(self.canvasMPP)
self.HLayout.addLayout(self.VLayout)
self.resTableW = 1100
self.resTableH = 145
self.resTableWidget = QTableWidget(self.centralwidget)
self.resTableWidget.setGeometry(QRect(20, 770, self.resTableW, self.resTableH))
self.resTableWidget.setColumnCount(11)
self.resTableWidget.setRowCount(0)
self.resTableWidget.setItem(0,0, QTableWidgetItem(""))
self.resTableWidget.setHorizontalHeaderItem(0,QTableWidgetItem("Device ID"))
self.resTableWidget.setHorizontalHeaderItem(1,QTableWidgetItem("Av Voc [V]"))
self.resTableWidget.setHorizontalHeaderItem(2,QTableWidgetItem(u"Av Jsc [mA/cm\u00B2]"))
self.resTableWidget.setHorizontalHeaderItem(3,QTableWidgetItem("Av VPP [V]"))
self.resTableWidget.setHorizontalHeaderItem(4,QTableWidgetItem("Av MPP [mW/cm\u00B2]"))
self.resTableWidget.setHorizontalHeaderItem(5,QTableWidgetItem("Av FF"))
self.resTableWidget.setHorizontalHeaderItem(6,QTableWidgetItem("Av PCE [%]"))
self.resTableWidget.setHorizontalHeaderItem(7,QTableWidgetItem("Illumination"))
self.resTableWidget.setHorizontalHeaderItem(8,QTableWidgetItem("Tracking time [s]"))
self.resTableWidget.setHorizontalHeaderItem(9,QTableWidgetItem("Acq Date"))
self.resTableWidget.setHorizontalHeaderItem(10,QTableWidgetItem("Acq Time"))
self.resTableWidget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.resTableWidget.setSelectionBehavior(QAbstractItemView.SelectRows)
self.resTableWidget.itemClicked.connect(self.onCellClick)
#self.resTableWidget.itemDoubleClicked.connect(self.onCellDoubleClick)
self.setCentralWidget(self.centralwidget)
# Make Menu for plot related calls
self.menuBar = QMenuBar(self)
self.menuBar.setGeometry(0,0,1150,25)
self.loadMenu = QAction("&Load Data", self)
self.loadMenu.setShortcut("Ctrl+o")
self.loadMenu.setStatusTip('Load csv data from saved file')
self.loadMenu.triggered.connect(self.read_csv)
self.directoryMenu = QAction("&Set directory for saved files", self)
self.directoryMenu.setShortcut("Ctrl+d")
self.directoryMenu.setStatusTip('Set directory for saved files')
self.directoryMenu.triggered.connect(self.set_dir_saved)
self.clearMenu = QAction("&Clear Plots", self)
self.clearMenu.setShortcut("Ctrl+x")
self.clearMenu.setStatusTip('Clear plots')
self.clearMenu.triggered.connect(lambda: self.clearPlots(True))
fileMenu = self.menuBar.addMenu('&File')
fileMenu.addAction(self.loadMenu)
fileMenu.addAction(self.directoryMenu)
plotMenu = self.menuBar.addMenu('&Plot')
plotMenu.addAction(self.clearMenu)
self.parent().viewWindowMenus(self.menuBar, self.parent())
self.statusbar = QStatusBar(self)
self.statusbar.setObjectName("statusbar")
self.setStatusBar(self.statusbar)
# Set directory for saved data
def set_dir_saved(self):
self.csvFolder = str(QFileDialog.getExistingDirectory(self, "Select Directory"))
self.parent().config.conf['System']['csvSavingFolder'] = str(self.csvFolder)
self.parent().config.saveConfig(self.parent().config.configFile)
self.parent().config.readConfig(self.parent().config.configFile)
msg = "CSV Files will be saved in: "+self.csvFolder
print(msg)
logger.info(msg)
# Define axis parametrs for plots
def plotSettings(self, ax):
ax.tick_params(axis='both', which='major', labelsize=8)
ax.tick_params(axis='both', which='minor', labelsize=8)
# Initialize Time-based plots
def initPlots(self, data):
self.figureTJsc.clf()
self.axTJsc = self.figureTJsc.add_subplot(111)
self.plotSettings(self.axTJsc)
self.axTJsc.set_xlabel('Time [s]',fontsize=8)
self.axTJsc.set_ylabel('Jsc [mA/cm$^2$]',fontsize=8)
self.axTJsc.set_autoscale_on(True)
self.axTJsc.autoscale_view(True,True,True)
self.canvasTJsc.draw()
self.lineTJsc, = self.axTJsc.plot(data[:,0],data[:,2], '.-',linewidth=0.5)
self.figureTVoc.clf()
self.axTVoc = self.figureTVoc.add_subplot(111)
self.plotSettings(self.axTVoc)
self.axTVoc.set_xlabel('Time [s]',fontsize=8)
self.axTVoc.set_ylabel('Voc [V]',fontsize=8)
self.axTVoc.set_autoscale_on(True)
self.axTVoc.autoscale_view(True,True,True)
self.canvasTVoc.draw()
self.lineTVoc, = self.axTVoc.plot(data[:,0],data[:,1], '.-',linewidth=0.5)
self.figureMPP.clf()
self.axMPP = self.figureMPP.add_subplot(111)
self.plotSettings(self.axMPP)
self.axMPP.set_xlabel('Time [s]',fontsize=8)
self.axMPP.set_ylabel('Max power point \n[mW/cm$^2$]',fontsize=8)
self.axMPP.set_autoscale_on(True)
self.axMPP.autoscale_view(True,True,True)
self.canvasMPP.draw()
self.lineMPP, = self.axMPP.plot(data[:,0],data[:,4], '.-',linewidth=0.5)
# Initialize JV and PV plots
def initJVPlot(self):
self.figureJVresp.clf()
self.figurePVresp.clf()
self.axJVresp = self.figureJVresp.add_subplot(111)
self.plotSettings(self.axJVresp)
self.axJVresp.set_xlabel('Voltage [V]',fontsize=8)
self.axJVresp.set_ylabel('Current density [mA/cm$^2$]',fontsize=8)
self.axJVresp.axvline(x=0, linewidth=0.5)
self.axJVresp.axhline(y=0, linewidth=0.5)
self.axPVresp = self.figurePVresp.add_subplot(111)
self.plotSettings(self.axPVresp)
self.axPVresp.set_xlabel('Voltage [V]',fontsize=8)
self.axPVresp.set_ylabel('Power density [mW/cm$^2$]',fontsize=8)
self.axPVresp.axvline(x=0, linewidth=0.5)
self.axPVresp.axhline(y=0, linewidth=0.5)
self.canvasJVresp.draw()
self.canvasPVresp.draw()
# Plot Transient Jsc
def plotTJsc(self, data):
self.toolbarTJsc.update()
self.lineTJsc.set_data(data[:,2].astype(float), data[:,4].astype(float))
self.axTJsc.relim()
self.axTJsc.autoscale_view(True,True,True)
self.canvasTJsc.draw()
# Plot Transient Voc
def plotTVoc(self, data):
self.toolbarTVoc.update()
self.lineTVoc.set_data(data[:,2].astype(float), data[:,3].astype(float))
self.axTVoc.relim()
self.axTVoc.autoscale_view(True,True,True)
self.canvasTVoc.draw()
# Plot MPP with tracking
def plotMPP(self, data):
self.toolbarMPP.update()
self.lineMPP.set_data(data[:,2].astype(float), data[:,6].astype(float))
self.axMPP.relim()
self.axMPP.autoscale_view(True,True,True)
self.canvasMPP.draw()
# Plot JV response
def plotJVresp(self, JV):
self.initJVPlot()
self.toolbarJVresp.update()
self.toolbarPVresp.update()
self.axJVresp.plot(JV[:,0],JV[:,1], '.-',linewidth=0.5, label='Forw')
self.axJVresp.plot(JV[:,2],JV[:,3], '.-',linewidth=0.5, label='Back')
self.axPVresp.plot(JV[:,0],JV[:,0]*JV[:,1], '.-',linewidth=0.5, label='Forw')
self.axPVresp.plot(JV[:,2],JV[:,2]*JV[:,3], '.-',linewidth=0.5, label='Back')
self.axJVresp.legend(loc='lower left')
self.axPVresp.legend(loc='upper left')
if self.parent().config.logPlotJV:
self.axJVresp.set_yscale('log')
self.axPVresp.set_yscale('log')
self.figureJVresp.tight_layout()
self.figurePVresp.tight_layout()
self.canvasJVresp.draw()
self.canvasPVresp.draw()
# Clear all plots and fields
def clearPlots(self, includeTable):
self.setWindowTitle('Results Panel')
self.deviceID = np.zeros((0,1))
self.perfData = np.zeros((0,9))
self.JV = np.array([])
self.initPlots(self.perfData)
self.initJVPlot()
if includeTable is True:
self.resTableWidget.setRowCount(0)
QApplication.processEvents()
# Action upon selecting a row in the table.
@pyqtSlot()
def onCellClick(self):
row = self.resTableWidget.selectedItems()[0].row()
for j in range(self.resTableWidget.columnCount()):
for i in range(self.resTableWidget.rowCount()):
self.resTableWidget.item(i,j).setBackground(QColor(255,255,255))
for j in range(self.resTableWidget.columnCount()):
self.resTableWidget.item(row,j).setBackground(QColor(0,255,0))
try:
self.setWindowTitle('Results Panel - Device: '+ str(self.dfTotDeviceID.iat[0,row][0][0]))
self.plotData(self.dfTotDeviceID.iat[0,row],
self.dfTotPerfData.iat[0,row],
self.dfTotJV.iat[0,row])
except:
pass
# Process Key Events
def keyPressEvent(self, event):
if self.resTableWidget.rowCount() > 0:
if event.key() == Qt.Key_Delete:
selectedRows = list(set([ i.row() for i in self.resTableWidget.selectedItems()]))
for row in selectedRows[::-1]:
self.selectDeviceRemove(row)
# Enable right click on substrates for saving locally and delete
def contextMenuEvent(self, event):
self.menu = QMenu(self)
rPos = self.resTableWidget.mapFromGlobal(QCursor.pos())
if rPos.x()>0 and rPos.x()<self.resTableW and \
rPos.y()>0 and rPos.y()<self.resTableH and \
self.resTableWidget.rowCount() > 0 :
selectCellLoadAction = QAction("&Load from csv...", self)
selectCellLoadAction.setShortcut("Ctrl+o")
selectCellLoadAction.setStatusTip('Load csv data from saved file')
selectCellSaveAction = QAction('Save locally', self)
selectCellSaveAction.setShortcut("Ctrl+s")
viewDMEntryAction = QAction("&View Entry in Database", self)
viewDMEntryAction.setShortcut("Ctrl+v")
selectCellRemoveAction = QAction('Remove...', self)
selectCellRemoveAction.setShortcut("Del")
selectRemoveAllAction = QAction('Remove All...', self)
selectRemoveAllAction.setShortcut("Shift+Del")
self.menu.addAction(selectCellRemoveAction)
self.menu.addAction(selectRemoveAllAction)
self.menu.addSeparator()
self.menu.addAction(selectCellLoadAction)
self.menu.addAction(selectCellSaveAction)
self.menu.addSeparator()
self.menu.addAction(viewDMEntryAction)
self.menu.popup(QCursor.pos())
QApplication.processEvents()
selectCellLoadAction.triggered.connect(self.read_csv)
selectedRows = list(set([ i.row() for i in self.resTableWidget.selectedItems()]))
for row in selectedRows[::-1]:
# Bind the current row at connect time so each connection keeps its own row index
selectCellSaveAction.triggered.connect(lambda checked=False, r=row: self.selectDeviceSaveLocally(r))
selectCellRemoveAction.triggered.connect(lambda checked=False, r=row: self.selectDeviceRemove(r))
selectRemoveAllAction.triggered.connect(lambda: self.clearPlots(True))
viewDMEntryAction.triggered.connect(lambda checked=False, r=row: self.redirectToDM(self.dfTotDeviceID.iat[0,r][0][0]))
# Logic to save locally devices selected from results table
def selectDeviceSaveLocally(self, row):
try:
self.save_csv(self.dfTotDeviceID.iat[0,row][0][0],
self.dfTotAcqParams.iloc[[row]],
self.dfTotPerfData.iat[0,row],
self.dfTotJV.iat[0,row])
except:
print("Error: cannot be saved")
# Logic to remove data from devices selected from results table
def selectDeviceRemove(self, row):
self.dfTotDeviceID = self.dfTotDeviceID.drop(self.dfTotDeviceID.columns[row], axis=1)
self.dfTotPerfData = self.dfTotPerfData.drop(self.dfTotPerfData.columns[row], axis=1)
self.dfTotJV = self.dfTotJV.drop(self.dfTotJV.columns[row], axis=1)
for l in self.axJVresp.get_lines():
l.remove()
for l in self.axPVresp.get_lines():
l.remove()
print("Removed acquisition from table: ",str(self.dfTotDeviceID.iat[0,row]))
self.canvasJVresp.draw()
self.canvasPVresp.draw()
self.resTableWidget.removeRow(row)
# Add row and initialize it within the table
def setupResultTable(self):
self.resTableWidget.insertRow(self.resTableWidget.rowCount())
self.resTableWidget.setItem(self.resTableWidget.rowCount()-1,0,
QTableWidgetItem())
for j in range(self.resTableWidget.columnCount()):
self.resTableWidget.setItem(self.resTableWidget.rowCount()-1, j,
QTableWidgetItem())
self.lastRowInd = self.resTableWidget.rowCount()-1
for f in range(9):
self.resTableWidget.setItem(self.lastRowInd, 0,QTableWidgetItem())
# Create internal dataframe with all the data.
# This is needed for plotting data after acquisition
def setupDataFrame(self):
self.dfTotDeviceID = pd.DataFrame()
self.dfTotPerfData = pd.DataFrame()
self.dfTotAcqParams = pd.DataFrame()
self.dfTotJV = pd.DataFrame()
# Process data from devices
def processDeviceData(self, deviceID, dfAcqParams, perfData, JV, flag, tracking):
# create numpy arrays for all devices as well as dataframes for csv and jsons
self.deviceID = np.vstack((self.deviceID, np.array([deviceID])))
self.perfData = perfData
self.JV = JV
# Populate table.
self.fillTableData(deviceID, self.perfData)
QApplication.processEvents()
# Plot results
self.plotData(self.deviceID,self.perfData, JV)
QApplication.processEvents()
if flag is True:
# Save to internal dataFrame
self.makeInternalDataFrames(self.lastRowInd,
self.deviceID,self.perfData, dfAcqParams, self.JV)
# Enable/disable saving to file
# Using ALT with Start Acquisition button overrides the config settings.
if self.parent().config.saveLocalCsv == True or \
self.parent().acquisition.modifiers == Qt.AltModifier:
self.save_csv(deviceID, dfAcqParams, self.perfData, self.JV)
if self.parent().config.submitToDb == True:
self.submit_DM(deviceID, dfAcqParams, self.perfData, self.JV, tracking)
# Plot data from devices
def plotData(self, deviceID, perfData, JV):
self.plotJVresp(JV)
self.plotTVoc(perfData)
self.plotMPP(perfData)
self.plotTJsc(perfData)
self.show()
# Create internal dataframe with all the data.
# This is needed for plotting data after acquisition
def makeInternalDataFrames(self, index,deviceID,perfData,dfAcqParams,JV):
self.dfTotDeviceID[index] = [deviceID]
self.dfTotPerfData[index] = [perfData]
self.dfTotAcqParams = self.dfTotAcqParams.append(dfAcqParams)
self.dfTotJV[index] = [JV]
# Create DataFrames for saving csv and jsons
def makeDFPerfData(self,perfData):
dfPerfData = pd.DataFrame({'Time step': perfData[:,2], 'Voc': perfData[:,3],
'Jsc': perfData[:,4], 'VPP' : perfData[:,5], 'MPP': perfData[:,6],
'FF': perfData[:,7], 'PCE': perfData[:,8], 'Light' : perfData[:,9],
'Acq Date': perfData[:,0], 'Acq Time': perfData[:,1],
})
dfPerfData = dfPerfData[['Acq Date','Acq Time','Time step', 'Voc',
'Jsc', 'VPP', 'MPP','FF','PCE', 'Light']]
return dfPerfData
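# Column layout assumed for the raw perfData array, inferred from the mapping above:
#   0: Acq Date, 1: Acq Time, 2: Time step, 3: Voc, 4: Jsc, 5: VPP, 6: MPP, 7: FF, 8: PCE, 9: Light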
def makeDFJV(self,JV,set):
dfJV = pd.DataFrame({'V':JV[:,2*set+0], 'J':JV[:,2*set+1]})
dfJV = dfJV[['V', 'J']]
listJV = dict(dfJV.to_dict(orient='split'))
listJV['columnlabel'] = listJV.pop('columns')
listJV['output'] = listJV.pop('data')
del listJV['index']
return dfJV, listJV
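# Sketch of the second return value (listJV) after the renaming above, with hypothetical numbers:
#   {'columnlabel': ['V', 'J'], 'output': [[0.0, 21.3], [0.05, 21.1], ...]}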
### Submit json for device data to Data-Management
def submit_DM(self,deviceID, dfAcqParams, perfData, JV, tracking):
dfPerfData = self.makeDFPerfData(perfData)
# Prepare json-data
jsonData = {'itemId' : deviceID[-1]}
listSubstrateName = {'substrate' : deviceID[:-1]}
listMeasType = {'measType' : 'device'}
listEquipment = {'equipment' : 'auto-testing'}
listAcqParams = dict(dfAcqParams.to_dict(orient='list'))
jsonData.update(listMeasType)
jsonData.update(listEquipment)
jsonData.update(listSubstrateName)
jsonData.update(listAcqParams)
listName = {'name': 'JV_r'}
_, listJV0 = self.makeDFJV(JV,0)
jsonData.update(listJV0)
if tracking is False:
listPerfData = dict(dfPerfData.iloc[[0]].to_dict('list'))
jsonData.update(listPerfData)
jsonData.update(listName)
jsonData1 = jsonData.copy()
listName1 = {'name': 'JV_f'}
jsonData1.update(listName1)
listPerfData1 = dict(dfPerfData.iloc[[1]].to_dict('list'))
jsonData1.update(listPerfData1)
_, listJV1 = self.makeDFJV(JV,1)
jsonData1.update(listJV1)
else:
listName = {'name': 'Tracking'}
listPerfData = dict(dfPerfData.to_dict('split'))
listPerfData['columnlabel'] = listPerfData.pop('columns')
listPerfData['output'] = listPerfData.pop('data')
del listPerfData['index']
jsonData.update(listPerfData)
jsonData.update(listName)
self.dbConnectInfo = self.parent().dbconnectionwind.getDbConnectionInfo()
try:
# This is for direct submission via pymongo
conn = DataManagement(self.dbConnectInfo)
client, _ = conn.connectDB()
db = client[self.dbConnectInfo[2]]
db_entry = db.Measurement.insert_one(json.loads(json.dumps(jsonData)))
msg = " Device " + deviceID + \
": submission to DM via Mongo successful\n (ids: " + \
str(db_entry.inserted_id)
if tracking is False:
db_entry1 = db.Measurement.insert_one(json.loads(json.dumps(jsonData1)))
msg += ", "+str(db_entry1.inserted_id)
msg += ")"
except:
try:
msg = " Submission to DM via Mongo: failed. Trying via HTTP POST"
print(msg)
logger.info(msg)
#This is for using POST HTTP
url = "http://"+self.dbConnectInfo[0]+":"+self.dbConnectInfo[5]+self.dbConnectInfo[6]
if tracking is False:
req = requests.post(url, json=jsonData)
req1 = requests.post(url, json=jsonData1)
if req.status_code == 200 and req1.status_code == 200:
msg = " Device " + deviceID + \
", submission to DM via HTTP POST successful\n (ETag: " + \
str(req.headers['ETag'])+", "+str(req1.headers['ETag'])+")"
else:
req.raise_for_status()
req1.raise_for_status()
else:
# Submit the tracking payload, then check the response
req = requests.post(url, json=jsonData)
if req.status_code == 200:
msg = " Device " + deviceID + \
", submission to DM via HTTP POST successful\n (ETag: " + \
str(req.headers['ETag'])+")"
else:
req.raise_for_status()
except:
msg = " Connection to DM server: failed. Saving local file"
self.save_csv(deviceID, dfAcqParams, perfData, JV)
print(msg)
logger.info(msg)
### Save device acquisition as csv
def save_csv(self,deviceID, dfAcqParams, perfData, JV):
dfPerfData = self.makeDFPerfData(perfData)
dfJV0,_ = self.makeDFJV(JV,0)
dfJV1,_ = self.makeDFJV(JV,1)
dfJV0 = dfJV0.rename(columns={"V": "V_r", "J": "J_r"})
dfJV1 = dfJV1.rename(columns={"V": "V_f", "J": "J_f"})
dfDeviceID = pd.DataFrame({'Device':[deviceID]})
dfTot = pd.concat([dfDeviceID, dfPerfData], axis = 1)
dfTot = pd.concat([dfTot,dfJV0], axis = 1)
dfTot = pd.concat([dfTot,dfJV1], axis = 1)
dfTot = pd.concat([dfTot,dfAcqParams], axis = 1)
dateTimeTag = str(datetime.now().strftime('%Y%m%d-%H%M%S'))
csvFilename = deviceID+"_"
if dfPerfData.at[0,'Light'] == "0.0":
csvFilename+="dark_"
if dfPerfData.at[0,'Time step'] != "0.0":
csvFilename += "tracking_"
csvFilename += dateTimeTag + ".csv"
dfTot.to_csv(self.csvFolder+"/"+csvFilename, sep=',', index=False)
msg=" Device data saved on: "+self.csvFolder+"/"+csvFilename
print(msg)
logger.info(msg)
### Load data from saved CSV
def read_csv(self):
filenames = QFileDialog.getOpenFileNames(self,
"Open csv data", "","*.csv")
try:
for filename in filenames[0]:
print("Open saved device data from: ", filename)
dftot = pd.read_csv(filename, na_filter=False)
deviceID = dftot.at[0,'Device']
perfData = dftot.as_matrix()[range(0,np.count_nonzero(dftot['Acq Date']))][:,range(1,11)]
JV = dftot.as_matrix()[range(0,np.count_nonzero(dftot['V_r']))][:,np.arange(11,15)].astype(float)
dfAcqParams = dftot.loc[0:1, 'Acq Soak Voltage':'Comments']
self.plotData(deviceID, perfData, JV)
self.setupResultTable()
self.fillTableData(deviceID, perfData)
self.makeInternalDataFrames(self.lastRowInd, [[deviceID]], perfData, dfAcqParams, np.array(JV))
except:
print("Loading files failed")
# Populate result table.
def fillTableData(self, deviceID, obj):
if str(obj[0,9]) == "1.0":
light = "ON"
else:
light = "OFF"
self.resTableWidget.setItem(self.lastRowInd, 0,QTableWidgetItem(deviceID))
for i in range(1,7,1):
self.resTableWidget.setItem(self.lastRowInd, i,QTableWidgetItem("{0:0.3f}".format(np.mean(obj[:,i+2].astype(float)))))
try:
self.resTableWidget.item(self.lastRowInd,i).setToolTip("F:{0:0.3f}".format(float(obj[0,i+2]))+" / B:{0:0.3f}".format(float(obj[1,i+2])))
except:
pass
self.resTableWidget.setItem(self.lastRowInd, 7,QTableWidgetItem(light)) #Light
self.resTableWidget.setItem(self.lastRowInd, 9,QTableWidgetItem(obj[0,0]))
self.resTableWidget.setItem(self.lastRowInd, 10,QTableWidgetItem(obj[0,1]))
if float(obj[0,2]) == 0.:
self.resTableWidget.setItem(self.lastRowInd, 8,QTableWidgetItem("None")) #track_time
else:
self.resTableWidget.setItem(self.lastRowInd, 8,QTableWidgetItem("{0:0.3f}".format(float(obj[0,2])))) #track_time
# Redirect to DM page for substrate/device
def redirectToDM(self, deviceID):
print("Opening entry in DM for substrate:",deviceID[:10])
webbrowser.open("http://gridedgedm.mit.edu/lots/view/"+str(deviceID[:10]))
'''
Custom Toolbar
'''
class CustomToolbar(NavigationToolbar):
def __init__(self, figure_canvas, figure, parent= None):
self.figure = figure
self.figure_canvas = figure_canvas
self.toolitems +=(('Log/Lin', "Log/Lin scale", "Log/Lin scale", 'log_lin_scale'),)
NavigationToolbar.__init__(self, figure_canvas, parent=parent)
def log_lin_scale(self):
if len(self.figure.gca().lines) > 2:
if self.figure.gca().get_yscale() == 'log':
self.figure.gca().set_yscale('linear')
else:
self.figure.gca().set_yscale('log')
self.figure_canvas.draw()
| gpl-3.0 |
siconos/siconos-deb | examples/Control/SMC/SMCExampleImplicitOT2-noCplugin-sage.py | 1 | 3803 | #!/usr/bin/env python
# Siconos is a program dedicated to modeling, simulation and control
# of non smooth dynamical systems.
#
# Copyright 2016 INRIA.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We have first to import sage, so it won't shadow some function (like plot...)
# You need the env. variable DOT_SAGE to point to the right location !
import sys
try:
from sage.all import *
# this is needed since sage uses mpfr by default ...
RealNumber = float
Integer = int
except ImportError:
print('sage is not installed, exiting')
sys.exit(0)
# Other import
from siconos.kernel import FirstOrderLinearDS, getMatrix
from siconos.control.simulation import ControlZOHSimulation
from siconos.control.sensor import LinearSensor
from siconos.control.controller import LinearSMCOT2
import matplotlib
matplotlib.use('Agg')
from matplotlib.pyplot import subplot, title, plot, grid, savefig
from numpy import array, eye, empty, zeros, savetxt
from math import ceil, sin
from numpy.linalg import norm
# Some stupid symbolic computations
x = var('x')
g = vector((-cos(50*x), cos(50*x)))/50
f = g.diff(x)
# Derive our own version of FirstOrderLinearDS
class MyFOLDS(FirstOrderLinearDS):
def computeb(self, time):
tmpz = self.z()
# XXX fix this !
if len(tmpz) != 2:
print("DEBUG z has length ", len(tmpz))
return
# XXX we need to find a smarter way to do things here
# we need to convert from vector (sage) to arrayish
u = array(f(x=time).list(), dtype = float) + tmpz
self.setb(u)
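# For reference: with g = (-cos(50*x), cos(50*x))/50 as above, f = dg/dx = (sin(50*x), -sin(50*x)),
# so computeb() sets b(t) = f(t) + z(t) at every call, evaluated numerically from the sage expression.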
# variable declaration
ndof = 2 # Number of degrees of freedom of your system
t0 = 0.0 # start time
T = 1 # end time
h = 1.0e-4 # time step for simulation
hControl = 1.0e-2 # time step for control
Xinit = 1.0 # initial position
theta = 0.5
N = ceil((T-t0)/h + 10) # number of time steps
outputSize = 5 # number of variable to store at each time step
# Matrix declaration
A = zeros((ndof, ndof))
x0 = [Xinit, -Xinit]
sensorC = eye(ndof)
Csurface = [[0, 1.0]]
Brel = array([[0], [2]])  # input matrix used by the actuator below (assumed value; Brel was not defined in this script)
# Simple check
if h > hControl:
print("hControl must be bigger than h")
exit(1)
# Declaration of the Dynamical System
processDS = MyFOLDS(x0, A)
# XXX b is not automatically created ...
processDS.setb([0, 0])
# Control Simulation
sim = ControlZOHSimulation(t0, T, h)
sim.setSaveOnlyMainSimulation(True)
sim.addDynamicalSystem(processDS)
# Actuator, Sensor & ControlManager
sens = LinearSensor(processDS, sensorC)
sim.addSensor(sens, hControl)
act = LinearSMCOT2(sens)
act.setCsurface(Csurface)
act.setB(Brel)
sim.addActuator(act, hControl)
# Initialization
sim.initialize()
# Run simulation
sim.run()
# Get data
dataPlot = sim.data()
# Save to disk
savetxt('SMCExampleImplicitOT2-noCplugin-sage-py.dat', dataPlot)
# Plot interesting data
subplot(411)
title('x1')
plot(dataPlot[:, 0], dataPlot[:, 1])
grid()
subplot(412)
title('x2')
plot(dataPlot[:, 0], dataPlot[:, 2])
grid()
subplot(413)
title('u')
plot(dataPlot[:, 0], dataPlot[:, 3])
savefig('ismcOT2_x_u.png')
# compare with the reference
ref = getMatrix(SimpleMatrix("SMCExampleImplicitOT2-py.ref"))
print("%19e" % norm(dataPlot - ref))
if (norm(dataPlot - ref) > 1e-12):
print(dataPlot - ref)
print("Warning. The result is rather different from the reference file.")
| apache-2.0 |
arjoly/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
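# Quick illustration (not one of the upstream tests): isotonic_regression applies
# pool-adjacent-violators, e.g. isotonic_regression([3, 7, 5, 9]) -> [3, 6, 6, 9],
# pooling the violating pair (7, 5) into their (equally weighted) mean.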
def test_permutation_invariance():
# check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for decreasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
mrry/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 8 | 21806 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
if batch_size is None:
batch_size = x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
y_shape = list(y_shape[1:]) if len(y_shape) > 1 else []
# Skip first dimension if it is 1.
if y_shape and y_shape[0] == 1:
y_shape = y_shape[1:]
if n_classes is not None and n_classes > 1:
output_shape = [batch_size] + y_shape + [n_classes]
else:
output_shape = [batch_size] + y_shape
return input_shape, output_shape, batch_size
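# A minimal, hedged sketch of the shape contract above; the concrete shapes,
# class count and batch size are assumptions, not values used by the library itself.
def _example_get_in_out_shape():
  """Minimal sketch: 100 samples of 8 features, 5 classes, batch size 32."""
  input_shape, output_shape, batch_size = _get_in_out_shape(
      (100, 8), (100,), 5, batch_size=32)
  # input_shape == [32, 8]; output_shape == [32, 5] (one-hot); batch_size == 32
  return input_shape, output_shape, batch_size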
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(
x, y, n_classes, batch_size=None, shuffle=True, epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or iterable.
y: numpy, pandas or Dask array or iterable.
n_classes: number of classes.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
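# A minimal, hedged usage sketch (not part of the original module); the array
# shapes, number of classes and batch size below are assumptions for illustration.
def _example_setup_train_data_feeder():
  """Minimal sketch: build a training feeder from in-memory numpy arrays."""
  x = np.random.rand(100, 8).astype(np.float32)  # 100 samples, 8 features (assumed)
  y = np.random.randint(0, 4, size=100)          # integer targets for 4 classes (assumed)
  feeder = setup_train_data_feeder(x, y, n_classes=4, batch_size=32)
  return feeder                                  # a DataFeeder sampling batches of 32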
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
chunk = []
for data in x:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
yield np.matrix(chunk)
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or iterable.
batch_size: Size of batches to split data into.
If `None`, returns one batch of full size.
Returns:
List or iterator of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
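# A minimal, hedged sketch of the prediction-time batching above; the sample
# count, feature width and batch size are illustrative assumptions only.
def _example_setup_predict_data_feeder():
  """Minimal sketch: 10 samples with batch_size=4 yield batches of 4, 4 and 2 rows."""
  x = np.random.rand(10, 3).astype(np.float32)
  batches = setup_predict_data_feeder(x, batch_size=4)
  return [batch.shape[0] for batch in batches]   # [4, 4, 2]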
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
    The element of `data` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(
self, x, y, n_classes, batch_size=None, shuffle=True, random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
x: Feature Nd numpy matrix of shape `[n_samples, n_features, ...]`.
y: Target vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence
of targets. Can be `None` for unsupervised setting.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion.
batch_size: Mini-batch size to accumulate.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features.
y: Input target.
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input.
output_shape: Shape of the output.
input_dtype: DType of input.
output_dtype: DType of output.
"""
self._x = check_array(x, dtype=x.dtype)
# self.n_classes is None means we're passing in raw target indices.
y_dtype = (
np.int64 if n_classes is not None and n_classes > 1 else np.float32)
if n_classes is not None:
self._y = (None if y is None else check_array(y, dtype=y_dtype))
elif isinstance(y, list):
self._y = np.array(y)
else:
self._y = y
self.n_classes = n_classes
self.max_epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
self._x.shape, None if self._y is None else self._y.shape, n_classes,
batch_size)
# Input dtype matches dtype of x.
self._input_dtype = _check_dtype(self._x.dtype)
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None or self._y is None:
self._output_dtype = np.float32
else:
self._output_dtype = _check_dtype(self._y.dtype)
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
if self._shuffle:
self.indices = self.random_state.permutation(self._x.shape[0])
else:
self.indices = np.array(range(self._x.shape[0]))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(dtypes.int32, [1],
name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
input_shape = [None] + self.input_shape[1:]
self._input_placeholder = array_ops.placeholder(
dtypes.as_dtype(self._input_dtype),
input_shape,
name='input')
if self.output_shape is None:
self._output_placeholder = None
else:
output_shape = [None] + self.output_shape[1:]
self._output_placeholder = array_ops.placeholder(
dtypes.as_dtype(self._output_dtype),
output_shape,
name='output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be None.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a dict with data feed params while training.
Returns:
A dict with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
end = min(self._x.shape[0], self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# Assign input features from random indices.
inp = (
np.array(_access(self._x, batch_indices)).reshape(
(batch_indices.shape[0], 1))
if len(self._x.shape) == 1 else _access(self._x, batch_indices))
feed_dict[self._input_placeholder.name] = inp
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= self._x.shape[0]:
self.indices = self.random_state.permutation(self._x.shape[0])
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# assign labels from random indices
self.output_shape[0] = batch_indices.shape[0]
out = np.zeros(self.output_shape, dtype=self._output_dtype)
for i in xrange(out.shape[0]):
sample = batch_indices[i]
# self.n_classes is None means we're passing in raw target indices
if self.n_classes is None:
out[i] = _access(self._y, sample)
else:
if self.n_classes > 1:
if len(self.output_shape) == 2:
out.itemset((i, int(_access(self._y, sample))), 1.0)
else:
for idx, value in enumerate(_access(self._y, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(self._y, sample)
feed_dict[self._output_placeholder.name] = out
return feed_dict
return _feed_dict_fn
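# A minimal, hedged usage sketch of the DataFeeder class above; the toy data,
# class count and batch size are assumptions, and this helper is never called.
def _example_data_feeder_usage():
  """Minimal sketch: build placeholders and pull a single feed_dict."""
  x = np.random.rand(20, 4).astype(np.float32)   # 20 samples, 4 features (assumed)
  y = np.random.randint(0, 3, size=20)           # integer targets for 3 classes (assumed)
  feeder = DataFeeder(x, y, n_classes=3, batch_size=5)
  feeder.input_builder()                         # creates input/output placeholders
  feed_fn = feeder.get_feed_dict_fn()
  return feed_fn()                               # maps placeholder names to one batch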
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
  Streaming data feeder allows reading data as it comes in from disk or
  somewhere else. It is customary to have these iterators rotate infinitely
  over the dataset, so that the trainer side can control how much to learn.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the target has.
batch_size: Mini batch size to accumulate.
Attributes:
x: input features.
y: input target.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
[1] + list(x_first_el.shape),
[1] + list(y_first_el.shape) if y is not None else None,
n_classes,
batch_size)
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and n_classes > 0:
self._output_dtype = np.float32
elif y is not None:
if isinstance(y_first_el, list) or isinstance(y_first_el, np.ndarray):
self._output_dtype = _check_dtype(np.dtype(type(y_first_el[0])))
else:
self._output_dtype = _check_dtype(np.dtype(type(y_first_el)))
def get_feed_params(self):
"""Function returns a dict with data feed params while training.
Returns:
A dict with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
Dict of input and output tensors.
"""
if self.stopped:
raise StopIteration
inp = np.zeros(self.input_shape, dtype=self._input_dtype)
if self._y is not None:
out = np.zeros(self.output_shape, dtype=self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
inp[i, :] = six.next(self._x)
except StopIteration:
self.stopped = True
inp = inp[:i, :]
if self._y is not None:
out = out[:i]
break
if self._y is not None:
y = six.next(self._y)
if self.n_classes is not None and self.n_classes > 1:
if len(self.output_shape) == 2:
out.itemset((i, y), 1.0)
else:
for idx, value in enumerate(y):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = y
if self._y is None:
return {self._input_placeholder.name: inp}
return {self._input_placeholder.name: inp,
self._output_placeholder.name: out}
return _feed_dict_fn
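# A minimal, hedged sketch of streaming input; the toy generators, feature width
# and batch size below are assumptions chosen purely for illustration.
def _example_streaming_data_feeder():
  """Minimal sketch: feed (features, label) pairs from infinite Python generators."""
  def _x_iter():
    while True:
      yield np.random.rand(4).astype(np.float32)  # one 4-feature sample at a time
  def _y_iter():
    while True:
      yield np.int64(np.random.randint(0, 3))     # matching scalar class label
  feeder = StreamingDataFeeder(_x_iter(), _y_iter(), n_classes=3, batch_size=8)
  feeder.input_builder()                          # inherited from DataFeeder
  return feeder.get_feed_dict_fn()()              # one feed_dict holding 8 samples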
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self, x, y, n_classes, batch_size, shuffle=True,
random_state=None, epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the target has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
random_state: random state for RNG. Note that it will mutate so use a
int value for this if you want consistent sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input target.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
"""
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a dict with data feed params while training.
Returns:
A dict with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output targets.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp,
output_placeholder.name: encoded_out}
return _feed_dict_fn
| apache-2.0 |
zooniverse/aggregation | active_weather/old/tense_weather.py | 1 | 3185 | __author__ = 'ggdhines'
import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
prediction = tf.argmax(y,1)
probabilities=y
prob = probabilities.eval(feed_dict={x: mnist.test.images}, session=sess)
predicted_labels = prediction.eval(feed_dict={x: mnist.test.images}, session=sess)
correct_labels = np.argmax(mnist.test._labels,axis=1)
true_positives = []
false_positives = []
# for i in range(len(correct_labels)):
# corr = correct_labels[i]
# pred = predicted_labels[i]
# a = prob[i][pred]
# if corr == pred:
# true_positives.append(a)
# else:
# false_positives.append(a)
#
# alphas = true_positives[:]
# alphas.extend(false_positives)
# alphas.sort()
# X = []
# Y = []
# for a in alphas:
# X.append(len([x for x in false_positives if x >= a])/float(len(false_positives)))
# Y.append(len([y for y in true_positives if y >= a])/float(len(true_positives)))
#
# print len(false_positives)
# print len(true_positives)
# plt.plot(X,Y)
# plt.plot([0,1],[0,1],"--",color="green")
# plt.xlabel("False Positive Count")
# plt.ylabel("True Positive Count")
# plt.show()
from sklearn import datasets, neighbors, linear_model
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
print X_digits
import gzip
import cPickle
from sklearn.decomposition import PCA
n_samples = len(X_digits)
f = gzip.open('/home/ggdhines/Downloads/mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()
X_train, y_train = train_set
X_test, y_test = test_set
pca = PCA(n_components=100)
pca.fit(X_train)
X_train = pca.transform(X_train)
X_test = pca.transform(X_test)
# print sum(pca.explained_variance_ratio_)
# assert False
knn = neighbors.KNeighborsClassifier()
trained_classifier = knn.fit(X_train, y_train)
prob = trained_classifier.predict_proba(X_test)
predicted = trained_classifier.predict(X_test)
for i in range(len(X_test)):
corr = y_test[i]
pred = predicted[i]
a = prob[i][pred]
if corr == pred:
true_positives.append(a)
else:
false_positives.append(a)
alphas = true_positives[:]
alphas.extend(false_positives)
alphas.sort()
X = []
Y = []
for a in alphas:
X.append(len([x for x in false_positives if x >= a])/float(len(false_positives)))
Y.append(len([y for y in true_positives if y >= a])/float(len(true_positives)))
print len(false_positives)
print len(true_positives)
plt.plot(X,Y)
plt.plot([0,1],[0,1],"--",color="green")
plt.xlabel("False Positive Count")
plt.ylabel("True Positive Count")
plt.show()
| apache-2.0 |
raincoatrun/basemap | examples/plothighsandlows.py | 10 | 3709 | """
plot H's and L's on a sea-level pressure map
(uses scipy.ndimage.filters and netcdf4-python)
"""
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from mpl_toolkits.basemap import Basemap, addcyclic
from scipy.ndimage.filters import minimum_filter, maximum_filter
from netCDF4 import Dataset
def extrema(mat,mode='wrap',window=10):
"""find the indices of local extrema (min and max)
in the input array."""
mn = minimum_filter(mat, size=window, mode=mode)
mx = maximum_filter(mat, size=window, mode=mode)
# (mat == mx) true if pixel is equal to the local max
    # (mat == mn) true if pixel is equal to the local min
    # Return the indices of the minima, maxima
return np.nonzero(mat == mn), np.nonzero(mat == mx)
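# A small, hedged sketch of what extrema() returns; the toy field below is an
# assumption used only to show the (minima, maxima) index tuples.
def _extrema_demo():
    """On a tiny field with one low and one high, extrema() returns np.nonzero
    index tuples for the minima and the maxima, in that order."""
    field = np.array([[3., 2., 3.],
                      [2., 1., 2.],    # global minimum at row 1, col 1
                      [3., 2., 9.]])   # global maximum at row 2, col 2
    lows, highs = extrema(field, mode='wrap', window=3)
    return lows, highs                 # ((array([1]), array([1])), (array([2]), array([2])))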
# plot 00 UTC today.
date = datetime.now().strftime('%Y%m%d')+'00'
# open OpenDAP dataset.
#data=Dataset("http://nomads.ncep.noaa.gov:9090/dods/gfs/gfs/%s/gfs_%sz_anl" %\
# (date[0:8],date[8:10]))
data=Dataset("http://nomads.ncep.noaa.gov:9090/dods/gfs_hd/gfs_hd%s/gfs_hd_%sz"%\
(date[0:8],date[8:10]))
# read lats,lons.
lats = data.variables['lat'][:]
lons1 = data.variables['lon'][:]
nlats = len(lats)
nlons = len(lons1)
# read prmsl, convert to hPa (mb).
prmsl = 0.01*data.variables['prmslmsl'][0]
# the window parameter controls the number of highs and lows detected.
# (higher value, fewer highs and lows)
local_min, local_max = extrema(prmsl, mode='wrap', window=50)
# create Basemap instance.
m =\
Basemap(llcrnrlon=0,llcrnrlat=-80,urcrnrlon=360,urcrnrlat=80,projection='mill')
# add wrap-around point in longitude.
prmsl, lons = addcyclic(prmsl, lons1)
# contour levels
clevs = np.arange(900,1100.,5.)
# find x,y of map projection grid.
lons, lats = np.meshgrid(lons, lats)
x, y = m(lons, lats)
# create figure.
fig=plt.figure(figsize=(8,4.5))
ax = fig.add_axes([0.05,0.05,0.9,0.85])
cs = m.contour(x,y,prmsl,clevs,colors='k',linewidths=1.)
m.drawcoastlines(linewidth=1.25)
m.fillcontinents(color='0.8')
m.drawparallels(np.arange(-80,81,20),labels=[1,1,0,0])
m.drawmeridians(np.arange(0,360,60),labels=[0,0,0,1])
xlows = x[local_min]; xhighs = x[local_max]
ylows = y[local_min]; yhighs = y[local_max]
lowvals = prmsl[local_min]; highvals = prmsl[local_max]
# plot lows as blue L's, with min pressure value underneath.
xyplotted = []
# don't plot if there is already a L or H within dmin meters.
yoffset = 0.022*(m.ymax-m.ymin)
dmin = yoffset
for x,y,p in zip(xlows, ylows, lowvals):
if x < m.xmax and x > m.xmin and y < m.ymax and y > m.ymin:
dist = [np.sqrt((x-x0)**2+(y-y0)**2) for x0,y0 in xyplotted]
if not dist or min(dist) > dmin:
plt.text(x,y,'L',fontsize=14,fontweight='bold',
ha='center',va='center',color='b')
plt.text(x,y-yoffset,repr(int(p)),fontsize=9,
ha='center',va='top',color='b',
bbox = dict(boxstyle="square",ec='None',fc=(1,1,1,0.5)))
xyplotted.append((x,y))
# plot highs as red H's, with max pressure value underneath.
xyplotted = []
for x,y,p in zip(xhighs, yhighs, highvals):
if x < m.xmax and x > m.xmin and y < m.ymax and y > m.ymin:
dist = [np.sqrt((x-x0)**2+(y-y0)**2) for x0,y0 in xyplotted]
if not dist or min(dist) > dmin:
plt.text(x,y,'H',fontsize=14,fontweight='bold',
ha='center',va='center',color='r')
plt.text(x,y-yoffset,repr(int(p)),fontsize=9,
ha='center',va='top',color='r',
bbox = dict(boxstyle="square",ec='None',fc=(1,1,1,0.5)))
xyplotted.append((x,y))
plt.title('Mean Sea-Level Pressure (with Highs and Lows) %s' % date)
plt.show()
| gpl-2.0 |
aureooms/networkx | doc/make_examples_rst.py | 35 | 5461 | """
generate the rst files for the examples by iterating over the networkx examples
"""
# This code was developed from the Matplotlib gen_rst.py module
# and is distributed with the same license as Matplotlib
from __future__ import print_function
import os, glob
import os
import re
import sys
#fileList = []
#rootdir = '../../examples'
def out_of_date(original, derived):
"""
    Returns True if the derived file is out-of-date with respect to the
    original; both arguments are full file paths.
    TODO: this check isn't adequate in some cases. E.g., if we discover
    a bug when building the examples, the original and derived
    will be unchanged but we still want to force a rebuild. We can
    manually remove from _static, but we may need another solution.
"""
return (not os.path.exists(derived) or
os.stat(derived).st_mtime < os.stat(original).st_mtime)
def main(exampledir,sourcedir):
noplot_regex = re.compile(r"#\s*-\*-\s*noplot\s*-\*-")
datad = {}
for root, subFolders, files in os.walk(exampledir):
for fname in files:
if ( fname.startswith('.') or fname.startswith('#') or fname.startswith('_') or
fname.find('.svn')>=0 or not fname.endswith('.py') ):
continue
fullpath = os.path.join(root,fname)
contents = file(fullpath).read()
# indent
relpath = os.path.split(root)[-1]
datad.setdefault(relpath, []).append((fullpath, fname, contents))
subdirs = datad.keys()
subdirs.sort()
output_dir=os.path.join(sourcedir,'examples')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
fhindex = file(os.path.join(sourcedir,'examples','index.rst'), 'w')
fhindex.write("""\
.. _examples-index:
*****************
NetworkX Examples
*****************
.. only:: html
:Release: |version|
:Date: |today|
.. toctree::
:maxdepth: 2
""")
for subdir in subdirs:
output_dir= os.path.join(sourcedir,'examples',subdir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
static_dir = os.path.join(sourcedir, 'static', 'examples')
if not os.path.exists(static_dir):
os.makedirs(static_dir)
subdirIndexFile = os.path.join(subdir, 'index.rst')
fhsubdirIndex = file(os.path.join(output_dir,'index.rst'), 'w')
fhindex.write(' %s\n\n'%subdirIndexFile)
#thumbdir = '../_static/plot_directive/mpl_examples/%s/thumbnails/'%subdir
#for thumbname in glob.glob(os.path.join(thumbdir,'*.png')):
# fhindex.write(' %s\n'%thumbname)
fhsubdirIndex.write("""\
.. _%s-examples-index:
##############################################
%s
##############################################
.. only:: html
:Release: |version|
:Date: |today|
.. toctree::
:maxdepth: 1
"""%(subdir, subdir.title()))
data = datad[subdir]
data.sort()
#parts = os.path.split(static_dir)
#thumb_dir = ('../'*(len(parts)-1)) + os.path.join(static_dir, 'thumbnails')
for fullpath, fname, contents in data:
basename, ext = os.path.splitext(fname)
static_file = os.path.join(static_dir, fname)
#thumbfile = os.path.join(thumb_dir, '%s.png'%basename)
#print ' static_dir=%s, basename=%s, fullpath=%s, fname=%s, thumb_dir=%s, thumbfile=%s'%(static_dir, basename, fullpath, fname, thumb_dir, thumbfile)
rstfile = '%s.rst'%basename
outfile = os.path.join(output_dir, rstfile)
fhsubdirIndex.write(' %s\n'%rstfile)
if (not out_of_date(fullpath, static_file) and
not out_of_date(fullpath, outfile)):
continue
print('%s/%s' % (subdir,fname))
fhstatic = file(static_file, 'w')
fhstatic.write(contents)
fhstatic.close()
fh = file(outfile, 'w')
fh.write('.. _%s-%s:\n\n'%(subdir, basename))
base=fname.partition('.')[0]
title = '%s'%(base.replace('_',' ').title())
#title = '<img src=%s> %s example code: %s'%(thumbfile, subdir, fname)
fh.write(title + '\n')
fh.write('='*len(title) + '\n\n')
pngname=base+".png"
png=os.path.join(static_dir,pngname)
linkname = os.path.join('..', '..', 'static', 'examples')
if os.path.exists(png):
fh.write('.. image:: %s \n\n'%os.path.join(linkname,pngname))
linkname = os.path.join('..', '..', '_static', 'examples')
fh.write("[`source code <%s>`_]\n\n::\n\n" % os.path.join(linkname,fname))
# indent the contents
contents = '\n'.join([' %s'%row.rstrip() for row in contents.split('\n')])
fh.write(contents)
# fh.write('\n\nKeywords: python, matplotlib, pylab, example, codex (see :ref:`how-to-search-examples`)')
fh.close()
fhsubdirIndex.close()
fhindex.close()
if __name__ == '__main__':
import sys
try:
arg0,arg1,arg2=sys.argv[:3]
except:
arg0=sys.argv[0]
print("""
Usage: %s exampledir sourcedir
exampledir: a directory containing the python code for the examples.
sourcedir: a directory to put the generated documentation source for these examples.
""" % (arg0))
else:
main(arg1,arg2)
| bsd-3-clause |
hugobowne/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
markroxor/gensim | gensim/sklearn_api/ldamodel.py | 1 | 5783 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Chinmaya Pancholi <chinmayapancholi13@gmail.com>
# Copyright (C) 2017 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Scikit learn interface for gensim for easy use of gensim with scikit-learn
follows on scikit learn API conventions
"""
import numpy as np
from scipy import sparse
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim import matutils
class LdaTransformer(TransformerMixin, BaseEstimator):
"""
Base LDA module
"""
def __init__(self, num_topics=100, id2word=None, chunksize=2000, passes=1, update_every=1, alpha='symmetric',
eta=None, decay=0.5, offset=1.0, eval_every=10, iterations=50, gamma_threshold=0.001,
minimum_probability=0.01, random_state=None, scorer='perplexity', dtype=np.float32):
"""
        Sklearn wrapper for LDA model. See gensim.models.LdaModel for parameter details.
`scorer` specifies the metric used in the `score` function.
See `gensim.models.LdaModel` class for description of the other parameters.
"""
self.gensim_model = None
self.num_topics = num_topics
self.id2word = id2word
self.chunksize = chunksize
self.passes = passes
self.update_every = update_every
self.alpha = alpha
self.eta = eta
self.decay = decay
self.offset = offset
self.eval_every = eval_every
self.iterations = iterations
self.gamma_threshold = gamma_threshold
self.minimum_probability = minimum_probability
self.random_state = random_state
self.scorer = scorer
self.dtype = dtype
def fit(self, X, y=None):
"""
Fit the model according to the given training data.
Calls gensim.models.LdaModel
"""
if sparse.issparse(X):
corpus = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
else:
corpus = X
self.gensim_model = models.LdaModel(
corpus=corpus, num_topics=self.num_topics, id2word=self.id2word,
chunksize=self.chunksize, passes=self.passes, update_every=self.update_every,
alpha=self.alpha, eta=self.eta, decay=self.decay, offset=self.offset,
eval_every=self.eval_every, iterations=self.iterations,
gamma_threshold=self.gamma_threshold, minimum_probability=self.minimum_probability,
random_state=self.random_state, dtype=self.dtype
)
return self
def transform(self, docs):
"""
Takes a list of documents as input ('docs').
        Returns a matrix of topic distributions for the given documents in BOW
        format, where entry (i, j) is the probability of topic j in document i.
The input `docs` should be in BOW format and can be a list of documents like
[[(4, 1), (7, 1)],
[(9, 1), (13, 1)], [(2, 1), (6, 1)]]
or a single document like : [(4, 1), (7, 1)]
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
# The input as array of array
if isinstance(docs[0], tuple):
docs = [docs]
# returning dense representation for compatibility with sklearn
# but we should go back to sparse representation in the future
distribution = [matutils.sparse2full(self.gensim_model[doc], self.num_topics) for doc in docs]
return np.reshape(np.array(distribution), (len(docs), self.num_topics))
def partial_fit(self, X):
"""
Train model over X.
By default, 'online (single-pass)' mode is used for training the LDA model.
Configure `passes` and `update_every` params at init to choose the mode among :
- online (single-pass): update_every != None and passes == 1
- online (multi-pass): update_every != None and passes > 1
- batch: update_every == None
"""
if sparse.issparse(X):
X = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
if self.gensim_model is None:
self.gensim_model = models.LdaModel(
num_topics=self.num_topics, id2word=self.id2word,
chunksize=self.chunksize, passes=self.passes, update_every=self.update_every,
alpha=self.alpha, eta=self.eta, decay=self.decay, offset=self.offset,
eval_every=self.eval_every, iterations=self.iterations, gamma_threshold=self.gamma_threshold,
minimum_probability=self.minimum_probability, random_state=self.random_state,
dtype=self.dtype
)
self.gensim_model.update(corpus=X)
return self
def score(self, X, y=None):
"""
Compute score reflecting how well the model has fit for the input data.
"""
if self.scorer == 'perplexity':
corpus_words = sum(cnt for document in X for _, cnt in document)
subsample_ratio = 1.0
perwordbound = \
self.gensim_model.bound(X, subsample_ratio=subsample_ratio) / (subsample_ratio * corpus_words)
return -1 * np.exp2(-perwordbound) # returning (-1*perplexity) to select model with minimum value
elif self.scorer == 'u_mass':
goodcm = models.CoherenceModel(model=self.gensim_model, corpus=X, coherence=self.scorer, topn=3)
return goodcm.get_coherence()
else:
raise ValueError("Invalid value of `scorer` param supplied")
| lgpl-2.1 |
sanketloke/scikit-learn | sklearn/utils/__init__.py | 17 | 12898 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric)
from .deprecation import deprecated
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
from ..exceptions import ConvergenceWarning as _ConvergenceWarning
from ..exceptions import DataConversionWarning
@deprecated("ConvergenceWarning has been moved into the sklearn.exceptions "
"module. It will not be available here from version 0.19")
class ConvergenceWarning(_ConvergenceWarning):
pass
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
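# A hedged illustration of the boolean-to-integer conversion above; the toy sparse
# matrix and mask are assumptions used only to show the returned indices.
def _safe_mask_example():
    """Minimal sketch: for sparse X a boolean mask becomes integer row indices."""
    from scipy.sparse import csr_matrix
    X = csr_matrix(np.eye(3))
    mask = np.array([True, False, True])
    return safe_mask(X, mask)          # array([0, 2]); usable as X[safe_mask(X, mask)]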
def axis0_safe_slice(X, mask, len_mask):
"""
This mask is safer than safe_mask since it returns an
empty array, when a sparse matrix is sliced with a boolean mask
with all False, instead of raising an unhelpful error in older
versions of SciPy.
See: https://github.com/scipy/scipy/issues/5361
Also note that we can avoid doing the dot product by checking if
the len_mask is not zero in _huber_loss_and_gradient but this
is not going to be the bottleneck, since the number of outliers
and non_outliers are typically non-zero and it makes the code
tougher to follow.
"""
if len_mask != 0:
return X[safe_mask(X, mask), :]
return np.zeros(shape=(0, X.shape[1]))
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
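# A minimal, hedged sketch of the indexing helper above; the toy inputs are
# illustrative assumptions, not data used anywhere in scikit-learn.
def _safe_indexing_example():
    """Minimal sketch: the same indices select rows of an array and items of a list."""
    indices = np.array([2, 0])
    rows = safe_indexing(np.arange(12).reshape(4, 3), indices)   # rows 2 and 0
    items = safe_indexing(['a', 'b', 'c', 'd'], indices)         # ['c', 'a']
    return rows, items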
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
If replace is False it should not be larger than the length of
arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
elif (max_n_samples > n_samples) and (not replace):
raise ValueError("Cannot sample %d out of arrays with dim %d"
"when replace is False" % (max_n_samples,
n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1"
% n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
| bsd-3-clause |
boland1992/SeisSuite | build/lib.linux-x86_64-2.7/seissuite/azimuth/heatinterpolate.py | 8 | 3647 | #!/usr/bin/env python
# combining density estimation and delaunay interpolation for confidence-weighted value mapping
# Dan Stowell, April 2013
import numpy as np
from numpy import random
from math import exp, log
from scipy import stats, mgrid, c_, reshape, rot90
import matplotlib.delaunay
import matplotlib.tri as tri
import matplotlib.delaunay.interpolate
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from colorsys import hls_to_rgb
#############################
# user settings
n = 100
gridsize = 100
fontsize = 'xx-small'
#############################
# first generate some random [x,y,z] data -- random locations but closest to the middle, and random z-values
data = random.randn(3, n) * 100.
# we will add some correlation to the z-values
data[2,:] += data[1,:]
data[2,:] += data[0,:]
# scale the z-values to 0--1 for convenience
zmin = np.min(data[2,:])
zmax = np.max(data[2,:])
data[2,:] = (data[2,:] - zmin) / (zmax - zmin)
xmin = np.min(data[0,:])
xmax = np.max(data[0,:])
ymin = np.min(data[1,:])
ymax = np.max(data[1,:])
zmin = np.min(data[2,:])
zmax = np.max(data[2,:])
##################################################
# plot it simply
plt.figure()
fig = plt.subplot(2,2,1)
for datum in data.T:
plt.plot(datum[0], datum[1], 'x', color=str(1.0 - datum[2]))
plt.title("scatter", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
##################################################
# now make a KDE of it and plot that
fig = plt.subplot(2,2,2)
kdeX, kdeY = mgrid[xmin:xmax:gridsize*1j, ymin:ymax:gridsize*1j]
positions = c_[kdeX.ravel(), kdeY.ravel()]
values = c_[data[0,:], data[1,:]]
kernel = stats.kde.gaussian_kde(values.T)
kdeZ = reshape(kernel(positions.T).T, kdeX.T.shape)
plt.imshow(rot90(kdeZ), cmap=cm.binary, aspect='auto')
plt.title("density of points", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
##################################################
# now make a delaunay triangulation of it and plot that
fig = plt.subplot(2,2,3)
tt = matplotlib.delaunay.triangulate.Triangulation(data[0,:], data[1,:])
#triang = tri.Triangulation(data[0,:], data[1,:])
#plt.triplot(triang, 'bo-') # this plots the actual triangles of the triangulation. I'm more interested in their interpolated values
#extrap = tt.linear_extrapolator(data[2,:])
extrap = tt.nn_extrapolator(data[2,:])
interped = extrap[xmin:xmax:gridsize*1j, ymin:ymax:gridsize*1j]
plt.imshow(rot90(interped), cmap=cm.gist_earth_r, aspect='auto')
plt.title("interpolated values", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
##################################################
# now combine delaunay with KDE
fig = plt.subplot(2,2,4)
colours = np.zeros((gridsize, gridsize, 4))
kdeZmin = np.min(kdeZ)
kdeZmax = np.max(kdeZ)
confdepth = 0.45
for x in range(gridsize):
for y in range(gridsize):
conf = (kdeZ[x,y] - kdeZmin) / (kdeZmax - kdeZmin)
val = min(1., max(0., interped[x,y]))
colour = list(cm.gist_earth_r(val))
# now fade it out to white according to conf
for index in [0,1,2]:
colour[index] = (colour[index] * conf) + (1.0 * (1. -conf))
colours[x,y,:] = colour
#colours[x,y,:] = np.hstack((hls_to_rgb(val, 0.5 + confdepth - (confdepth * conf), 1.0), 1.0))
#colours[x,y,:] = [conf, conf, 1.0-conf, val]
plt.imshow(rot90(colours), cmap=cm.gist_earth_r, aspect='auto')
plt.title("interpolated & confidence-shaded", fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
############################################
plt.savefig("output/plot_heati_simple.pdf", papertype='A4', format='pdf')
| gpl-3.0 |
DistributedML/TorML | ML/code/ML_main.py | 1 | 4843 | from __future__ import division
from numpy.linalg import norm
import matplotlib.pyplot as plt
import logistic_aggregator
import softmax_model
import softmax_model_test
import softmax_model_obj
import poisoning_compare
import numpy as np
import utils
import pdb
import sys
np.set_printoptions(suppress=True)
# Just a simple sandbox for testing out python code, without using Go.
def debug_signal_handler(signal, frame):
import pdb
pdb.set_trace()
import signal
signal.signal(signal.SIGINT, debug_signal_handler)
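# With the handler above registered, Ctrl+C drops into the pdb debugger at the
# current frame instead of terminating the run.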
def basic_conv():
dataset = "mnist_train"
batch_size = 1
iterations = 4000
epsilon = 5
# Global
numFeatures = softmax_model.init(dataset, epsilon=epsilon)
print("Start training")
weights = np.random.rand(numFeatures) / 1000.0
train_progress = np.zeros(iterations)
test_progress = np.zeros(iterations)
for i in xrange(iterations):
deltas = softmax_model.privateFun(1, weights, batch_size)
weights = weights + deltas
if i % 100 == 0:
print("Train error: %d", softmax_model_test.train_error(weights))
print("Test error: %d", softmax_model_test.test_error(weights))
print("Done iterations!")
print("Train error: %d", softmax_model_test.train_error(weights))
print("Test error: %d", softmax_model_test.test_error(weights))
def non_iid(model_names, numClasses, numParams, softmax_test, iter=3000):
batch_size = 50
iterations = iter
epsilon = 5
list_of_models = []
for dataset in model_names:
list_of_models.append(softmax_model_obj.SoftMaxModel(dataset, epsilon, numClasses))
numClients = len(list_of_models)
logistic_aggregator.init(numClients, numParams)
print("Start training across " + str(numClients) + " clients.")
weights = np.random.rand(numParams) / 100.0
train_progress = []
#sum yourself
#sum pairwise
ds = np.zeros((numClients, numParams))
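    # ds keeps a running per-client sum of all updates seen so far; it is passed
    # to logistic_aggregator.cos_aggregate_sum as historical state (the commented
    # '_nomem' variant below is the memory-free alternative).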
#cs = np.zeros((numClients, numClients))
for i in xrange(iterations):
total_delta = np.zeros((numClients, numParams))
for k in range(len(list_of_models)):
total_delta[k, :] = list_of_models[k].privateFun(1, weights, batch_size)
initial_distance = np.random.rand()*10
ds = ds + total_delta
#scs = logistic_aggregator.get_cos_similarity(total_delta)
#cs = cs + scs
# distance, poisoned = logistic_aggregator.search_distance_euc(total_delta, initial_distance, False, [], np.zeros(numClients), 0, scs)
# delta, dist, nnbs = logistic_aggregator.euclidean_binning_hm(total_delta, distance, logistic_aggregator.get_nnbs_euc_cos, scs)
#print(distance)
delta = logistic_aggregator.cos_aggregate_sum(total_delta, ds, i)
#delta = logistic_aggregator.cos_aggregate_sum_nomem(total_delta)
weights = weights + delta
if i % 100 == 0:
error = softmax_test.train_error(weights)
print("Train error: %.10f" % error)
train_progress.append(error)
#pdb.set_trace()
print("Done iterations!")
print("Train error: %d", softmax_test.train_error(weights))
print("Test error: %d", softmax_test.test_error(weights))
return weights
# amazon: 50 classes, 10000 features
# mnist: 10 classes, 784 features
# kdd: 23 classes, 41 features
if __name__ == "__main__":
argv = sys.argv[1:]
dataset = argv[0]
iter = int(argv[1])
if (dataset == "mnist"):
numClasses = 10
numFeatures = 784
elif (dataset == "kddcup"):
numClasses = 23
numFeatures = 41
elif (dataset == "amazon"):
numClasses = 50
numFeatures = 10000
else:
print("Dataset " + dataset + " not found. Available datasets: mnist kddcup amazon")
numParams = numClasses * numFeatures
dataPath = dataset + "/" + dataset
full_model = softmax_model_obj.SoftMaxModel(dataPath + "_train", 1, numClasses)
Xtest, ytest = full_model.get_data()
models = []
for i in range(numClasses):
models.append(dataPath + str(i))
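    # Each remaining command-line argument describes a poisoning attack in the
    # form <number_of_sybils>_<from_class>_<to_class>; one sybil model is added
    # per requested sybil.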
for attack in argv[2:]:
attack_delim = attack.split("_")
sybil_set_size = attack_delim[0]
from_class = attack_delim[1]
to_class = attack_delim[2]
for i in range(int(sybil_set_size)):
models.append(dataPath + "_bad_" + from_class + "_" + to_class)
softmax_test = softmax_model_test.SoftMaxModelTest(dataset, numClasses, numFeatures)
weights = non_iid(models, numClasses, numParams, softmax_test, iter)
for attack in argv[2:]:
attack_delim = attack.split("_")
from_class = attack_delim[1]
to_class = attack_delim[2]
score = poisoning_compare.eval(Xtest, ytest, weights, int(from_class), int(to_class), numClasses, numFeatures)
# pdb.set_trace()
| mit |
appapantula/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the mean squared error and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
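# For reference, ordinary least squares chooses the coefficients w that minimize
# ||y - Xw||^2; with a single feature this reduces to fitting the slope and
# intercept of the best-fit line shown below.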
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
rajat1994/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 254 | 2795 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
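    # min_df=3 above drops tokens that appear in fewer than 3 documents and
    # max_df=0.95 drops tokens that appear in more than 95% of documents, i.e.
    # the "too rare or too frequent" tokens mentioned in the task.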
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
    # TASK: print the cross-validated scores for each parameter set
# explored by the grid search
print(grid_search.grid_scores_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
dsullivan7/scikit-learn | sklearn/svm/tests/test_svm.py | 14 | 29378 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.datasets.samples_generator import make_classification
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
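# The toy problem is linearly separable: class 1 occupies the lower-left
# quadrant and class 2 the upper-right, so a reasonable classifier should
# label the three test points T as [1, 2, 2].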
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1)
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="auto"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('auto', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='auto' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='auto')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_inheritance():
# check that SVC classes can do inheritance
class ChildSVC(svm.SVC):
def __init__(self, foo=0):
self.foo = foo
svm.SVC.__init__(self)
clf = ChildSVC()
clf.fit(iris.data, iris.target)
clf.predict(iris.data[-1])
clf.decision_function(iris.data[-1])
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
    # clone for checking clonability with lambda functions
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0)
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
jlegendary/scikit-learn | sklearn/decomposition/__init__.py | 99 | 1331 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD']
| bsd-3-clause |
edouardpoitras/NowTrade | nowtrade/technical_indicator.py | 1 | 24144 | """
This module contains all the technical indicators that can be applied
to a nowtrade dataset. You can use any number of these for your
strategy.
"""
import uuid
import numpy as np
import talib
import pandas as pd
from nowtrade import logger
class TechnicalIndicator(object):
"""
The base class for all technical indicators.
"""
def __init__(self):
self.logger = logger.Logger(self.__class__.__name__)
def results(self, data_frame):
"""
This needs to be implemented for all technical indicators.
All the calculations happen here.
"""
pass
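# A minimal sketch of a custom indicator following the same pattern (the class
# and column name here are illustrative, not part of the library):
#
#     class Double(TechnicalIndicator):
#         def __init__(self, data):
#             TechnicalIndicator.__init__(self)
#             self.data = data
#             self.value = 'DOUBLE_%s' %data
#         def results(self, data_frame):
#             data_frame[self.value] = data_frame[self.data] * 2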
class Pair(TechnicalIndicator):
"""
Pair is a helper TI created to aid in pairs trading.
Attributes:
ols -> Ordinary Least Squares of the pair
hedge_ratio -> The pair's hedge ratio
spread -> The spread between the pair
zscore -> The zscore between the pair
"""
def __init__(self, y_data, x_data, lookback):
TechnicalIndicator.__init__(self)
self.y_data = y_data
self.x_data = x_data
self.lookback = lookback
self.value = 'PAIR_%s_%s_%s' %(y_data, x_data, lookback)
self.ols = self.value
self.hedge_ratio = 'HEDGE_RATIO_%s_%s_%s' %(y_data, x_data, lookback)
self.spread = 'SPREAD_%s_%s_%s' %(y_data, x_data, lookback)
self.zscore = 'ZSCORE_%s_%s_%s' %(y_data, x_data, lookback)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return self.value
def __repr__(self):
return self.value
def results(self, data_frame):
y_value = data_frame[self.y_data]
x_value = data_frame[self.x_data]
if self.lookback >= len(x_value):
return ([self.value, self.hedge_ratio, self.spread, self.zscore], \
[pd.Series(np.nan), pd.Series(np.nan), pd.Series(np.nan), pd.Series(np.nan)])
ols_result = pd.ols(y=y_value, x=x_value, window=self.lookback)
hedge_ratio = ols_result.beta['x']
spread = y_value - hedge_ratio * x_value
data_frame[self.value] = ols_result.resid
data_frame[self.hedge_ratio] = hedge_ratio
data_frame[self.spread] = spread
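        # Rolling z-score of the spread over the lookback window:
        # (spread - rolling mean) / rolling standard deviation.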
data_frame[self.zscore] = (spread - \
pd.rolling_mean(spread, self.lookback)) / \
pd.rolling_std(spread, self.lookback)
class Addition(TechnicalIndicator):
"""
A simple technical indicator that adds two TIs/values together.
"""
def __init__(self, data1, data2):
TechnicalIndicator.__init__(self)
self.data1 = data1
self.data2 = data2
self.value = 'ADDITION_%s_%s' %(data1, data2)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'Addition(data1=%s, data2=%s)' %(self.data1, self.data2)
def __repr__(self):
return self.value
def results(self, data_frame):
if isinstance(self.data2, basestring): # Other TI
data_frame[self.value] = data_frame[self.data1] + data_frame[self.data2]
else:
data_frame[self.value] = data_frame[self.data1] + self.data2
class Subtraction(TechnicalIndicator):
"""
A simple technical indicator that subtracts a TI from another TI or value.
"""
def __init__(self, data1, data2):
TechnicalIndicator.__init__(self)
self.data1 = data1
self.data2 = data2
self.value = 'SUBTRACTION_%s_%s' %(data1, data2)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'Subtraction(data1=%s, data2=%s)' %(self.data1, self.data2)
def __repr__(self):
return self.value
def results(self, data_frame):
if isinstance(self.data2, basestring): # Other TI
data_frame[self.value] = data_frame[self.data1] - data_frame[self.data2]
else:
data_frame[self.value] = data_frame[self.data1] - self.data2
class Multiplication(TechnicalIndicator):
"""
A simple technical indicator that multiplies a TI with another TI or value.
"""
def __init__(self, data1, data2):
TechnicalIndicator.__init__(self)
self.data1 = data1
self.data2 = data2
self.value = 'MULTIPLICATION_%s_%s' %(data1, data2)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'Multiplication(data1=%s, data2=%s)' %(self.data1, self.data2)
def __repr__(self):
return self.value
def results(self, data_frame):
if isinstance(self.data2, basestring): # Other TI
data_frame[self.value] = data_frame[self.data1] * data_frame[self.data2]
else:
data_frame[self.value] = data_frame[self.data1] * self.data2
class Division(TechnicalIndicator):
"""
A simple technical indicator that divides a TI with another TI or value.
"""
def __init__(self, data1, data2):
TechnicalIndicator.__init__(self)
self.data1 = data1
self.data2 = data2
self.value = 'DIVISION_%s_%s' %(data1, data2)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'Division(data1=%s, data2=%s)' %(self.data1, self.data2)
def __repr__(self):
return self.value
def results(self, data_frame):
if isinstance(self.data2, basestring): # Other TI
data_frame[self.value] = data_frame[self.data1] / data_frame[self.data2]
else:
data_frame[self.value] = data_frame[self.data1] / self.data2
class PercentChange(TechnicalIndicator):
"""
A technical indicator that provides a running percent change over
a time series.
"""
def __init__(self, data1, data2):
TechnicalIndicator.__init__(self)
self.data1 = data1
self.data2 = data2
self.value = 'PERCENT_CHANGE_%s_%s' %(data1, data2)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'PercentChange(data1=%s, data2=%s)' %(self.data1, self.data2)
def __repr__(self):
return self.value
def results(self, data_frame):
if isinstance(self.data2, basestring): # Other TI
series1 = data_frame[self.data1]
series2 = data_frame[self.data2]
data_frame[self.value] = (series2 - series1) / series1
else: # Value
data_frame[self.value] = data_frame[self.data1].pct_change(self.data2)
class Max(TechnicalIndicator):
"""
A technical indicator that always returns the highest value in a series
of a predefined period.
"""
def __init__(self, data, period):
TechnicalIndicator.__init__(self)
self.data = data
self.period = period
self.value = 'MAX_%s_%s' %(data, period)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'Max(data=%s, period=%s)' %(self.data, self.period)
def __repr__(self):
return self.value
def results(self, data_frame):
try:
data_frame[self.value] = pd.rolling_max(data_frame[self.data], self.period)
except KeyError:
data_frame[self.value] = np.nan
class Min(TechnicalIndicator):
"""
A technical indicator that always returns the lowest value in a series
of a predefined period.
"""
def __init__(self, data, period):
TechnicalIndicator.__init__(self)
self.data = data
self.period = period
self.value = 'MIN_%s_%s' %(data, period)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'Min(data=%s, period=%s)' %(self.data, self.period)
def __repr__(self):
return self.value
def results(self, data_frame):
try:
data_frame[self.value] = pd.rolling_min(data_frame[self.data], self.period)
except KeyError:
data_frame[self.value] = np.nan
class InvalidShift(Exception):
"""
An exception used when the Shift technical indicator is used improperly.
A shift value of 1 or higher must be specified.
"""
pass
class Shift(TechnicalIndicator):
"""
    A period of 2 retrieves values from two periods back in time,
    e.g. from [1, 2, 3, 4] to [NaN, NaN, 1, 2].
"""
def __init__(self, data, period):
TechnicalIndicator.__init__(self)
if period < 1:
raise InvalidShift('Must be positive shift period')
self.data = data
self.period = period
self.value = 'SHIFT_%s_%s' %(data, period)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'Shift(data=%s, period=%s)' %(self.data, self.period)
def __repr__(self):
return self.value
def results(self, data_frame):
data_frame[self.value] = data_frame[self.data].shift(self.period)
class SMA(TechnicalIndicator):
"""
A technical indicator that returns the simple moving average of a
series/technical indicator.
"""
def __init__(self, data, period):
TechnicalIndicator.__init__(self)
self.data = data
self.period = period
self.value = 'SMA_%s_%s' %(data, period)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'SMA(data=%s, period=%s)' %(self.data, self.period)
def __repr__(self):
return self.value
def results(self, data_frame):
data_frame[self.value] = pd.rolling_mean(data_frame[self.data], self.period)
class EMA(TechnicalIndicator):
"""
Same as SMA except for an exponential moving average.
"""
def __init__(self, data, period):
TechnicalIndicator.__init__(self)
self.data = data
self.period = period
self.value = 'EMA_%s_%s' %(data, period)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'EMA(data=%s, period=%s)' %(self.data, self.period)
def __repr__(self):
return self.value
def results(self, data_frame):
try:
data_frame[self.value] = talib.EMA(data_frame[self.data].values, self.period)
except KeyError:
data_frame[self.value] = np.nan
class RSI(TechnicalIndicator):
"""
A technical indicator that returns the relative strength index of a
series/technical indicator.
"""
def __init__(self, data, period):
TechnicalIndicator.__init__(self)
self.data = data
self.period = period
self.value = 'RSI_%s_%s' %(data, period)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'RSI(data=%s, period=%s)' %(self.data, self.period)
def __repr__(self):
return self.value
def results(self, data_frame):
try:
data_frame[self.value] = talib.RSI(data_frame[self.data].values, timeperiod=self.period)
except KeyError:
data_frame[self.value] = np.nan
class ATR(TechnicalIndicator):
"""
A technical indicator that returns the average true range of a series
or technical indicator.
Need to supply the symbol, not the symbol data (example: msft, not msft.close).
"""
def __init__(self, symbol, period):
TechnicalIndicator.__init__(self)
self.symbol = symbol
self.period = period
self.value = 'ATR_%s_%s' %(symbol, period)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'ATR(symbol=%s, period=%s)' %(self.symbol, self.period)
def __repr__(self):
return self.value
def results(self, data_frame):
try:
data_frame[self.value] = talib.ATR(data_frame['%s_High' %self.symbol].values,
data_frame['%s_Low' %self.symbol].values,
data_frame['%s_Close' %self.symbol].values,
timeperiod=self.period)
except KeyError:
data_frame[self.value] = np.nan
class BBANDS(TechnicalIndicator):
"""
A technical indicator that returns the bollinger bands of a series or
technical indicator.
"""
def __init__(self, data, period, devup=2, devdown=2, ma_type=talib.MA_Type.T3):
TechnicalIndicator.__init__(self)
self.data = data
self.period = period
        self.devup = devup
        self.devdown = devdown
self.ma_type = ma_type
self.value = 'BBANDS_MIDDLE_%s_%s_%s_%s_%s' %(data, period, devup, devdown, ma_type)
self.upper = 'BBANDS_UPPER_%s_%s_%s_%s_%s' %(data, period, devup, devdown, ma_type)
self.middle = self.value
self.lower = 'BBANDS_LOWER_%s_%s_%s_%s_%s' %(data, period, devup, devdown, ma_type)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'BBANDS(data=%s, period=%s, devup=%s, devdown=%s, ma_type=%s)' \
%(self.data, self.period, self.devup, self.devdown, self.ma_type)
def __repr__(self):
return self.value
def results(self, data_frame):
try:
upper, middle, lower = talib.BBANDS(data_frame[self.data].values,
self.period,
self.devup,
self.devdown,
matype=self.ma_type)
data_frame[self.upper] = upper
data_frame[self.middle] = middle
data_frame[self.lower] = lower
except KeyError:
data_frame[self.upper] = np.nan
data_frame[self.middle] = np.nan
data_frame[self.lower] = np.nan
class DX(TechnicalIndicator):
"""
A directional movement index technical indicator for a series or other
technical indicator.
Need to supply the symbol, not the symbol data (example: msft, not msft.close).
"""
def __init__(self, symbol, period):
TechnicalIndicator.__init__(self)
self.symbol = symbol
self.period = period
self.value = 'DX_%s_%s' %(symbol, period)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'DX(symbol=%s, period=%s)' %(self.symbol, self.period)
def __repr__(self):
return self.value
def results(self, data_frame):
try:
directional_index = talib.DX(data_frame['%s_High' %self.symbol].values,
data_frame['%s_Low' %self.symbol].values,
data_frame['%s_Close' %self.symbol].values,
timeperiod=self.period)
data_frame[self.value] = directional_index
except KeyError:
data_frame[self.value] = np.nan
class ADX(TechnicalIndicator):
"""
The average directional index technical indicator for a series or other
technical indicator.
Need to supply the symbol, not the symbol data (example: msft, not msft.close).
"""
def __init__(self, symbol, period):
TechnicalIndicator.__init__(self)
self.symbol = symbol
self.period = period
self.value = 'ADX_%s_%s' %(symbol, period)
self.plus_di = '+DI_%s_%s' %(symbol, period)
self.minus_di = '-DI_%s_%s' %(symbol, period)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'ADX(symbol=%s, period=%s)' %(self.symbol, self.period)
def __repr__(self):
return self.value
def results(self, data_frame):
try:
adx = talib.ADX(data_frame['%s_High' %self.symbol].values,
data_frame['%s_Low' %self.symbol].values,
data_frame['%s_Close' %self.symbol].values,
timeperiod=self.period)
plus_di = talib.PLUS_DI(data_frame['%s_High' %self.symbol].values,
data_frame['%s_Low' %self.symbol].values,
data_frame['%s_Close' %self.symbol].values,
timeperiod=self.period)
minus_di = talib.MINUS_DI(data_frame['%s_High' %self.symbol].values,
data_frame['%s_Low' %self.symbol].values,
data_frame['%s_Close' %self.symbol].values,
timeperiod=self.period)
data_frame[self.value] = adx
data_frame[self.plus_di] = plus_di
data_frame[self.minus_di] = minus_di
except KeyError:
data_frame[self.value] = np.nan
data_frame[self.plus_di] = np.nan
data_frame[self.minus_di] = np.nan
class ULTOSC(TechnicalIndicator):
"""
The ultimate oscillator technical indicator.
Need to supply the symbol, not the symbol data (example: msft, not msft.close).
"""
def __init__(self, symbol, period1, period2, period3):
TechnicalIndicator.__init__(self)
self.symbol = symbol
self.period1 = period1
self.period2 = period2
self.period3 = period3
self.value = 'ULTOSC_%s_%s_%s_%s' %(symbol, period1, period2, period3)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'ULTOSC(symbol=%s, period1=%s, period2=%s, period3=%s)' \
%(self.symbol, self.period1, self.period2, self.period3)
def __repr__(self):
return self.value
def results(self, data_frame):
try:
ultosc = talib.ULTOSC(data_frame['%s_High' %self.symbol].values,
data_frame['%s_Low' %self.symbol].values,
data_frame['%s_Close' %self.symbol].values,
timeperiod1=self.period1,
timeperiod2=self.period2,
timeperiod3=self.period3)
data_frame[self.value] = ultosc
except KeyError:
data_frame[self.value] = np.nan
class STOCH(TechnicalIndicator):
"""
The stochastic technical indicator.
Need to supply the symbol, not the symbol data (example: msft, not msft.close).
"""
def __init__(self, symbol, fast_k_period=5, slow_k_period=3,
slow_k_ma_type=talib.MA_Type.SMA, slow_d_period=3,
slow_d_ma_type=talib.MA_Type.SMA):
TechnicalIndicator.__init__(self)
self.symbol = str(symbol).upper()
self.fast_k_period = fast_k_period
self.slow_k_period = slow_k_period
self.slow_k_ma_type = slow_k_ma_type
self.slow_d_period = slow_d_period
self.slow_d_ma_type = slow_d_ma_type
self.value = 'STOCH_K_%s_%s_%s_%s_%s_%s' %(self.symbol,
fast_k_period,
slow_k_period,
slow_k_ma_type,
slow_d_period,
slow_d_ma_type)
self.slowk = self.value
self.slowd = 'STOCH_D_%s_%s_%s_%s_%s_%s' %(self.symbol,
fast_k_period,
slow_k_period,
slow_k_ma_type,
slow_d_period,
slow_d_ma_type)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'STOCH(symbol=%s, fast_k_period=%s, slow_k_period=%s, \
slow_k_ma_type=%s, slow_d_period=%s, self_d_ma_type=%s)' \
%(self.symbol, self.fast_k_period, self.slow_k_period, \
self.slow_k_ma_type, self.slow_d_period, self.slow_d_ma_type)
def __repr__(self):
return self.value
def results(self, data_frame):
try:
slowk, slowd = talib.STOCH(data_frame['%s_High' %self.symbol].values,
data_frame['%s_Low' %self.symbol].values,
data_frame['%s_Close' %self.symbol].values,
self.fast_k_period, self.slow_k_period,
self.slow_k_ma_type, self.slow_d_period,
self.slow_d_ma_type)
data_frame[self.slowk] = slowk
data_frame[self.slowd] = slowd
except KeyError:
data_frame[self.slowk] = np.nan
data_frame[self.slowd] = np.nan
class STOCHF(TechnicalIndicator):
"""
The fast variant of the stochastic technical indicator.
"""
def __init__(self, symbol, fast_k_period=5, fast_d_period=3,
fast_d_ma_type=talib.MA_Type.SMA):
TechnicalIndicator.__init__(self)
self.symbol = str(symbol).upper()
self.fast_k_period = fast_k_period
self.fast_d_period = fast_d_period
self.fast_d_ma_type = fast_d_ma_type
self.value = 'STOCHF_K_%s_%s_%s_%s' %(self.symbol,
fast_k_period,
fast_d_period,
fast_d_ma_type)
self.fastk = self.value
self.fastd = 'STOCHF_D_%s_%s_%s_%s' %(self.symbol,
fast_k_period,
fast_d_period,
fast_d_ma_type)
self.logger.info('Initialized - %s' %self)
def __str__(self):
return 'STOCHF(symbol=%s, fast_k_period=%s, fast_d_period=%s, \
fast_d_ma_type=%s)' %(self.symbol, self.fast_k_period, \
self.fast_d_period, self.fast_d_ma_type)
def __repr__(self):
return self.value
def results(self, data_frame):
try:
fastk, fastd = talib.STOCHF(data_frame['%s_High' %self.symbol].values,
data_frame['%s_Low' %self.symbol].values,
data_frame['%s_Close' %self.symbol].values,
self.fast_k_period, self.fast_d_period,
self.fast_d_ma_type)
data_frame[self.fastk] = fastk
data_frame[self.fastd] = fastd
except KeyError:
data_frame[self.fastk] = np.nan
data_frame[self.fastd] = np.nan
class NeuralNetwork(TechnicalIndicator):
"""
A technical indicator that enables the use of a trained neural network
to be used with nowtrade criteria.
"""
def __init__(self, network, name=None):
TechnicalIndicator.__init__(self)
self.network = network
if name is not None:
self.name = name
else:
self.name = str(uuid.uuid4())
self.value = 'NEURAL_NETWORK_%s' %self.name
self.logger.info('Initialized - %s' %self)
def __str__(self):
return self.value
def __repr__(self):
return self.value
def results(self, data_frame):
data_frame[self.value] = self.network.activate_all(data_frame)
class Ensemble(TechnicalIndicator):
"""
A technical indicator that enables the use of a fitted ensemble
to be used with nowtrade criteria.
"""
def __init__(self, ensemble, name=None):
TechnicalIndicator.__init__(self)
self.ensemble = ensemble
if name is not None:
self.name = name
        else:
            self.name = str(uuid.uuid4())
self.value = 'ENSEMBLE_%s' %self.name
self.logger.info('Initialized - %s' %self)
def __str__(self):
return self.value
def __repr__(self):
return self.value
def results(self, data_frame):
res = self.ensemble.activate_all(data_frame)
index = data_frame.index[-len(res):]
try:
data_frame[self.value] = pd.Series(res, index=index)
except KeyError:
data_frame[self.value] = np.nan
| mit |
ChanderG/scikit-learn | setup.py | 143 | 7364 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
def is_scipy_installed():
try:
import scipy
except ImportError:
return False
return True
def is_numpy_installed():
try:
import numpy
except ImportError:
return False
return True
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
        # These actions are required to succeed without NumPy, for example when
        # pip is used to install scikit-learn while NumPy is not yet present on
        # the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
if is_numpy_installed() is False:
raise ImportError("Numerical Python (NumPy) is not installed.\n"
"scikit-learn requires NumPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if is_scipy_installed() is False:
raise ImportError("Scientific Python (SciPy) is not installed.\n"
"scikit-learn requires SciPy.\n"
"Installation instructions are available on scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
MatthieuBizien/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 50 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison of the decision boundaries generated on the iris dataset
by Label Propagation and by SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
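# entries set to -1 are treated as unlabeled samples by LabelSpreading;
# roughly 30% and 50% of the labels are dropped at random above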
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
Og192/Python | machine-learning-algorithms/mlalg/decisionTrees/treePlotter.py | 2 | 3845 | '''
Created on Oct 14, 2010
@author: Peter Harrington
'''
import matplotlib.pyplot as plt
decisionNode = dict(boxstyle="sawtooth", fc="0.8")
leafNode = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")
def getNumLeafs(myTree):
numLeafs = 0
firstStr = myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
        if type(secondDict[key]).__name__=='dict':#test to see if the nodes are dictionaries, if not they are leaf nodes
numLeafs += getNumLeafs(secondDict[key])
else: numLeafs +=1
return numLeafs
def getTreeDepth(myTree):
maxDepth = 0
firstStr = myTree.keys()[0]
secondDict = myTree[firstStr]
for key in secondDict.keys():
        if type(secondDict[key]).__name__=='dict':#test to see if the nodes are dictionaries, if not they are leaf nodes
thisDepth = 1 + getTreeDepth(secondDict[key])
else: thisDepth = 1
if thisDepth > maxDepth: maxDepth = thisDepth
return maxDepth
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',
xytext=centerPt, textcoords='axes fraction',
va="center", ha="center", bbox=nodeType, arrowprops=arrow_args )
def plotMidText(cntrPt, parentPt, txtString):
xMid = (parentPt[0]-cntrPt[0])/2.0 + cntrPt[0]
yMid = (parentPt[1]-cntrPt[1])/2.0 + cntrPt[1]
createPlot.ax1.text(xMid, yMid, txtString, va="center", ha="center", rotation=30)
def plotTree(myTree, parentPt, nodeTxt):#the first key tells you what feature was split on
numLeafs = getNumLeafs(myTree) #this determines the x width of this tree
depth = getTreeDepth(myTree)
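    # xOff/yOff are running offsets in axes-fraction coordinates; totalW and
    # totalD hold the tree's leaf count and depth, so every leaf gets an equal
    # horizontal slot (1/totalW) and every level an equal vertical step (1/totalD)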
firstStr = myTree.keys()[0] #the text label for this node should be this
cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW, plotTree.yOff)
plotMidText(cntrPt, parentPt, nodeTxt)
plotNode(firstStr, cntrPt, parentPt, decisionNode)
secondDict = myTree[firstStr]
plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD
for key in secondDict.keys():
        if type(secondDict[key]).__name__=='dict':#test to see if the nodes are dictionaries, if not they are leaf nodes
plotTree(secondDict[key],cntrPt,str(key)) #recursion
        else:   #it's a leaf node, print the leaf node
plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD
#if you do get a dictionary you know it's a tree, and the first element will be another dict
def createPlot(inTree):
fig = plt.figure(1, facecolor='white')
fig.clf()
axprops = dict(xticks=[], yticks=[])
createPlot.ax1 = plt.subplot(111, frameon=False, **axprops) #no ticks
    #createPlot.ax1 = plt.subplot(111, frameon=False) #ticks for demo purposes
plotTree.totalW = float(getNumLeafs(inTree))
plotTree.totalD = float(getTreeDepth(inTree))
plotTree.xOff = -0.5/plotTree.totalW; plotTree.yOff = 1.0;
plotTree(inTree, (0.5,1.0), '')
plt.show()
#def createPlot():
# fig = plt.figure(1, facecolor='white')
# fig.clf()
#    createPlot.ax1 = plt.subplot(111, frameon=False) #ticks for demo purposes
# plotNode('a decision node', (0.5, 0.1), (0.1, 0.5), decisionNode)
# plotNode('a leaf node', (0.8, 0.1), (0.3, 0.8), leafNode)
# plt.show()
def retrieveTree(i):
listOfTrees =[{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},
{'no surfacing': {0: 'no', 1: {'flippers': {0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}}
]
return listOfTrees[i]
myTree = retrieveTree(1)
createPlot(myTree) | gpl-2.0 |
giuspugl/COSMOMAP2 | utilities/healpy_functions.py | 1 | 7756 | #
# HEALPY_FUNCTIONS.PY
# interfaces to the output function of healpy package
#
# date: 2016-12-02
# author: GIUSEPPE PUGLISI
#
# Copyright (C) 2016 Giuseppe Puglisi giuspugl@sissa.it
#
import healpy as hp
import numpy as np
import matplotlib
import os
if 'DISPLAY' not in os.environ:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def obspix2mask(obspix,nside,fname=None):
"""
    Convert the observed pixels to a binary mask (``mask[obspix]=1``, 0 elsewhere).
**Parameters**
- ``osbpix``:{array}
pixels observed during the scanning of the telescope and considered
as not pathological (ordering in the HEALPIX pixelization).
- ``nside``: {int}
Healpix parameter to define the pixelization grid of the map
- ``fname``:{str}
path to the fits file to write the map, if set it writes onto the file
**Returns**
- mask :{array}
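    **Example** (illustrative): ``mask = obspix2mask(np.array([0, 1, 5]), nside=16)``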
"""
mask=np.zeros(hp.nside2npix(nside))
mask[obspix]=1
if not fname is None :
hp.write_map(fname,mask)
return mask
def reorganize_map(mapin,obspix,npix,nside,pol,fname=None):
"""
    Convert the solution map of the preconditioner to a Healpix map.
    For a polarization analysis it splits the interleaved IQU input array
    ``mapin`` into 3 arrays ``i, q, u``.
**Parameters**
- ``mapin``:{array}
solution array map (``size=npix*pol``);
- ``obspix``:{array}
array containing the observed pixels in the Healpix ordering;
- ``npix``:{int}
- ``nside``: {int}
the same as in ``obspix2mask``;
- ``pol``:{int}
- ``fname``:{str}
**Returns**
- healpix_map:{list of arrays}
pixelized map with Healpix.
"""
healpix_npix=hp.nside2npix(nside)
if pol==3:
healpix_map=np.zeros(healpix_npix*pol).reshape((healpix_npix,pol))
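        # mapin stores the Stokes components interleaved pixel by pixel
        # (I0, Q0, U0, I1, Q1, U1, ...), hence the stride-3 slicing below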
i=mapin[::3]
q,u=mapin[1::3],mapin[2::3]
m=np.where(q!=0.)[0]
healpix_map[obspix,0]=i
healpix_map[obspix,1]=q
healpix_map[obspix,2]=u
hp_list=[healpix_map[:,0],healpix_map[:,1],healpix_map[:,2]]
if pol==2:
healpix_map=np.zeros(healpix_npix*pol).reshape((healpix_npix,pol))
q,u=mapin[::2],mapin[1::2]
healpix_map[obspix,0]=q
healpix_map[obspix,1]=u
hp_list=[healpix_map[:,0],healpix_map[:,1]]
elif pol==1:
healpix_map=np.zeros(healpix_npix)
healpix_map[obspix]=mapin
hp_list=[healpix_map]
if not fname is None:
hp.write_map(fname,hp_list)
return hp_list
def show_map(outm,pol,patch,figname=None,title='' ,**kwargs):
"""
Output the map `outm` to screen or to a file.
**Parameters**
- ``outm`` :
map in the fullsky format;
- ``pol`` : {int}
- ``patch``: {str}
Key to a dictionary to get the equatorial coordinates given a name patch (Polarbear collaboration
is now observing in 3 patches: `ra23`, `ra12`, `lst4p5`);
- ``figname`` : {str}
If unset, outputs on screen;
- ``norm`` : {str}
key to the normalization of the color scale, ( `None`, `hist`, `log`)
- ``kwargs`` : {dict}
gnomview arguments
"""
coord_dict={'ra23':[-14.7,-33.09],'LP':[2.5,-53.5]}
runcase={1:'T',2:'QU',3:'TQU'}
keys=runcase[pol]
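    # one gnomview sub-panel is drawn per Stokes component listed in `keys`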
if not 'rot' in kwargs:
kwargs['rot']=coord_dict[patch]
for k,i in zip(list(keys), xrange(pol)):
unseen=np.where(outm[i] ==0)[0]
outm[i][unseen]=hp.UNSEEN
nplots=100+pol*10+i+1
plt.suptitle(title,fontsize=20)
hp.gnomview(outm[i],sub=nplots,title=k,**kwargs)
if figname is None:
plt.show()
else:
plt.savefig(figname)
plt.close()
def subtract_offset(mapp,obspix, pol):
"""
remove the average from the observed pixels of ``mapp``.
"""
if pol==1:
average=np.mean(mapp[obspix])
mapp[obspix]-=average
else:
for i in range(len(mapp)):
average=np.mean(mapp[i][obspix])
mapp[i][obspix]-=average
return mapp
def compare_maps(outm,inm,pol,patch,figname=None,remove_offset=True,**kwargs ):
"""
    Output to screen or to a file the input map, the output map processed
    from the datastream, and their difference.
**Parameters**
- ``outm`` :{array,list}
map in the `.fits` format;
- ``inm``:{array,list}
input `.fits` map to be compared with `outm`;
- ``pol`` : {int}
see :func:`show_map`;
- ``patch``: {str}
Key to a dictionary to get the equatorial coordinates given a name patch, see :func:`show_map`;
- ``mask``:{array}
binary map (0=unobserved, 1=observed pixels);
- ``figname`` : {str}
If unset, outputs on screen;
- ``remove_offset``:{bool}
      If True, removes the monopole from the input map, ``inm``, in the observed region;
- ``norm`` : {str}
key to the normalization of the color scale, ( `None`, `hist`, `log`)
"""
coord_dict={'ra23':[-14.7,-33.09],'LP':[2.5,-53.5]}
if not 'rot' in kwargs :
kwargs['rot']=coord_dict[patch]
if pol==1:
unseen=np.where(outm ==0)[0]
observ=np.where(outm !=0)[0]
else :
unseen=np.where(outm[0] ==0)[0]
observ=np.where(outm[0] !=0)[0]
if remove_offset:
inm=subtract_offset(inm,observ,pol)
outm=subtract_offset(outm,observ,pol)
if pol==1:
inm[unseen]=hp.UNSEEN
outm[unseen]=hp.UNSEEN
hp.gnomview(inm,title='T input map',sub=131,**kwargs)
#hp.graticule(dpar=5,dmer=5,local=True)
hp.gnomview(outm,title='T reconstructed map',sub=132,**kwargs)
#hp.graticule(dpar=5,dmer=5,local=True)
diff=inm-outm
diff[unseen]=hp.UNSEEN
del kwargs['min']
del kwargs['max']
hp.gnomview(diff,title='T diff',sub=133,**kwargs)
#hp.graticule(dpar=5,dmer=5,local=True)
elif pol==3:
strnmap=['T','Q','U']
figcount=231
for i in [1,2]:
inm[i][unseen]=hp.UNSEEN
outm[i][unseen]=hp.UNSEEN
hp.gnomview(inm[i],title=strnmap[i]+' input map',sub=figcount,**kwargs)
#hp.graticule(dpar=5,dmer=5,local=True)
figcount+=1
hp.gnomview(outm[i],title=strnmap[i]+' output map',sub=figcount,**kwargs)
#hp.graticule(dpar=5,dmer=5,local=True)
figcount+=1
diff=inm[i]-outm[i]
diff[unseen]=hp.UNSEEN
try:
del kwargs['min']
del kwargs['max']
except KeyError:
pass
hp.gnomview(diff,title=strnmap[i]+' diff',sub=figcount,**kwargs)
#hp.graticule(dpar=5,dmer=5,local=True)
figcount+=1
elif pol==2:
strnmap=['Q','U']
figcount=231
for i in range(2):
inm[i][unseen]=hp.UNSEEN
outm[i][unseen]=hp.UNSEEN
hp.gnomview(inm[i],title=strnmap[i]+' input map',sub=figcount,**kwargs)
#hp.graticule(dpar=5,dmer=5,local=True)
figcount+=1
hp.gnomview(outm[i],title=strnmap[i]+' reconstructed map',sub=figcount,**kwargs)
#hp.graticule(dpar=5,dmer=5,local=True)
figcount+=1
diff=inm[i]-outm[i]
diff[unseen]=hp.UNSEEN
try:
del kwargs['min']
del kwargs['max']
except KeyError:
pass
hp.gnomview((diff),title=strnmap[i]+' diff',sub=figcount,**kwargs)
#hp.graticule(dpar=5,dmer=5,local=True)
figcount+=1
if figname is None:
plt.show()
else:
plt.savefig(figname)
plt.close()
pass
| gpl-3.0 |
fako/datascope | src/future_fashion/processors/clothing_matchers.py | 1 | 2506 | from collections import OrderedDict
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from core.processors.base import Processor
from future_fashion.colors import get_vector_from_colors
from future_fashion.frames import ClothingFrame
class ClothingSetMatchProcessor(Processor):
def __init__(self, config):
super().__init__(config)
self.clothing_frame = ClothingFrame(file_path=self.config.clothing_frame_path)
def color_and_type(self, individuals):
palette = {
key[1:]: [int(value) for value in rgb.split(",")]
for key, rgb in self.config.to_dict().items() if key.startswith("$")
}
# Get indexes of color matches per clothing type
indices = OrderedDict()
for clothing_type in palette.keys():
colors = self._get_colors_from_palette(palette, clothing_type)
frame = self.clothing_frame.get_colors_frame(clothing_type)
for index in self._get_prominent_color_match_indices(colors, frame)[:self.config.type_limit]:
indices[index] = None
for individual in individuals:
if individual["_id"] in indices:
indices[individual["_id"]] = individual
for individual in indices.values():
yield individual
@staticmethod
def _get_colors_from_palette(palette, clothing_type):
if clothing_type == "top":
return [palette["top"], palette["bottom"]]
elif clothing_type == "bottom":
return [palette["bottom"], palette["top"]]
else:
raise ValueError("Unknown clothing type {}".format(clothing_type))
def _get_prominent_color_match_indices(self, colors, colors_frame):
vector = get_vector_from_colors(colors)
num = 0 # TODO: use secondary colors to order/filter the initial results
color_vector = vector[num:num+3]
color_columns = colors_frame.columns[num:num+3]
color_similarity = cosine_similarity(
colors_frame.loc[:,color_columns],
np.array(color_vector).reshape(1, -1)
)
color_similarity = color_similarity.flatten()
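        # rank all frames by cosine similarity to the requested color and keep
        # only the matches above the 0.85 similarity cut-off below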
indices = np.argsort(color_similarity)[::-1] # [::-1] reverses the ndarray
cut_ix = next((num for num, ix in enumerate(indices) if color_similarity[ix] < 0.85), len(indices)-1)
colors_frame = colors_frame.iloc[indices[:cut_ix]]
indices = list(colors_frame.index.values)
return indices
| gpl-3.0 |
yunfeilu/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
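    # (illustrative sketch only; the TASKs are meant to be filled in by the
    # reader and these parameter values are assumptions, not a reference answer)
    # pipeline = Pipeline([
    #     ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
    #     ('clf', LinearSVC(C=1000)),
    # ])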
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
sinanonline/cod-mortality-state-average-extraction | extract_mortality_v2.1.py | 1 | 15366 | #! python2
import argparse
import os
from zipfile import ZipFile
import re
import numpy as np
import pandas as pd
from datetime import datetime
from time import time as wristwatch
from geographiccodes import StateCodes
from state import State
from county import County
from directories import Directories
import gc
parser = argparse.ArgumentParser(description='Obtain number of deaths per year per state for any given cause')
parser.add_argument( "-s", "--start", type=int, default=1959, help="Starting year" )
# In 2003 and 2004, the age codes change.
# Basically, I need to re-write the entire age group portion to be able to extract 2003 and 2004 age codes.
parser.add_argument( "-e", "--end", type=int, default=2002, help="Ending year" )
parser.add_argument( "-b", "--by", choices=['state', 'county'], help="Group by what level of geographic delimitation" )
parser.add_argument( "-7", "--icd7", type=str, help="Regex for ICD7 codes" )
parser.add_argument( "-8", "--icda8", type=str, help="Regex for ICDA8 codes" )
parser.add_argument( "-9", "--icd9", type=str, help="Regex for ICD9 codes" )
parser.add_argument( "-10", "--icd10", type=str, help="Regex for ICD10 codes" )
parser.add_argument( "-o", "--output", type=str, default="output.dta", help="Output file, in Stata format. (Please include the .dta extension.)" )
parser.add_argument( "--invert", action="store_true", help="Invert the regular expression filter - ie this is the logical negation operator on the mortality cause filter." )
args = parser.parse_args()
strFolder = 'VitalStats\\'
dirs = Directories()
class CauseOfDeath():
def __init__(self):
# Following codes are for suicide
		# argparse leaves an omitted flag as None rather than raising, so test
		# for None explicitly and fall back to the suicide codes by default
		self.regexICD7 = args.icd7 if args.icd7 is not None else "^9(7[0-9][0-9\-]|63-)?$"
		self.regexICDA8 = args.icda8 if args.icda8 is not None else "^95[0-9][0-9]?$"
		self.regexICD9 = args.icd9 if args.icd9 is not None else "^95[0-9][0-9]?$"
		self.regexICD10 = args.icd10 if args.icd10 is not None else "^X([67][0-9]|8[0-4])$"
# self.regexICD10CM also could exist, but I don't need it for VitalStats
# Following codes are for all homicide
# self.regexICD7 = "^9(8[1-3]|64)"
# self.regexICDA8 = "^96[0-9]"
# self.regexICD9 = "^96[0-9]"
# self.regexICD10 = "^(X(9[0-9]|8[5-9])|Y0[0-9])"
# I can also include legal intervention by police for all years
def regex(self, year):
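		# the cause-of-death coding revision changes over time: ICD-7 through
		# 1967, ICDA-8 for 1968-1978, ICD-9 for 1979-1998, ICD-10 from 1999 on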
if year < 1968:
return self.regexICD7
elif year < 1979:
return self.regexICDA8
elif year < 1999:
return self.regexICD9
else:
return self.regexICD10
sc = StateCodes()
cod = CauseOfDeath()
dfOutput = pd.DataFrame()
# dfOutput = pd.DataFrame( columns=['year', 'alphastr', 'count', 'female_count', 'married_female_count'], index=['year', 'alphastr'] )
# dfOutput.MultiIndex.from_product( [range(args.start, args.end+1), sc.dictAlphaFromFIPS.values()], names=['year', 'alphastr'] )
iRow = 0
races = { 1: 'white', 2: 'black', 3: 'indian' }
# The following need to have conditions in them, and they all need to have a population counterpart.
agegroups = [ 'adult', 'teen', '20s', '30s', '40s', '50s', '60s' ] # Still not using this.
for year in range( args.start, args.end+1 ):
# Correcting for the 50% sampling in 1972, see email by Jean Roth, and also the manual file dt78icd8.pdf
intMult = (2 if year==1972 else 1)
startTime = datetime.now()
strFilename = 'mort' + str( year ) + '.dta'
columnPopulation = 'pop' + str(year)
if os.path.isfile(dirs.dirData + strFolder + strFilename):
print strFilename + ' found, opening...'
dta = open(dirs.dirData + strFolder + strFilename, 'rb')
else:
print dirs.dirData + strFolder + strFilename + ' not found'
strFilename = strFilename + '.zip'
print 'Opening ' + strFilename + ' instead...'
with ZipFile(dirs.dirData + strFolder + strFilename, 'r') as zipStats:
zipStats.printdir()
dta = zipStats.open( zipStats.namelist()[0] )
df = pd.read_stata( dta )
hasMaritalStatus = ('marstat' in df.columns)
try:
if args.invert:
df = df[~(df['ucod'].str.contains(cod.regex(year)))]
print "Regular expression filter inverted."
else:
df = df[df['ucod'].str.contains(cod.regex(year))]
print "Regular expression filter NOT inverted. (If you did not understand what this message is saying, you do not need worry about it.)"
except TypeError:
pass
# The following line works.
# print df['ucod'].size
# print df['ucod'].shape
# print df['ucod'].count()
groupBy = df.groupby(sc.colState(year=year))
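	# group the death records by the state-code column used by this year's file format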
for strStateCode, dfGroup in groupBy:
if sc.strConvertToAlpha(str=strStateCode.zfill(2), year=year)!="ZZ":
dfOutput.loc[iRow,'year'] = year
dfOutput.loc[iRow,'alphastr'] = sc.strConvertToAlpha(str=strStateCode.zfill(2), year=year)
dfOutput.loc[iRow,'count'] = dfGroup['ucod'].size
# White, black, indian: race == 1, 2, 3
if year < 2003:
sexMale = 1
sexFemale = 2
maritalSingle = 1
maritalMarried = 2
maritalWidowed = 3
maritalDivorced = 4
else:
sexMale = 'M'
sexFemale = 'F'
maritalSingle = 'S'
maritalMarried = 'M'
maritalWidowed = 'W'
maritalDivorced = 'D'
dfOutput.loc[iRow,'female'] = intMult * dfGroup[dfGroup['sex']==sexFemale]['ucod'].size
# For years > 2002, I could use ager52. 29 for teen; 30, 31 for 20s; 32, 33 for 30s, so on and so forth.
# I could also use ager12
dfOutput.loc[iRow,'female_adult'] = intMult * dfGroup[dfGroup['sex']==sexFemale][dfGroup['age']>=18]['ucod'].size
dfOutput.loc[iRow,'female_20s'] = intMult * dfGroup[dfGroup['sex']==sexFemale][(dfGroup['age']>=20)&(dfGroup['age']<30)]['ucod'].size
dfOutput.loc[iRow,'female_30s'] = intMult * dfGroup[dfGroup['sex']==sexFemale][(dfGroup['age']>=30)&(dfGroup['age']<40)]['ucod'].size
dfOutput.loc[iRow,'female_40s'] = intMult * dfGroup[dfGroup['sex']==sexFemale][(dfGroup['age']>=40)&(dfGroup['age']<50)]['ucod'].size
dfOutput.loc[iRow,'female_50s'] = intMult * dfGroup[dfGroup['sex']==sexFemale][(dfGroup['age']>=50)&(dfGroup['age']<60)]['ucod'].size
dfOutput.loc[iRow,'female_teen'] = intMult * dfGroup[dfGroup['sex']==sexFemale][(dfGroup['age']>=15)&(dfGroup['age']<20)]['ucod'].size
dfOutput.loc[iRow,'male'] = intMult * dfGroup[dfGroup['sex']==sexMale]['ucod'].size
dfOutput.loc[iRow,'male_adult'] = intMult * dfGroup[dfGroup['sex']==sexMale][dfGroup['age']>=18]['ucod'].size
dfOutput.loc[iRow,'male_20s'] = intMult * dfGroup[dfGroup['sex']==sexMale][(dfGroup['age']>=20)&(dfGroup['age']<30)]['ucod'].size
dfOutput.loc[iRow,'male_30s'] = intMult * dfGroup[dfGroup['sex']==sexMale][(dfGroup['age']>=30)&(dfGroup['age']<40)]['ucod'].size
			dfOutput.loc[iRow,'male_40s'] = intMult * dfGroup[dfGroup['sex']==sexMale][(dfGroup['age']>=40)&(dfGroup['age']<50)]['ucod'].size
			dfOutput.loc[iRow,'male_50s'] = intMult * dfGroup[dfGroup['sex']==sexMale][(dfGroup['age']>=50)&(dfGroup['age']<60)]['ucod'].size
dfOutput.loc[iRow,'male_teen'] = intMult * dfGroup[dfGroup['sex']==sexMale][(dfGroup['age']>=15)&(dfGroup['age']<20)]['ucod'].size
if hasMaritalStatus:
dfOutput.loc[iRow,'single_male'] = intMult * dfGroup[dfGroup['marstat']==maritalSingle][dfGroup['sex']==sexMale]['ucod'].size
dfOutput.loc[iRow,'single_female'] = intMult * dfGroup[dfGroup['marstat']==maritalSingle][dfGroup['sex']==sexFemale]['ucod'].size
dfOutput.loc[iRow,'married_female'] = intMult * dfGroup[dfGroup['marstat']==maritalMarried][dfGroup['sex']==sexFemale]['ucod'].size
dfOutput.loc[iRow,'married_male'] = intMult * dfGroup[dfGroup['marstat']==maritalMarried][dfGroup['sex']==sexMale]['ucod'].size
dfOutput.loc[iRow,'widowed_female'] = intMult * dfGroup[dfGroup['marstat']==maritalWidowed][dfGroup['sex']==sexFemale]['ucod'].size
dfOutput.loc[iRow,'widowed_male'] = intMult * dfGroup[dfGroup['marstat']==maritalWidowed][dfGroup['sex']==sexMale]['ucod'].size
dfOutput.loc[iRow,'divorced_female'] = intMult * dfGroup[dfGroup['marstat']==maritalDivorced][dfGroup['sex']==sexFemale]['ucod'].size
dfOutput.loc[iRow,'divorced_male'] = intMult * dfGroup[dfGroup['marstat']==maritalDivorced][dfGroup['sex']==sexMale]['ucod'].size
dfOutput.loc[iRow,'evermarried_female'] = dfOutput.loc[iRow,'divorced_female'] + dfOutput.loc[iRow,'widowed_female'] + dfOutput.loc[iRow,'married_female']
dfOutput.loc[iRow,'evermarried_male'] = dfOutput.loc[iRow,'divorced_male'] + dfOutput.loc[iRow,'widowed_male'] + dfOutput.loc[iRow,'married_male']
dfOutput.loc[iRow,'evermarried_female_20s'] = intMult * dfGroup[(dfGroup['marstat']==maritalMarried)|(dfGroup['marstat']==maritalDivorced)|(dfGroup['marstat']==maritalWidowed)][dfGroup['sex']==sexFemale][(dfGroup['age']>=20)&(dfGroup['age']<30)]['ucod'].size
dfOutput.loc[iRow,'evermarried_male_20s'] = intMult * dfGroup[(dfGroup['marstat']==maritalMarried)|(dfGroup['marstat']==maritalDivorced)|(dfGroup['marstat']==maritalWidowed)][dfGroup['sex']==sexMale][(dfGroup['age']>=20)&(dfGroup['age']<30)]['ucod'].size
for iRace, strRace in races.iteritems():
dfOutput.loc[iRow, strRace + '_female'] = intMult * dfGroup[dfGroup['sex']==sexFemale][dfGroup['race']==iRace]['ucod'].size
dfOutput.loc[iRow, strRace + '_female_20s'] = intMult * dfGroup[dfGroup['sex']==sexFemale][dfGroup['race']==iRace][(dfGroup['age']>=20)&(dfGroup['age']<30)]['ucod'].size
dfOutput.loc[iRow, strRace + '_female_30s'] = intMult * dfGroup[dfGroup['sex']==sexFemale][dfGroup['race']==iRace][(dfGroup['age']>=30)&(dfGroup['age']<40)]['ucod'].size
dfOutput.loc[iRow, strRace + '_female_40s'] = intMult * dfGroup[dfGroup['sex']==sexFemale][dfGroup['race']==iRace][(dfGroup['age']>=40)&(dfGroup['age']<50)]['ucod'].size
dfOutput.loc[iRow, strRace + '_female_50s'] = intMult * dfGroup[dfGroup['sex']==sexFemale][dfGroup['race']==iRace][(dfGroup['age']>=50)&(dfGroup['age']<60)]['ucod'].size
dfOutput.loc[iRow, strRace + '_female_teen'] = intMult * dfGroup[dfGroup['sex']==sexFemale][dfGroup['race']==iRace][(dfGroup['age']>=15)&(dfGroup['age']<20)]['ucod'].size
dfOutput.loc[iRow, strRace + '_male'] = intMult * dfGroup[dfGroup['sex']==sexMale][dfGroup['race']==iRace]['ucod'].size
dfOutput.loc[iRow, strRace + '_male_20s'] = intMult * dfGroup[dfGroup['sex']==sexMale][dfGroup['race']==iRace][(dfGroup['age']>=20)&(dfGroup['age']<30)]['ucod'].size
dfOutput.loc[iRow, strRace + '_male_30s'] = intMult * dfGroup[dfGroup['sex']==sexMale][dfGroup['race']==iRace][(dfGroup['age']>=30)&(dfGroup['age']<40)]['ucod'].size
dfOutput.loc[iRow, strRace + '_male_40s'] = intMult * dfGroup[dfGroup['sex']==sexMale][dfGroup['race']==iRace][(dfGroup['age']>=40)&(dfGroup['age']<50)]['ucod'].size
dfOutput.loc[iRow, strRace + '_male_50s'] = intMult * dfGroup[dfGroup['sex']==sexMale][dfGroup['race']==iRace][(dfGroup['age']>=50)&(dfGroup['age']<60)]['ucod'].size
dfOutput.loc[iRow, strRace + '_male_teen'] = intMult * dfGroup[dfGroup['sex']==sexMale][dfGroup['race']==iRace][(dfGroup['age']>=15)&(dfGroup['age']<20)]['ucod'].size
if hasMaritalStatus:
dfOutput.loc[iRow,'single_'+strRace + '_male'] = intMult * dfGroup[dfGroup['marstat']==maritalSingle][dfGroup['race']==iRace][dfGroup['sex']==sexMale]['ucod'].size
dfOutput.loc[iRow,'single_'+strRace + '_female'] = intMult * dfGroup[dfGroup['marstat']==maritalSingle][dfGroup['race']==iRace][dfGroup['sex']==sexFemale]['ucod'].size
dfOutput.loc[iRow,'married_' + strRace + '_female'] = intMult * dfGroup[dfGroup['marstat']==maritalMarried][dfGroup['race']==iRace][dfGroup['sex']==sexFemale]['ucod'].size
dfOutput.loc[iRow,'married_' + strRace + '_male'] = intMult * dfGroup[dfGroup['marstat']==maritalMarried][dfGroup['race']==iRace][dfGroup['sex']==sexMale]['ucod'].size
dfOutput.loc[iRow,'widowed_' + strRace + '_female'] = intMult * dfGroup[dfGroup['marstat']==maritalWidowed][dfGroup['race']==iRace][dfGroup['sex']==sexFemale]['ucod'].size
dfOutput.loc[iRow,'widowed_' + strRace + '_male'] = intMult * dfGroup[dfGroup['marstat']==maritalWidowed][dfGroup['race']==iRace][dfGroup['sex']==sexMale]['ucod'].size
dfOutput.loc[iRow,'divorced_' + strRace + '_female'] = intMult * dfGroup[dfGroup['marstat']==maritalDivorced][dfGroup['race']==iRace][dfGroup['sex']==sexFemale]['ucod'].size
dfOutput.loc[iRow,'divorced_' + strRace + '_male'] = intMult * dfGroup[dfGroup['marstat']==maritalDivorced][dfGroup['race']==iRace][dfGroup['sex']==sexMale]['ucod'].size
dfOutput.loc[iRow,'evermarried_' + strRace + '_female'] = dfOutput.loc[iRow,'divorced_' + strRace + '_female'] + dfOutput.loc[iRow,'widowed_' + strRace + '_female'] + dfOutput.loc[iRow,'married_' + strRace + '_female']
dfOutput.loc[iRow,'evermarried_' + strRace + '_male'] = dfOutput.loc[iRow,'divorced_' + strRace + '_male'] + dfOutput.loc[iRow,'widowed_' + strRace + '_male'] + dfOutput.loc[iRow,'married_' + strRace + '_male']
if (sc.strConvertToAlpha(str=strStateCode.zfill(2), year=year) in sc.dictFIPSfromAlpha.keys()) and (sc.strConvertToAlpha(str=strStateCode.zfill(2), year=year)!="ZZ"):
state = State(sc.strConvertToAlpha(str=strStateCode.zfill(2), year=year))
try:
dfOutput.loc[iRow,'population'] = state.intPopulation(year=year,source="older")
except:
pass
# print "I am at this point, trying to find the population. State is {0}={1}={2}. Population is: {3}".format( strStateCode, sc.strConvertToAlpha(str=strStateCode.zfill(2), year=year), state.alpha(), state.intPopulation(year=year,source="newer") )
dfOutput.loc[iRow,'pop'] = state.intPopulation(year=year)
for sex in ['Male', 'Female']:
dfOutput.loc[iRow,'pop_'+sex.lower()] = state.intPopulation(year=year,sex=sex)
dfOutput.loc[iRow,'pop_'+sex.lower()+'_adult'] = state.intPopulation(year=year,sex=sex,interest="adult")
dfOutput.loc[iRow,'pop_'+sex.lower()+'_20s'] = state.intPopulation(year=year,sex=sex,interest="20s")
dfOutput.loc[iRow,'pop_'+sex.lower()+'_30s'] = state.intPopulation(year=year,sex=sex,interest="30s")
dfOutput.loc[iRow,'pop_'+sex.lower()+'_40s'] = state.intPopulation(year=year,sex=sex,interest="40s")
dfOutput.loc[iRow,'pop_'+sex.lower()+'_50s'] = state.intPopulation(year=year,sex=sex,interest="50s")
dfOutput.loc[iRow,'pop_'+sex.lower()+'_teen'] = state.intPopulation(year=year,sex=sex,interest="teen")
for race in ['White', 'Black']:
dfOutput.loc[iRow,'pop_'+race.lower()+'_'+sex.lower()] = state.intPopulation(year=year,sex=sex,race=race)
dfOutput.loc[iRow,'pop_'+race.lower()+'_'+sex.lower()+'_adult'] = state.intPopulation(year=year,sex=sex,race=race,interest="adult")
dfOutput.loc[iRow,'pop_'+race.lower()+'_'+sex.lower()+'_20s'] = state.intPopulation(year=year,sex=sex,race=race,interest="20s")
dfOutput.loc[iRow,'pop_'+race.lower()+'_'+sex.lower()+'_30s'] = state.intPopulation(year=year,sex=sex,race=race,interest="30s")
dfOutput.loc[iRow,'pop_'+race.lower()+'_'+sex.lower()+'_40s'] = state.intPopulation(year=year,sex=sex,race=race,interest="40s")
dfOutput.loc[iRow,'pop_'+race.lower()+'_'+sex.lower()+'_50s'] = state.intPopulation(year=year,sex=sex,race=race,interest="50s")
dfOutput.loc[iRow,'pop_'+race.lower()+'_'+sex.lower()+'_teen'] = state.intPopulation(year=year,sex=sex,race=race,interest="teen")
iRow+=1
try:
dta.close()
except:
zipStats.close()
gc.collect()
print datetime.now() - startTime
dfOutput.to_stata(dirs.dirOutput + args.output)
| gpl-2.0 |
clarkfitzg/dask | dask/base.py | 2 | 5024 | import warnings
from operator import attrgetter
from hashlib import md5
from functools import partial
from toolz import merge, groupby, curry
from toolz.functoolz import Compose
from .compatibility import bind_method
from .context import _globals
from .utils import Dispatch, ignoring
__all__ = ("Base", "compute", "normalize_token", "tokenize", "visualize")
class Base(object):
"""Base class for dask collections"""
def visualize(self, filename='mydask', optimize_graph=False):
return visualize(self, filename=filename, optimize_graph=optimize_graph)
def _visualize(self, filename='mydask', optimize_graph=False):
warn = DeprecationWarning("``_visualize`` is deprecated, use "
"``visualize`` instead.")
warnings.warn(warn)
return self.visualize(filename=filename, optimize_graph=optimize_graph)
def compute(self, **kwargs):
return compute(self, **kwargs)[0]
@classmethod
def _get(cls, dsk, keys, get=None, **kwargs):
get = get or _globals['get'] or cls._default_get
dsk2 = cls._optimize(dsk, keys)
return get(dsk2, keys, **kwargs)
@classmethod
def _bind_operator(cls, op):
""" bind operator to this class """
name = op.__name__
if name.endswith('_'):
# for and_ and or_
name = name[:-1]
elif name == 'inv':
name = 'invert'
meth = '__{0}__'.format(name)
if name in ('abs', 'invert', 'neg'):
bind_method(cls, meth, cls._get_unary_operator(op))
else:
bind_method(cls, meth, cls._get_binary_operator(op))
if name in ('eq', 'gt', 'ge', 'lt', 'le', 'ne'):
return
rmeth = '__r{0}__'.format(name)
bind_method(cls, rmeth, cls._get_binary_operator(op, inv=True))
@classmethod
def _get_unary_operator(cls, op):
""" Must return a method used by unary operator """
raise NotImplementedError
@classmethod
def _get_binary_operator(cls, op, inv=False):
""" Must return a method used by binary operator """
raise NotImplementedError
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Examples
--------
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> compute(a, b)
(45, 4.5)
"""
groups = groupby(attrgetter('_optimize'), args)
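    # group collections by their optimization routine so each group's graphs can
    # be optimized together and merged into a single scheduler call below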
get = kwargs.pop('get', None) or _globals['get']
if not get:
get = args[0]._default_get
if not all(a._default_get == get for a in args):
raise ValueError("Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler `get` function using either "
"the `get` kwarg or globally with `set_options`.")
dsk = merge([opt(merge([v.dask for v in val]), [v._keys() for v in val])
for opt, val in groups.items()])
keys = [arg._keys() for arg in args]
results = get(dsk, keys, **kwargs)
return tuple(a._finalize(a, r) for a, r in zip(args, results))
def visualize(*args, **kwargs):
filename = kwargs.get('filename', 'mydask')
optimize_graph = kwargs.get('optimize_graph', False)
from dask.dot import dot_graph
if optimize_graph:
dsks = [arg._optimize(arg.dask, arg._keys()) for arg in args]
else:
dsks = [arg.dask for arg in args]
dsk = merge(dsks)
return dot_graph(dsk, filename=filename)
def normalize_function(func):
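    # unwrap curried, composed and partial callables into a deterministic,
    # hashable description so tokenize() yields stable tokens for them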
if isinstance(func, curry):
func = func._partial
if isinstance(func, Compose):
first = getattr(func, 'first', None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, partial):
kws = tuple(sorted(func.keywords.items())) if func.keywords else ()
return (normalize_function(func.func), func.args, kws)
else:
return str(func)
normalize_token = Dispatch()
normalize_token.register((int, float, str, tuple, list), lambda a: a)
normalize_token.register(object,
lambda a: normalize_function(a) if callable(a) else a)
normalize_token.register(dict, lambda a: tuple(sorted(a.items())))
with ignoring(ImportError):
import pandas as pd
normalize_token.register(pd.DataFrame,
lambda a: (id(a), len(a), list(a.columns)))
normalize_token.register(pd.Series, lambda a: (id(a), len(a), a.name))
with ignoring(ImportError):
import numpy as np
normalize_token.register(np.ndarray, lambda a: (id(a), a.dtype, a.shape))
def tokenize(*args):
""" Deterministic token
>>> tokenize([1, 2, '3'])
'9d71491b50023b06fc76928e6eddb952'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
| bsd-3-clause |
MartinSavc/scikit-learn | sklearn/tests/test_qda.py | 155 | 3481 | import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn import qda
# Data is just 6 separable points in the plane
X = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
[1, 3], [1, 2], [2, 1], [2, 2]])
y = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y3 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])
# Degenerate data with 1 feature (still should be separable)
X1 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
[2, ], [3, ]])
# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
[2, 0], [3, 0]])
# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])
# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8,3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])
def test_qda():
# QDA classification.
# This checks that QDA implements fit and predict and returns
# correct values for a simple toy dataset.
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
# Assure that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y)
# Test probas estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1, 8)
y_pred3 = clf.fit(X, y3).predict(X)
# QDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3))
# Classes should have at least 2 elements
assert_raises(ValueError, clf.fit, X, y4)
def test_qda_priors():
clf = qda.QDA()
y_pred = clf.fit(X, y).predict(X)
n_pos = np.sum(y_pred == 2)
neg = 1e-10
clf = qda.QDA(priors=np.array([neg, 1 - neg]))
y_pred = clf.fit(X, y).predict(X)
n_pos2 = np.sum(y_pred == 2)
assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
# The default is to not set the covariances_ attribute
clf = qda.QDA().fit(X, y)
assert_true(not hasattr(clf, 'covariances_'))
# Test the actual attribute:
clf = qda.QDA().fit(X, y, store_covariances=True)
assert_true(hasattr(clf, 'covariances_'))
assert_array_almost_equal(
clf.covariances_[0],
np.array([[0.7, 0.45], [0.45, 0.7]])
)
assert_array_almost_equal(
clf.covariances_[1],
np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]])
)
def test_qda_regularization():
# the default is reg_param=0. and will cause issues
# when there is a constant variable
clf = qda.QDA()
with ignore_warnings():
y_pred = clf.fit(X2, y).predict(X2)
assert_true(np.any(y_pred != y))
# adding a little regularization fixes the problem
clf = qda.QDA(reg_param=0.01)
with ignore_warnings():
clf.fit(X2, y)
y_pred = clf.predict(X2)
assert_array_equal(y_pred, y)
# Case n_samples_in_a_class < n_features
clf = qda.QDA(reg_param=0.1)
with ignore_warnings():
clf.fit(X5, y5)
y_pred5 = clf.predict(X5)
assert_array_equal(y_pred5, y5)
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/extension/base/printing.py | 5 | 1167 | import io
import pytest
import pandas as pd
from .base import BaseExtensionTests
class BasePrintingTests(BaseExtensionTests):
"""Tests checking the formatting of your EA when printed."""
@pytest.mark.parametrize("size", ["big", "small"])
def test_array_repr(self, data, size):
if size == "small":
data = data[:5]
else:
data = type(data)._concat_same_type([data] * 5)
result = repr(data)
assert type(data).__name__ in result
assert f"Length: {len(data)}" in result
assert str(data.dtype) in result
if size == "big":
assert "..." in result
def test_array_repr_unicode(self, data):
result = str(data)
assert isinstance(result, str)
def test_series_repr(self, data):
ser = pd.Series(data)
assert data.dtype.name in repr(ser)
def test_dataframe_repr(self, data):
df = pd.DataFrame({"A": data})
repr(df)
def test_dtype_name_in_info(self, data):
buf = io.StringIO()
pd.DataFrame({"A": data}).info(buf=buf)
result = buf.getvalue()
assert data.dtype.name in result
| bsd-3-clause |
aelaguiz/pyvotune | samples/mnist/main.py | 1 | 6407 | # -*- coding: utf-8 -*-
import inspyred
import inspyred.ec.cea_parallel_evaluator
import pyvotune
import pyvotune.sklearn
import argparse
import random
import sys
import redis
import time
import multiprocessing
from shared import load_dataset, generator, evaluator, _evaluator,\
get_gene_pool, validate_models, classify_models
log = pyvotune.log.logger()
def get_args():
parser = argparse.ArgumentParser(description='Lark',
formatter_class=
argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-r', '--host', dest='host', type=str,
default='localhost', required=False,
help="Redis host")
parser.add_argument('-p', '--port', dest='port', type=int,
default=6379, required=False,
help="Redis port")
parser.add_argument('-b', '--db', dest='db', type=int,
default=0, required=False,
help="Redis db")
parser.add_argument('-g', '--grid_size', dest='grid_size', type=int,
default=None, required=True,
help="Size of eval grid")
parser.add_argument('-s', '--neighborhood_size', dest='neighborhood_size', type=int,
default=None, required=True,
help="Diameter of neighborhood")
parser.add_argument('-t', '--eval_timeout', dest='eval_timeout', type=int,
default=None, required=True,
help="Max duration of an eval in seconds")
parser.add_argument('-u', '--num_samples', dest='num_samples', type=int,
default=None, required=False,
help="Maximum number of samples to test with")
parser.add_argument('-l', '--max_length', dest='max_length', type=int,
default=None, required=False,
help="Maximum length of genome")
parser.add_argument('-m', '--mutation_rate', dest='mutation_rate', type=float,
default=None, required=True,
help="Mutation rate (0-1.0)")
parser.add_argument('-c', '--crossover_rate', dest='crossover_rate', type=float,
default=None, required=True,
help="Crossover rate (0-1.0)")
parser.add_argument('-d', '--debug-mode', dest='debug_mode', default=False,
action='store_true', required=False,
help="Enable debug mode")
parser.add_argument('-w', '--worker-mode', dest='worker_mode', default=False,
action='store_true', required=False, help="Enable worker mode")
parser.add_argument('-v', '--validate', dest='validate', default=None,
nargs=1, required=False,
help="Validate a given model")
parser.add_argument('-k', '--classify', dest='classify', default=None,
nargs=1, required=False,
help="Save classification into given output file")
return parser.parse_args()
if __name__ == '__main__':
app_args = get_args()
load_dataset(app_args.num_samples)
pyvotune.set_debug(app_args.debug_mode)
rng = random.Random()
gene_pool = get_gene_pool(rng)
if app_args.classify:
if not app_args.validate:
log.error("Need path to model -v")
sys.exit(0)
classify_models(app_args.validate[0], app_args.classify[0])
sys.exit(1)
elif app_args.validate:
validate_models(app_args.validate[0])
sys.exit(1)
if not app_args.worker_mode:
#################################
# Initialize PyvoTune Generator #
#################################
gen = pyvotune.Generate(
initial_state={
'sparse': False
},
gene_pool=gene_pool,
max_length=app_args.max_length,
noop_frequency=0.2,
rng=rng)
####################################
# Initialize Inspyred Genetic Algo #
####################################
ea = inspyred.ec.cEA(rng)
ea.logger = log
ea.terminator = [
#inspyred.ec.terminators.time_termination,
inspyred.ec.terminators.average_fitness_termination
]
ea.selector = inspyred.ec.selectors.fitness_proportionate_selection
ea.archiver = pyvotune.archivers.pickle_wrap_archiver
ea.observer = pyvotune.observers.stats_observer
        # Use PyvoTune variators
ea.variator = [
pyvotune.variators.random_reset_mutation,
pyvotune.variators.param_reset_mutation,
pyvotune.variators.scramble_mutation,
pyvotune.variators.uniform_crossover,
pyvotune.variators.n_point_crossover
]
# Go!
final_pop = ea.evolve(
neighborhood=inspyred.ec.neighborhoods.grid_neighborhood,
generator=generator,
evaluator=pyvotune.evaluators.cell_evaluator_rq,
pyvotune_generator=gen,
async_evaluator=True,
rq_host=app_args.host,
rq_port=app_args.port,
rq_db=app_args.db,
rq_evaluator=evaluator,
rq_timeout=app_args.eval_timeout,
rq_timeout_fitness=0.,
crossover_rate=app_args.crossover_rate,
mutation_rate=app_args.mutation_rate,
tolerance=0.01,
#max_time=300,
underlying_archiver=inspyred.ec.archivers.best_archiver,
archive_path='./archive.pkl',
nbh_grid_size=app_args.grid_size,
nbh_size=app_args.neighborhood_size,
num_selected=2,
maximize=True,
num_elites=5)
####################
# Display Solution #
####################
best = max(final_pop)
fitness = _evaluator(best.candidate, display=True)
log.info("Fitness: %f" % fitness)
log.info(best.candidate)
else:
import pysplash
pysplash.set_debug(app_args.debug_mode)
# Start redis queue workers
pyvotune.evaluators.cea_rq_worker.start_pool(
app_args.host, app_args.port, '', app_args.db)
| mit |
LodewijkSikkel/paparazzi | sw/tools/tcp_aircraft_server/phoenix/__init__.py | 86 | 4470 | #Copyright 2014, Antoine Drouin
"""
Phoenix is a Python library for interacting with Paparazzi
"""
import math
"""
Unit conversions
"""
def rad_of_deg(d): return d/180.*math.pi
def deg_of_rad(r): return r*180./math.pi
def rps_of_rpm(r): return r*2.*math.pi/60.
def rpm_of_rps(r): return r/2./math.pi*60.
def m_of_inch(i): return i*0.0254
"""
Plotting
"""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
my_title_spec = {'color' : 'k', 'fontsize' : 20 }
def save_if(filename):
if filename: matplotlib.pyplot.savefig(filename, dpi=80)
def prepare_fig(fig=None, window_title=None, figsize=(20.48, 10.24), margins=None):
if fig == None:
fig = plt.figure(figsize=figsize)
# else:
# plt.figure(fig.number)
if margins:
left, bottom, right, top, wspace, hspace = margins
fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
hspace=hspace, wspace=wspace)
if window_title:
fig.canvas.set_window_title(window_title)
return fig
def decorate(ax, title=None, xlab=None, ylab=None, legend=None, xlim=None, ylim=None):
ax.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
ax.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
if xlab:
ax.xaxis.set_label_text(xlab)
if ylab:
ax.yaxis.set_label_text(ylab)
if title:
ax.set_title(title, my_title_spec)
    if legend is not None:
        ax.legend(legend, loc='best')
    if xlim is not None:
        ax.set_xlim(xlim[0], xlim[1])
    if ylim is not None:
        ax.set_ylim(ylim[0], ylim[1])
"""
Messages
"""
#: dictionary mapping the C type to its length in bytes (e.g char -> 1)
TYPE_TO_LENGTH_MAP = {
"char" : 1,
"uint8" : 1,
"int8" : 1,
"uint16" : 2,
"int16" : 2,
"uint32" : 4,
"int32" : 4,
"float" : 4,
"double" : 8,
}
#: dictionary mapping the C type to correct format string
TYPE_TO_PRINT_MAP = {
float : "%f",
str : "%s",
chr : "%c",
int : "%d"
}
ACID_ALL = 0xFF
ACID_TEST = 0xFE
ACID_GROUNDSTATION = 0xFD
#: dictionary mapping debug types to format characters
DEBUG_MESSAGES = {
"DEBUG_UINT8" : "%d",
"DEBUG_INT32" : "%d",
"DEBUG_FLOAT" : "%#f"
}
"""
Binary logs
See format description in sw/airborne/subsystems/datalink/fms_link.c
"""
import struct
def hex_of_bin(b): return ' '.join( [ "%02X" % ord( x ) for x in b ] )
import pdb
def read_binary_log(filename, tick_freq = 2*512.):
f = open(filename, "rb")
d = f.read()
packet_header_len = 6
msg_header_len = 2
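    # packet layout: a 6-byte header (uint32 timestamp + uint16 payload length),
    # then messages each prefixed by a 2-byte (length, id) header, then a
    # 2-byte trailer before the next packet starts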
def read_packet(d, packet_start):
payload_start = packet_start+packet_header_len
timestamp, payload_len = struct.unpack("IH", d[packet_start:payload_start])
msgs = read_packet_payload(d, payload_start, payload_len)
next_packet = payload_start+payload_len+2
return timestamp, msgs, next_packet
def read_packet_payload(d, s, l):
msgs = []
packet_end = s+l; msg_start = s
while msg_start<packet_end:
payload_start = msg_start+msg_header_len
msg_len, msg_id = struct.unpack("BB", d[msg_start:payload_start])
payload_end = payload_start+msg_len
msg_payload = d[payload_start:payload_end]
msgs.append([msg_id, msg_payload])
#print msg_id, msg_len, hex_of_bin(msg_payload)
msg_start = payload_end
return msgs
packets = []
packet_start=0
while packet_start<len(d):
timestamp, msgs, next_packet = read_packet(d, packet_start)
packets.append([timestamp/tick_freq, msgs])
#print timestamp, msgs
packet_start = next_packet
f.close()
return packets
def extract_from_binary_log(protocol, packets, msg_names, t_min=None, t_max=None):
ret = [{'time':[], 'data':[]} for m in msg_names]
if t_min == None: t_min = packets[0][0]
if t_max == None: t_max = packets[-1][0]
for t, msgs in packets:
if t>= t_min and t<= t_max:
for id, payload in msgs:
m = protocol.get_message_by_id('telemetry', id)
                try:
                    i = msg_names.index(m.name)
                except ValueError:
                    continue  # skip messages that were not requested
                ret[i]['time'].append(t); ret[i]['data'].append(m.unpack_scaled_values(payload))
return ret
| gpl-2.0 |
McGlock/WisePair | src/code/make_fig3.py | 1 | 4382 | from matplotlib import pyplot as plt
import matplotlib.cm as cm
import sys
import pandas as pd
import numpy as np
file = sys.argv[1]
data = pd.read_csv(file)
# separate by min resample limit
# Good RS nd PSC
bouts = list(set(data.number_of_bout))
colors = cm.brg(np.linspace(0, 1, len(bouts)))
legend_series_list = []
for bout in bouts:
green_data = data.loc[(data.met_resamp_min >= data.min_resampled) &
(data.mean_rs_per_mrm >= data.min_times_resampled) & (data.number_of_bout == bout)
]
# Good RS only
blue_data = data.loc[(data.met_resamp_min >= data.min_resampled) &
(data.mean_rs_per_mrm < data.min_times_resampled) & (data.number_of_bout == bout)
]
# Good RSC only
yellow_data = data.loc[(data.met_resamp_min < data.min_resampled) &
(data.mean_rs_per_mrm >= data.min_times_resampled) & (data.number_of_bout == bout)
]
# No criteria met
red_data = data.loc[(data.met_resamp_min < data.min_resampled) &
(data.mean_rs_per_mrm < data.min_times_resampled) & (data.number_of_bout == bout)
]
gx = green_data.number_of_samples
gy = green_data.met_resamp_min
gs = green_data.number_of_bout
gc = "green"
bx = blue_data.number_of_samples
by = blue_data.met_resamp_min
bs = blue_data.number_of_bout
bc = "blue"
yx = yellow_data.number_of_samples
yy = yellow_data.met_resamp_min
ys = yellow_data.number_of_bout
yc = "yellow"
rx = red_data.number_of_samples
ry = red_data.met_resamp_min
rs = red_data.number_of_bout
rc = "red"
# red = plt.scatter(rx, ry, c=rc)#, s=rs)
#blue = plt.scatter(bx, by, c=bc)#, s=bs)
#yellow = plt.scatter(yx, yy, c=yc)#, s=ys)
#green = plt.scatter(gx, gy, c=gc)#, s=gs)
plt.axhline(y=data.min_resampled[0], color="black", ls="--")
#plt.legend((green, blue, yellow, red),
# ('RS and RSC Met', 'RS Met only', 'RSC Met only','None Met'),
# scatterpoints=1,
# loc='upper left'
# )
# Create contours
contour_data = data.loc[(data.number_of_bout == bout)][['number_of_samples', 'met_resamp_min']]
h_boundary = contour_data.groupby('number_of_samples')['met_resamp_min'].max().reset_index()
v_boundary = contour_data.groupby('met_resamp_min')['number_of_samples'].max().reset_index()
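	# h_boundary/v_boundary trace the outer envelope of the scatter for this
	# bout count: the best met_resamp_min per sample count and vice versa; the
	# loops below prune points that fall back below an earlier maximum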
drop_list = []
for index in range(1, len(h_boundary)):
found_less = True
ind = index
while found_less == True and ind < len(h_boundary) - 1:
next_row = h_boundary.loc[[ind + 1]]
current_row = h_boundary.loc[[index]]
if float(next_row.met_resamp_min) < float(current_row.met_resamp_min):
drop_list.append(ind + 1)
if ind == len(h_boundary):
found_less = False
ind += 1
h_boundary.drop(h_boundary.index[drop_list], inplace=True)
drop_list = []
for index in range(1, len(v_boundary)):
found_less = True
ind = index
while found_less == True and ind < len(v_boundary) - 1:
next_row = v_boundary.loc[[ind + 1]]
current_row = v_boundary.loc[[index]]
if float(next_row.number_of_samples) < float(current_row.number_of_samples):
drop_list.append(ind + 1)
if ind == len(v_boundary):
found_less = False
ind += 1
v_boundary.drop(v_boundary.index[drop_list], inplace=True)
hori, = plt.plot(h_boundary.number_of_samples, h_boundary.met_resamp_min, color=colors[bouts.index(bout)])
vert, = plt.plot(v_boundary.number_of_samples, v_boundary.met_resamp_min, color=colors[bouts.index(bout)])
legend_series_list.append(hori)
plt.legend(legend_series_list,
bouts,
loc='upper left',
ncol=2,
fontsize=12
)
# set axis limits
xmin = data.number_of_samples.min() - data.number_of_samples.min() * 0.1
xmax = data.number_of_samples.max() + data.number_of_samples.max() * 0.1
ymin = data.met_resamp_min.min() - 0.5
ymax = data.met_resamp_min.max() + data.met_resamp_min.max() * 0.1
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.xlabel('total number of samples')
plt.ylabel('# of resampled individuals')
plt.savefig('fig3.pdf')
| mit |
davidgbe/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 155 | 8058 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed, [])
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
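    # Why these agree: KernelPCA's default kernel is linear, K(x, y) = x . y, so
    # the Gram matrices np.dot(X_fit, X_fit.T) and np.dot(X_pred, X_fit.T) fed to
    # the 'precomputed' variant are exactly what the default estimator computes
    # internally; both paths therefore diagonalize the same matrix and can differ
    # at most in the sign of each component, hence the comparisons on np.abs.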
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| bsd-3-clause |
liberatorqjw/scikit-learn | sklearn/utils/tests/test_testing.py | 33 | 3783 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LDA()
tree = DecisionTreeClassifier()
# LDA doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
hugobowne/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
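# model() above is the logistic (sigmoid) function 1 / (1 + exp(-x)); it maps the
# linear decision value onto a probability in (0, 1), e.g. model(0) == 0.5,
# model(4) ~= 0.982 and model(-4) ~= 0.018.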
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
belltailjp/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# test that the feature score of the best features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
X, y, _, _, _ = center_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
lbdreyer/iris | lib/iris/tests/unit/quickplot/test_contour.py | 5 | 1533 | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.quickplot.contour` function."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.quickplot as qplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
qplt.contour(self.cube, coords=("bar", "str_coord"))
self.assertPointsTickLabels("yaxis")
def test_xaxis_labels(self):
qplt.contour(self.cube, coords=("str_coord", "bar"))
self.assertPointsTickLabels("xaxis")
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=False)
self.foo = self.cube.coord("foo").points
self.foo_index = np.arange(self.foo.size)
self.bar = self.cube.coord("bar").points
self.bar_index = np.arange(self.bar.size)
self.data = self.cube.data
self.dataT = self.data.T
self.mpl_patch = self.patch("matplotlib.pyplot.contour")
self.draw_func = qplt.contour
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
xavierwu/scikit-learn | sklearn/datasets/base.py | 196 | 18554 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __getstate__(self):
return self.__dict__
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
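# Usage sketch (the expanded paths below are illustrative and machine dependent):
#
#     data_home = get_data_home()            # e.g. '/home/user/scikit_learn_data'
#     data_home = get_data_home('/tmp/skl')  # explicit path, created if missing
#
# Setting the SCIKIT_LEARN_DATA environment variable overrides the default
# '~/scikit_learn_data' location in the same way as passing a path explicitly.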
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
    Similar feature extractors should be built for other kinds of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
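# Usage sketch (the container folder name below is hypothetical):
#
#     bunch = load_files('reviews_by_topic/', encoding='utf-8', random_state=0)
#     # bunch.target_names -> one entry per sub-folder (the class names)
#     # bunch.data[i]      -> decoded text of the i-th document
#     # bunch.target[i]    -> integer index into bunch.target_names
#
# With load_content=False only the file paths and labels are returned, which is
# useful when the corpus is too large to hold in memory at once.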
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
NvanAdrichem/networkx | examples/drawing/chess_masters.py | 34 | 5104 | #!/usr/bin/env python
"""
An example of the MultiDiGraph class
The function chess_pgn_graph reads a collection of chess
matches stored in the specified PGN file
(PGN ="Portable Game Notation")
Here the (compressed) default file ---
chess_masters_WCC.pgn.bz2 ---
contains all 685 World Chess Championship matches
from 1886 - 1985.
(data from http://chessproblem.my-free-games.com/chess/games/Download-PGN.php)
The chess_pgn_graph() function returns a MultiDiGraph
with multiple edges. Each node is
the last name of a chess master. Each edge is directed
from white to black and contains selected game info.
The key statement in chess_pgn_graph below is
G.add_edge(white, black, game_info)
where game_info is a dict describing each game.
"""
# Copyright (C) 2006-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
# tag names specifying what game info should be
# stored in the dict on each digraph edge
game_details=["Event",
"Date",
"Result",
"ECO",
"Site"]
def chess_pgn_graph(pgn_file="chess_masters_WCC.pgn.bz2"):
"""Read chess games in pgn format in pgn_file.
Filenames ending in .gz or .bz2 will be uncompressed.
Return the MultiDiGraph of players connected by a chess game.
Edges contain game data in a dict.
"""
import bz2
G=nx.MultiDiGraph()
game={}
datafile = bz2.BZ2File(pgn_file)
lines = (line.decode().rstrip('\r\n') for line in datafile)
for line in lines:
if line.startswith('['):
tag,value=line[1:-1].split(' ',1)
game[str(tag)]=value.strip('"')
else:
# empty line after tag set indicates
# we finished reading game info
if game:
white=game.pop('White')
black=game.pop('Black')
G.add_edge(white, black, **game)
game={}
return G
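# Minimal sketch of the add_edge pattern described in the module docstring
# (the player names and tag values below are illustrative):
#
#     H = nx.MultiDiGraph()
#     H.add_edge('Steinitz', 'Zukertort', Result='1-0', Date='1886.01.11')
#     H.add_edge('Steinitz', 'Zukertort', Result='0-1', Date='1886.01.13')
#
# Each game becomes one directed edge from the white player to the black player,
# so the same pair of players can be connected by many parallel edges.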
if __name__ == '__main__':
G=chess_pgn_graph()
ngames=G.number_of_edges()
nplayers=G.number_of_nodes()
print("Loaded %d chess games between %d players\n"\
% (ngames,nplayers))
# identify connected components
# of the undirected version
Gcc=list(nx.connected_component_subgraphs(G.to_undirected()))
if len(Gcc)>1:
print("Note the disconnected component consisting of:")
print(Gcc[1].nodes())
# find all games with B97 opening (as described in ECO)
openings=set([game_info['ECO']
for (white,black,game_info) in G.edges(data=True)])
print("\nFrom a total of %d different openings,"%len(openings))
print('the following games used the Sicilian opening')
print('with the Najdorff 7...Qb6 "Poisoned Pawn" variation.\n')
for (white,black,game_info) in G.edges(data=True):
if game_info['ECO']=='B97':
print(white,"vs",black)
for k,v in game_info.items():
print(" ",k,": ",v)
print("\n")
try:
import matplotlib.pyplot as plt
except ImportError:
import sys
print("Matplotlib needed for drawing. Skipping")
sys.exit(0)
# make new undirected graph H without multi-edges
H=nx.Graph(G)
# edge width is proportional number of games played
edgewidth=[]
for (u,v,d) in H.edges(data=True):
edgewidth.append(len(G.get_edge_data(u,v)))
# node size is proportional to number of games won
wins=dict.fromkeys(G.nodes(),0.0)
for (u,v,d) in G.edges(data=True):
r=d['Result'].split('-')
if r[0]=='1':
wins[u]+=1.0
elif r[0]=='1/2':
wins[u]+=0.5
wins[v]+=0.5
else:
wins[v]+=1.0
try:
pos=nx.nx_agraph.graphviz_layout(H)
except:
pos=nx.spring_layout(H,iterations=20)
plt.rcParams['text.usetex'] = False
plt.figure(figsize=(8,8))
nx.draw_networkx_edges(H,pos,alpha=0.3,width=edgewidth, edge_color='m')
nodesize=[wins[v]*50 for v in H]
nx.draw_networkx_nodes(H,pos,node_size=nodesize,node_color='w',alpha=0.4)
nx.draw_networkx_edges(H,pos,alpha=0.4,node_size=0,width=1,edge_color='k')
nx.draw_networkx_labels(H,pos,fontsize=14)
font = {'fontname' : 'Helvetica',
'color' : 'k',
'fontweight' : 'bold',
'fontsize' : 14}
plt.title("World Chess Championship Games: 1886 - 1985", font)
# change font and write text (using data coordinates)
font = {'fontname' : 'Helvetica',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 14}
plt.text(0.5, 0.97, "edge width = # games played",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.text(0.5, 0.94, "node size = # games won",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.axis('off')
plt.savefig("chess_masters.png",dpi=75)
print("Wrote chess_masters.png")
plt.show() # display
| bsd-3-clause |
gVallverdu/pymatgen | pymatgen/io/lammps/tests/test_inputs.py | 4 | 4485 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import re
import filecmp
import shutil
import pandas as pd
from pymatgen import Lattice, Structure
from pymatgen.io.lammps.data import LammpsData
from pymatgen.io.lammps.inputs import LammpsRun, write_lammps_inputs
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
"test_files", "lammps")
class LammpsRunTest(unittest.TestCase):
maxDiff = None
def test_md(self):
s = Structure.from_spacegroup(225, Lattice.cubic(3.62126),
["Cu"], [[0, 0, 0]])
ld = LammpsData.from_structure(s, atom_style="atomic")
ff = "\n".join(["pair_style eam", "pair_coeff * * Cu_u3.eam"])
md = LammpsRun.md(data=ld, force_field=ff, temperature=1600.0,
nsteps=10000)
md.write_inputs(output_dir="md")
with open(os.path.join("md", "in.md")) as f:
md_script = f.read()
script_string = """# Sample input script template for MD
# Initialization
units metal
atom_style atomic
# Atom definition
read_data md.data
#read_restart md.restart
# Force field settings (consult official document for detailed formats)
pair_style eam
pair_coeff * * Cu_u3.eam
# Create velocities
velocity all create 1600.0 142857 mom yes rot yes dist gaussian
# Ensemble constraints
#fix 1 all nve
fix 1 all nvt temp 1600.0 1600.0 0.1
#fix 1 all npt temp 1600.0 1600.0 0.1 iso $pressure $pressure 1.0
# Various operations within timestepping
#fix ...
#compute ...
# Output settings
#thermo_style custom ... # control the thermo data type to output
thermo 100 # output thermo data every N steps
#dump 1 all atom 100 traj.*.gz # dump a snapshot every 100 steps
# Actions
run 10000
"""
self.assertEqual(md_script, script_string)
self.assertTrue(os.path.exists(os.path.join("md", "md.data")))
@classmethod
def tearDownClass(cls):
temp_dirs = ["md"]
for td in temp_dirs:
if os.path.exists(td):
shutil.rmtree(td)
class FuncTest(unittest.TestCase):
def test_write_lammps_inputs(self):
# script template
with open(os.path.join(test_dir, "kappa.txt")) as f:
kappa_template = f.read()
kappa_settings = {"method": "heat"}
write_lammps_inputs(output_dir="heat", script_template=kappa_template,
settings=kappa_settings)
with open(os.path.join("heat", "in.lammps")) as f:
kappa_script = f.read()
fix_hot = re.search(r"fix\s+hot\s+all\s+([^\s]+)\s+", kappa_script)
# placeholders supposed to be filled
self.assertEqual(fix_hot.group(1), "heat")
fix_cold = re.search(r"fix\s+cold\s+all\s+([^\s]+)\s+", kappa_script)
self.assertEqual(fix_cold.group(1), "heat")
lattice = re.search(r"lattice\s+fcc\s+(.*)\n", kappa_script)
# parentheses not supposed to be filled
self.assertEqual(lattice.group(1), "${rho}")
pair_style = re.search(r"pair_style\slj/cut\s+(.*)\n", kappa_script)
self.assertEqual(pair_style.group(1), "${rc}")
with open(os.path.join(test_dir, "in.peptide")) as f:
peptide_script = f.read()
# copy data file
src = os.path.join(test_dir, "data.quartz")
write_lammps_inputs(output_dir="path", script_template=peptide_script,
data=src)
dst = os.path.join("path", "data.peptide")
self.assertTrue(filecmp.cmp(src, dst, shallow=False))
# write data file from obj
obj = LammpsData.from_file(src, atom_style="atomic")
write_lammps_inputs(output_dir="obj", script_template=peptide_script,
data=obj)
obj_read = LammpsData.from_file(os.path.join("obj", "data.peptide"),
atom_style="atomic")
pd.testing.assert_frame_equal(obj_read.masses, obj.masses)
pd.testing.assert_frame_equal(obj_read.atoms, obj.atoms)
@classmethod
def tearDownClass(cls):
temp_dirs = ["heat", "path", "obj"]
for td in temp_dirs:
if os.path.exists(td):
shutil.rmtree(td)
if __name__ == "__main__":
unittest.main()
| mit |
nekia/incubator-superset-dev | superset/sql_lab.py | 1 | 8525 | from time import sleep
from datetime import datetime
import json
import logging
import pandas as pd
import sqlalchemy
import uuid
from celery.exceptions import SoftTimeLimitExceeded
from sqlalchemy.pool import NullPool
from sqlalchemy.orm import sessionmaker
from superset import (
app, db, utils, dataframe, results_backend)
from superset.models.sql_lab import Query
from superset.sql_parse import SupersetQuery
from superset.db_engine_specs import LimitMethod
from superset.jinja_context import get_template_processor
from superset.utils import QueryStatus, get_celery_app
config = app.config
celery_app = get_celery_app(config)
stats_logger = app.config.get('STATS_LOGGER')
SQLLAB_TIMEOUT = config.get('SQLLAB_ASYNC_TIME_LIMIT_SEC', 600)
class SqlLabException(Exception):
pass
def dedup(l, suffix='__'):
"""De-duplicates a list of string by suffixing a counter
Always returns the same number of entries as provided, and always returns
unique values.
>>> dedup(['foo', 'bar', 'bar', 'bar'])
['foo', 'bar', 'bar__1', 'bar__2']
"""
new_l = []
seen = {}
for s in l:
if s in seen:
seen[s] += 1
s += suffix + str(seen[s])
else:
seen[s] = 0
new_l.append(s)
return new_l
def get_query(query_id, session, retry_count=5):
"""attemps to get the query and retry if it cannot"""
query = None
attempt = 0
while not query and attempt < retry_count:
try:
query = session.query(Query).filter_by(id=query_id).one()
except Exception:
attempt += 1
logging.error(
"Query with id `{}` could not be retrieved".format(query_id))
stats_logger.incr('error_attempting_orm_query_' + str(attempt))
logging.error("Sleeping for a sec before retrying...")
sleep(1)
if not query:
stats_logger.incr('error_failed_at_getting_orm_query')
raise SqlLabException("Failed at getting query")
return query
def get_session(nullpool):
if nullpool:
engine = sqlalchemy.create_engine(
app.config.get('SQLALCHEMY_DATABASE_URI'), poolclass=NullPool)
session_class = sessionmaker()
session_class.configure(bind=engine)
return session_class()
else:
session = db.session()
session.commit() # HACK
return session
@celery_app.task(bind=True, soft_time_limit=SQLLAB_TIMEOUT)
def get_sql_results(
ctask, query_id, return_results=True, store_results=False):
"""Executes the sql query returns the results."""
try:
return execute_sql(
ctask, query_id, return_results, store_results)
except Exception as e:
logging.exception(e)
stats_logger.incr('error_sqllab_unhandled')
sesh = get_session(not ctask.request.called_directly)
query = get_query(query_id, sesh)
query.error_message = str(e)
query.status = QueryStatus.FAILED
query.tmp_table_name = None
sesh.commit()
raise
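    # Error path: the Query row is marked FAILED and the message stored so callers
    # can surface it, then the exception is re-raised so the Celery task itself is
    # also recorded as failed rather than appearing to succeed.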
def execute_sql(ctask, query_id, return_results=True, store_results=False):
"""Executes the sql query returns the results."""
session = get_session(not ctask.request.called_directly)
query = get_query(query_id, session)
payload = dict(query_id=query_id)
database = query.database
db_engine_spec = database.db_engine_spec
db_engine_spec.patch()
def handle_error(msg):
"""Local method handling error while processing the SQL"""
query.error_message = msg
query.status = QueryStatus.FAILED
query.tmp_table_name = None
session.commit()
payload.update({
'status': query.status,
            'error_message': msg,
})
return payload
if store_results and not results_backend:
return handle_error("Results backend isn't configured.")
# Limit enforced only for retrieving the data, not for the CTA queries.
superset_query = SupersetQuery(query.sql)
executed_sql = superset_query.stripped()
if not superset_query.is_select() and not database.allow_dml:
return handle_error(
"Only `SELECT` statements are allowed against this database")
if query.select_as_cta:
if not superset_query.is_select():
return handle_error(
"Only `SELECT` statements can be used with the CREATE TABLE "
"feature.")
if not query.tmp_table_name:
start_dttm = datetime.fromtimestamp(query.start_time)
query.tmp_table_name = 'tmp_{}_table_{}'.format(
query.user_id,
start_dttm.strftime('%Y_%m_%d_%H_%M_%S'))
executed_sql = superset_query.as_create_table(query.tmp_table_name)
query.select_as_cta_used = True
elif (
query.limit and superset_query.is_select() and
db_engine_spec.limit_method == LimitMethod.WRAP_SQL):
executed_sql = database.wrap_sql_limit(executed_sql, query.limit)
query.limit_used = True
try:
template_processor = get_template_processor(
database=database, query=query)
executed_sql = template_processor.process_template(executed_sql)
except Exception as e:
logging.exception(e)
msg = "Template rendering failed: " + utils.error_msg_from_exception(e)
return handle_error(msg)
query.executed_sql = executed_sql
query.status = QueryStatus.RUNNING
query.start_running_time = utils.now_as_float()
session.merge(query)
session.commit()
logging.info("Set query to 'running'")
engine = database.get_sqla_engine(
schema=query.schema, nullpool=not ctask.request.called_directly)
try:
engine = database.get_sqla_engine(
schema=query.schema, nullpool=not ctask.request.called_directly)
conn = engine.raw_connection()
cursor = conn.cursor()
logging.info("Running query: \n{}".format(executed_sql))
logging.info(query.executed_sql)
cursor.execute(
query.executed_sql, **db_engine_spec.cursor_execute_kwargs)
logging.info("Handling cursor")
db_engine_spec.handle_cursor(cursor, query, session)
logging.info("Fetching data: {}".format(query.to_dict()))
data = db_engine_spec.fetch_data(cursor, query.limit)
except SoftTimeLimitExceeded as e:
logging.exception(e)
conn.close()
return handle_error(
"SQL Lab timeout. This environment's policy is to kill queries "
"after {} seconds.".format(SQLLAB_TIMEOUT))
except Exception as e:
logging.exception(e)
conn.close()
return handle_error(db_engine_spec.extract_error_message(e))
logging.info("Fetching cursor description")
cursor_description = cursor.description
conn.commit()
conn.close()
if query.status == utils.QueryStatus.STOPPED:
return json.dumps({
'query_id': query.id,
'status': query.status,
'query': query.to_dict(),
}, default=utils.json_iso_dttm_ser)
column_names = (
[col[0] for col in cursor_description] if cursor_description else [])
column_names = dedup(column_names)
cdf = dataframe.SupersetDataFrame(pd.DataFrame(
list(data), columns=column_names))
query.rows = cdf.size
query.progress = 100
query.status = QueryStatus.SUCCESS
if query.select_as_cta:
query.select_sql = '{}'.format(database.select_star(
query.tmp_table_name,
limit=query.limit,
schema=database.force_ctas_schema,
show_cols=False,
latest_partition=False,
))
query.end_time = utils.now_as_float()
session.merge(query)
session.flush()
payload.update({
'status': query.status,
'data': cdf.data if cdf.data else [],
'columns': cdf.columns if cdf.columns else [],
'query': query.to_dict(),
})
if store_results:
key = '{}'.format(uuid.uuid4())
logging.info("Storing results in results backend, key: {}".format(key))
json_payload = json.dumps(payload, default=utils.json_iso_dttm_ser)
results_backend.set(key, utils.zlib_compress(json_payload))
query.results_key = key
query.end_result_backend_time = utils.now_as_float()
session.merge(query)
session.commit()
if return_results:
return payload
| apache-2.0 |
tawsifkhan/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/tests/formats/test_printing.py | 8 | 4905 | # -*- coding: utf-8 -*-
import nose
from pandas import compat
import pandas.formats.printing as printing
import pandas.formats.format as fmt
import pandas.util.testing as tm
import pandas.core.config as cf
_multiprocess_can_split_ = True
def test_adjoin():
data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = printing.adjoin(2, *data)
assert (adjoined == expected)
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = printing.pprint_thing(b, quote_strings=True)
tm.assert_equal(res, repr(b))
res = printing.pprint_thing(b, quote_strings=False)
tm.assert_equal(res, b)
class TestFormattBase(tm.TestCase):
def test_adjoin(self):
data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = printing.adjoin(2, *data)
self.assertEqual(adjoined, expected)
def test_adjoin_unicode(self):
data = [[u'あ', 'b', 'c'], ['dd', u'ええ', 'ff'], ['ggg', 'hhh', u'いいい']]
expected = u'あ dd ggg\nb ええ hhh\nc ff いいい'
adjoined = printing.adjoin(2, *data)
self.assertEqual(adjoined, expected)
adj = fmt.EastAsianTextAdjustment()
expected = u"""あ dd ggg
b ええ hhh
c ff いいい"""
adjoined = adj.adjoin(2, *data)
self.assertEqual(adjoined, expected)
cols = adjoined.split('\n')
self.assertEqual(adj.len(cols[0]), 13)
self.assertEqual(adj.len(cols[1]), 13)
self.assertEqual(adj.len(cols[2]), 16)
expected = u"""あ dd ggg
b ええ hhh
c ff いいい"""
adjoined = adj.adjoin(7, *data)
self.assertEqual(adjoined, expected)
cols = adjoined.split('\n')
self.assertEqual(adj.len(cols[0]), 23)
self.assertEqual(adj.len(cols[1]), 23)
self.assertEqual(adj.len(cols[2]), 26)
def test_justify(self):
adj = fmt.EastAsianTextAdjustment()
def just(x, *args, **kwargs):
# wrapper to test single str
return adj.justify([x], *args, **kwargs)[0]
self.assertEqual(just('abc', 5, mode='left'), 'abc ')
self.assertEqual(just('abc', 5, mode='center'), ' abc ')
self.assertEqual(just('abc', 5, mode='right'), ' abc')
self.assertEqual(just(u'abc', 5, mode='left'), 'abc ')
self.assertEqual(just(u'abc', 5, mode='center'), ' abc ')
self.assertEqual(just(u'abc', 5, mode='right'), ' abc')
self.assertEqual(just(u'パンダ', 5, mode='left'), u'パンダ')
self.assertEqual(just(u'パンダ', 5, mode='center'), u'パンダ')
self.assertEqual(just(u'パンダ', 5, mode='right'), u'パンダ')
self.assertEqual(just(u'パンダ', 10, mode='left'), u'パンダ ')
self.assertEqual(just(u'パンダ', 10, mode='center'), u' パンダ ')
self.assertEqual(just(u'パンダ', 10, mode='right'), u' パンダ')
def test_east_asian_len(self):
adj = fmt.EastAsianTextAdjustment()
self.assertEqual(adj.len('abc'), 3)
self.assertEqual(adj.len(u'abc'), 3)
self.assertEqual(adj.len(u'パンダ'), 6)
        self.assertEqual(adj.len(u'ﾊﾟﾝﾀﾞ'), 5)
self.assertEqual(adj.len(u'パンダpanda'), 11)
        self.assertEqual(adj.len(u'ﾊﾟﾝﾀﾞpanda'), 10)
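        # Width bookkeeping: full-width characters such as 'パンダ' occupy two
        # terminal columns each (3 characters -> width 6), whereas half-width
        # katakana and ASCII occupy one column, giving the 6/5 and 11/10 pairs.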
def test_ambiguous_width(self):
adj = fmt.EastAsianTextAdjustment()
self.assertEqual(adj.len(u'¡¡ab'), 4)
with cf.option_context('display.unicode.ambiguous_as_wide', True):
adj = fmt.EastAsianTextAdjustment()
self.assertEqual(adj.len(u'¡¡ab'), 6)
data = [[u'あ', 'b', 'c'], ['dd', u'ええ', 'ff'],
['ggg', u'¡¡ab', u'いいい']]
expected = u'あ dd ggg \nb ええ ¡¡ab\nc ff いいい'
adjoined = adj.adjoin(2, *data)
self.assertEqual(adjoined, expected)
# TODO: fix this broken test
# def test_console_encode():
# """
# On Python 2, if sys.stdin.encoding is None (IPython with zmq frontend)
# common.console_encode should encode things as utf-8.
# """
# if compat.PY3:
# raise nose.SkipTest
# with tm.stdin_encoding(encoding=None):
# result = printing.console_encode(u"\u05d0")
# expected = u"\u05d0".encode('utf-8')
# assert (result == expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
MechCoder/scikit-learn | sklearn/decomposition/tests/test_pca.py | 9 | 21107 | import numpy as np
import scipy as sp
from itertools import product
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_less
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
solver_list = ['full', 'arpack', 'randomized', 'auto']
def test_pca():
# PCA on dense arrays
X = iris.data
for n_comp in np.arange(X.shape[1]):
pca = PCA(n_components=n_comp, svd_solver='full')
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
# test explained_variance_ratio_ == 1 with all components
pca = PCA(svd_solver='full')
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
def test_pca_arpack_solver():
# PCA on dense arrays
X = iris.data
d = X.shape[1]
# Loop excluding the extremes, invalid inputs for arpack
for n_comp in np.arange(1, d):
pca = PCA(n_components=n_comp, svd_solver='arpack', random_state=0)
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(d), 12)
pca = PCA(n_components=0, svd_solver='arpack', random_state=0)
assert_raises(ValueError, pca.fit, X)
# Check internal state
assert_equal(pca.n_components,
PCA(n_components=0,
svd_solver='arpack', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='arpack', random_state=0).svd_solver)
pca = PCA(n_components=d, svd_solver='arpack', random_state=0)
assert_raises(ValueError, pca.fit, X)
assert_equal(pca.n_components,
PCA(n_components=d,
svd_solver='arpack', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='arpack', random_state=0).svd_solver)
def test_pca_randomized_solver():
# PCA on dense arrays
X = iris.data
# Loop excluding the 0, invalid for randomized
for n_comp in np.arange(1, X.shape[1]):
pca = PCA(n_components=n_comp, svd_solver='randomized', random_state=0)
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
assert_raises(ValueError, pca.fit, X)
pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
assert_raises(ValueError, pca.fit, X)
# Check internal state
assert_equal(pca.n_components,
PCA(n_components=0,
svd_solver='randomized', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='randomized', random_state=0).svd_solver)
def test_no_empty_slice_warning():
# test if we avoid numpy warnings for computing over empty arrays
n_components = 10
n_features = n_components + 2 # anything > n_comps triggered it in 0.16
X = np.random.uniform(-1, 1, size=(n_components, n_features))
pca = PCA(n_components=n_components)
assert_no_warnings(pca.fit, X)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
# mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_greater(X.std(axis=0).std(), 43.8)
for solver, copy in product(solver_list, (True, False)):
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = PCA(n_components=n_components, whiten=True, copy=copy,
svd_solver=solver, random_state=0, iterated_power=7)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components),
decimal=6)
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = PCA(n_components=n_components, whiten=False, copy=copy,
svd_solver=solver).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_explained_variance():
    # Check that explained_variance_ agrees across solvers and matches the
    # empirical variance of the projected data
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2, svd_solver='full').fit(X)
apca = PCA(n_components=2, svd_solver='arpack', random_state=0).fit(X)
assert_array_almost_equal(pca.explained_variance_,
apca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
apca.explained_variance_ratio_, 3)
rpca = PCA(n_components=2, svd_solver='randomized', random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 1)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_pca = apca.transform(X)
assert_array_almost_equal(apca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_, np.var(X_rpca, axis=0),
decimal=1)
# Same with correlated data
X = datasets.make_classification(n_samples, n_features,
n_informative=n_features-2,
random_state=rng)[0]
pca = PCA(n_components=2).fit(X)
rpca = PCA(n_components=2, svd_solver='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 5)
def test_singular_values():
# Check that the PCA output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2, svd_solver='full',
random_state=rng).fit(X)
apca = PCA(n_components=2, svd_solver='arpack',
random_state=rng).fit(X)
rpca = PCA(n_components=2, svd_solver='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(pca.singular_values_, apca.singular_values_, 12)
assert_array_almost_equal(pca.singular_values_, rpca.singular_values_, 1)
assert_array_almost_equal(apca.singular_values_, rpca.singular_values_, 1)
# Compare to the Frobenius norm
X_pca = pca.transform(X)
X_apca = apca.transform(X)
X_rpca = rpca.transform(X)
assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
np.linalg.norm(X_pca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(apca.singular_values_**2.0),
np.linalg.norm(X_apca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(rpca.singular_values_**2.0),
np.linalg.norm(X_rpca, "fro")**2.0, 0)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(pca.singular_values_,
np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
assert_array_almost_equal(apca.singular_values_,
np.sqrt(np.sum(X_apca**2.0, axis=0)), 12)
assert_array_almost_equal(rpca.singular_values_,
np.sqrt(np.sum(X_rpca**2.0, axis=0)), 2)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=3, svd_solver='full', random_state=rng)
apca = PCA(n_components=3, svd_solver='arpack', random_state=rng)
rpca = PCA(n_components=3, svd_solver='randomized', random_state=rng)
X_pca = pca.fit_transform(X)
X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
X_pca[:, 0] *= 3.142
X_pca[:, 1] *= 2.718
X_hat = np.dot(X_pca, pca.components_)
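    # With unit-norm, mutually orthogonal score columns rescaled by the factors
    # above and the orthonormal rows of components_, X_hat has an explicit SVD
    # with singular values (3.142, 2.718, 1.0), which the refits should recover.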
pca.fit(X_hat)
apca.fit(X_hat)
rpca.fit(X_hat)
assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(apca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(rpca.singular_values_, [3.142, 2.718, 1.0], 14)
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
for solver in solver_list:
Yt = PCA(n_components=2, svd_solver=solver).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2, svd_solver='full').fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
for solver in solver_list:
pca = PCA(n_components=2, whiten=True, svd_solver=solver)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for solver in solver_list:
for n_components in [-1, 3]:
assert_raises(ValueError,
PCA(n_components, svd_solver=solver).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by randomized PCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2, svd_solver='randomized',
random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by randomized PCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = PCA(n_components=1, svd_solver='randomized',
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that randomized PCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = PCA(n_components=2, svd_solver='randomized', random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True, svd_solver='randomized',
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_less(relative_max_delta, 1e-5)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle', svd_solver='full').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
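    # (Intent inferred from the construction and assertion below: X is weak
    # isotropic noise plus a single strong rank-one direction, so the
    # _assess_dimension_ log-likelihood should be (near-)maximal at k = 1.)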
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2]) +
np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
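    # (Intent inferred: two small groups of samples are shifted along different
    # directions, so the inferred dimensionality should exceed 1.)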
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
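    # A float n_components in (0, 1) is stored verbatim in pca.n_components,
    # while pca.n_components_ becomes the smallest number of components whose
    # cumulative explained variance ratio reaches that fraction.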
pca = PCA(n_components=0.95, svd_solver='full')
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01, svd_solver='full')
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5, svd_solver='full').fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
for solver in solver_list:
pca = PCA(n_components=2, svd_solver=solver)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
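        # h is the expected per-sample log-likelihood of the generating
        # isotropic Gaussian (sigma = 0.1): -p/2 * log(2*pi*e*sigma^2),
        # so the probabilistic PCA score should be close to it.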
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
for solver in solver_list:
pca = PCA(n_components=2, svd_solver=solver)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives different scores if whiten=True
pca = PCA(n_components=2, whiten=True, svd_solver=solver)
pca.fit(X)
ll2 = pca.score(X)
assert_true(ll1 > ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k, svd_solver='full')
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
def test_svd_solver_auto():
rng = np.random.RandomState(0)
X = rng.uniform(size=(1000, 50))
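    # Each case below pins one branch of the 'auto' solver policy: 'full' for
    # small inputs, for a variance-fraction n_components, or when n_components
    # is a large share of min(X.shape); 'randomized' otherwise.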
# case: n_components in (0,1) => 'full'
pca = PCA(n_components=.5)
pca.fit(X)
pca_test = PCA(n_components=.5, svd_solver='full')
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
# case: max(X.shape) <= 500 => 'full'
pca = PCA(n_components=5, random_state=0)
Y = X[:10, :]
pca.fit(Y)
pca_test = PCA(n_components=5, svd_solver='full', random_state=0)
pca_test.fit(Y)
assert_array_almost_equal(pca.components_, pca_test.components_)
# case: n_components >= .8 * min(X.shape) => 'full'
pca = PCA(n_components=50)
pca.fit(X)
pca_test = PCA(n_components=50, svd_solver='full')
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
# n_components >= 1 and n_components < .8 * min(X.shape) => 'randomized'
pca = PCA(n_components=10, random_state=0)
pca.fit(X)
pca_test = PCA(n_components=10, svd_solver='randomized', random_state=0)
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
def test_deprecation_randomized_pca():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
depr_message = ("Class RandomizedPCA is deprecated; RandomizedPCA was "
"deprecated in 0.18 and will be "
"removed in 0.20. Use PCA(svd_solver='randomized') "
"instead. The new implementation DOES NOT store "
"whiten ``components_``. Apply transform to get them.")
def fit_deprecated(X):
global Y
rpca = RandomizedPCA(random_state=0)
Y = rpca.fit_transform(X)
assert_warns_message(DeprecationWarning, depr_message, fit_deprecated, X)
Y_pca = PCA(svd_solver='randomized', random_state=0).fit_transform(X)
assert_array_almost_equal(Y, Y_pca)
def test_pca_sparse_input():
X = np.random.RandomState(0).rand(5, 4)
X = sp.sparse.csr_matrix(X)
assert(sp.sparse.issparse(X))
for svd_solver in solver_list:
pca = PCA(n_components=3, svd_solver=svd_solver)
assert_raises(TypeError, pca.fit, X)
def test_pca_bad_solver():
X = np.random.RandomState(0).rand(5, 4)
pca = PCA(n_components=3, svd_solver='bad_argument')
assert_raises(ValueError, pca.fit, X)
| bsd-3-clause |
SoftwareLiteracyFoundation/BAM | gui.py | 1 | 60575 | '''tkinter Tcl/Tk GUI for the Bay Assessment Model (BAM)'''
# Python distribution modules
from subprocess import Popen
from datetime import timedelta, datetime
from collections import OrderedDict as odict
from os.path import exists as path_exists
from random import randint
strptime = datetime.strptime
# Note that these are separate modules:
# tkinter.filedialog
# tkinter.messagebox
# tkinter.font
import tkinter as Tk
from tkinter import messagebox
from tkinter import filedialog
from tkinter import ttk # tk themed widgets within tkinter (tkinter.ttk)
# Community modules
from numpy import linspace, isnan
from numpy import all as npall
from numpy import NaN as npNaN
from matplotlib.colors import ListedColormap
from matplotlib.colors import BoundaryNorm
from matplotlib.colorbar import ColorbarBase
# These matplotlib objects are only used in MouseClick
from matplotlib.lines import Line2D
from matplotlib.patches import Polygon
# Used in PlotData
from matplotlib.dates import YearLocator, MonthLocator, DayLocator
from matplotlib.dates import DateFormatter
from matplotlib.pyplot import cm
# Modules to embed matplotlib figure in a Tkinter window, see:
# http://matplotlib.org/examples/user_interfaces/embedding_in_tk.html
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends._backend_tk import NavigationToolbar2Tk
# Local modules
from init import InitTimeBasins
from init import GetBasinSalinityData
from init import GetBasinStageData
from init import GetTimeIndex
import constants
#---------------------------------------------------------------
#
#---------------------------------------------------------------
class GUI:
'''GUI : tkinter & ttk with embedded matplotlib figure.'''
def __init__( self, root, model ):
self.model = model
# GUI objects
self.Tk_root = root
self.figure = None # matplotlib figure set onto canvas
self.figure_axes = None #
self.canvas = None #
self.colorbar = None # legend
self.basinListBox = None # in the mainframe
self.basinListBoxMap = dict() # { basin name : listbox index }
self.shoalListBox = None # a Toplevel pop-up
self.gaugeListBox = None # a Toplevel pop-up
self.msgText = None # Tk.Text widget for gui messages
self.buttonStyle = ttk.Style() # Note that BAM.TButton is child class
self.buttonStyle.configure( 'BAM.TButton', font = constants.buttonFont )
self.checkButtonStyle = ttk.Style()
self.checkButtonStyle.configure('BAM.TCheckbutton',
font = constants.textFont )
self.mapOptionMenu = None # map plot variable selection
self.plotOptionMenu = None # timeseries plot variable selection
self.startTimeEntry = None # simulation start time
self.endTimeEntry = None # simulation end time
self.plotVar_IntVars = odict() # { plotVariable : Tk.IntVar() }
if not self.model.args.noGUI :
# Set Tk-wide Font default for filedialog
# But it doesn't set filedialog window or button fonts
#root.tk.call( "option", "add", "*Font", constants.textFont )
root.option_add( "*Font", constants.textFont )
self.mapPlotVariable = Tk.StringVar()
self.plotVariable = Tk.StringVar()
self.current_time_label = Tk.StringVar()
self.start_text = Tk.StringVar( value = model.args.start )
self.end_text = Tk.StringVar( value = model.args.end )
self.plot_dir = model.args.basinOutputDir
self.last_plot_dir = self.plot_dir
# matplotlib colors can be names ('red', 'blue', 'green') or
# R, G, B tuple in the range [0,1] or fraction of gray in [0,1] '0.5'
# These 10 colors define the legend and map color ranges
self.colors = [ [ 0., 0., 1. ], [ 0., .2, 1. ],
[ 0., .4, 1. ], [ 0., .6, 1. ],
[ 0., .8, 1. ], [ 1., .8, 0. ],
[ 1., .6, 0. ], [ 1., .4, 0. ],
[ 1., .2, 0. ], [ 1., 0., 0. ] ]
#------------------------------------------------------------------
#
#------------------------------------------------------------------
def FloridaBayModel_Tk ( self ) :
'''User interface for the Florida Bay Model'''
icon = None
try :
icon = Tk.PhotoImage( file = self.model.args.path +\
'data/init/PyFBM_icon.png' )
except :
icon = Tk.PhotoImage( file = self.model.args.path +\
'data/init/PyFBM_icon.gif' )
if icon :
self.Tk_root.iconphoto( True, icon )
# Create the main widget Frame (window) and a control frame
mainframe = ttk.Frame( self.Tk_root, padding = "3 3 3 3" )
controlframe = ttk.Frame( self.Tk_root, padding = "1 1 1 1" )
# Create matplotlib figure and the canvas it is rendered onto
self.figure = Figure( figsize = (5, 4), # width x height (in)
dpi = 150,
facecolor = 'grey' )
self.figure_axes = self.figure.add_axes( ( 0, 0, 1, 1 ),
frameon = False,
label = 'FloridaBayModel' )
# Map limits are UTM Zone 17R in (m)
self.figure_axes.set_xlim( ( 490000, 569000 ) )
self.figure_axes.set_ylim( ( 2742000, 2799000 ) )
self.canvas = FigureCanvasTkAgg( self.figure, master = mainframe )
self.canvas.mpl_connect( 'pick_event', self.MouseClick )
# Setup the menu bar
menuBar = Tk.Menu( self.Tk_root )
self.Tk_root.config( menu = menuBar )
# File Menu -----------------------------------------------
menuFile = Tk.Menu( menuBar, tearoff=False, font = constants.textFont )
menuBar.add_cascade( menu = menuFile, label = ' File ',
font = constants.textFont )
menuFile.add_command( label = ' Init', command = self.OpenInitFile )
menuFile.add_command( label = ' Edit', command = self.EditFile )
# Dir Menu -----------------------------------------------
menuDir = Tk.Menu( menuBar, tearoff=False, font = constants.textFont )
menuBar.add_cascade( menu = menuDir, label = ' Dir ',
font = constants.textFont )
menuDir.add_command( label = 'Plot Disk', command = self.GetPlotDir )
menuDir.add_command( label = ' Output ', command = self.GetOutputDir )
# Help Menu -----------------------------------------------
menuHelp = Tk.Menu( menuBar, tearoff=False, font = constants.textFont )
menuBar.add_cascade( menu = menuHelp, label = 'Help',
font = constants.textFont )
menuHelp.add_command( label = 'About', command = self.ShowAboutInfo )
# Entry for start and end time, register CheckTimeEntry validatecommand
checkTimeCommand = controlframe.register( self.CheckTimeEntry )
self.startTimeEntry = ttk.Entry( mainframe, width = 15,
font = constants.textFont,
justify = Tk.LEFT,
textvariable = self.start_text,
validatecommand = ( checkTimeCommand,
'%P', '%W' ),
validate = 'focusout' )
self.endTimeEntry = ttk.Entry( mainframe, width = 15,
font = constants.textFont,
justify = Tk.LEFT,
textvariable = self.end_text,
validatecommand = ( checkTimeCommand,
'%P', '%W' ),
validate = 'focusout' )
# Current model time
currentTimeLabel = Tk.Label( mainframe, width = 18, height = 1,
bg = 'white',
font = constants.textFont,
justify = Tk.CENTER,
textvariable = self.current_time_label )
# Text box for messages
self.msgText = Tk.Text( mainframe, height = 5,
background='white', font = constants.textFont )
self.Message( self.model.Version )
self.Message( self.model.args.commandLine + '\n' )
msgScrollBar = ttk.Scrollbar( mainframe, orient = Tk.VERTICAL,
command = self.msgText.yview )
self.msgText.configure( yscrollcommand = msgScrollBar.set )
# Basin Listbox
self.basinListBox = Tk.Listbox( mainframe, height = 5, width = 20,
selectmode = Tk.EXTENDED,
font = constants.textFont )
# Insert the basin names into the Listbox
# The listvariable = [] option won't work if
# there is whitespace in a name, so insert them manually
i = 0
for Basin in self.model.Basins.values() :
self.basinListBox.insert( i, Basin.name )
self.basinListBoxMap[ Basin.name ] = i
i = i + 1
# Listbox vertical scroll bar : calls model.basinListBox.yview
scrollBar = ttk.Scrollbar( mainframe, orient = Tk.VERTICAL,
command = self.basinListBox.yview )
# Tell the Listbox that it will scroll according to the scrollBar
self.basinListBox.configure( yscrollcommand = scrollBar.set )
# Listbox calls ProcessBasinListbox() when selection changes
self.basinListBox.bind('<<ListboxSelect>>', self.ProcessBasinListbox)
# Colorize alternating lines of the listbox
for i in range( 0, len( self.model.Basins.keys() ), 2):
self.basinListBox.itemconfigure( i, background = '#f0f0ff' )
#--------------------------------------------------------------------
# These widgets are in the control frame
# OptionMenu for map plot types
self.mapPlotVariable.set( constants.BasinMapPlotVariable[0] )
self.mapOptionMenu = Tk.OptionMenu( controlframe,
self.mapPlotVariable,
*constants.BasinMapPlotVariable )
self.mapOptionMenu.config ( font = constants.buttonFont )
self.mapOptionMenu['menu'].config( font = constants.buttonFont )
self.mapOptionMenu.config( bg = 'white' )
# Button for self.Init()
initButton = ttk.Button( controlframe, text = "Init",
style = 'BAM.TButton',
command = lambda : InitTimeBasins(self.model))
# Button for model.Run()
runButton = ttk.Button( controlframe, text = "Run",
style = 'BAM.TButton',
command = self.model.Run )
# Button for model.Pause()
pauseButton = ttk.Button( controlframe, text = "Pause",
style = 'BAM.TButton',
command = self.model.Pause )
# Button for model.Stop()
stopButton = ttk.Button( controlframe, text = "Stop",
style = 'BAM.TButton',
command = self.model.Stop )
# Button for model.GetRecordVariables()
recordVarButton = ttk.Button( controlframe, text = "Record",
style = 'BAM.TButton',
command = self.GetRecordVariables )
# OptionMenu for variable timeseries plot types
self.plotVariable.set( constants.BasinPlotVariable[0] )
self.plotOptionMenu = Tk.OptionMenu( controlframe, self.plotVariable,
*constants.BasinPlotVariable )
self.plotOptionMenu.config ( font = constants.buttonFont )
self.plotOptionMenu['menu'].config( font = constants.buttonFont )
self.plotOptionMenu.config( bg = 'white' )
# Button for model.PlotRunData()
plotRunButton = ttk.Button( controlframe, text = "Plot Run",
style = 'BAM.TButton',
command = self.PlotRunData )
# Button for model.PlotArchiveData()
plotArchiveButton = ttk.Button( controlframe, text = "Plot Disk",
style = 'BAM.TButton',
command = self.PlotArchiveData )
# Button for PlotGaugeSalinityData()
# Can't set text color in ttk Button, use standard Tk
plotGaugeSalinityButton = Tk.Button( controlframe, text = "Salinity",
command = self.PlotGaugeSalinityData,
font = constants.buttonFont,
foreground = 'blue' )
# Button for PlotGaugeStageData()
# Can't set text color in ttk Button, use standard Tk
plotGaugeStageButton = Tk.Button( controlframe, text = "Stage",
command = self.PlotGaugeStageData,
font = constants.buttonFont,
foreground = 'blue' )
#-------------------------------------------------------------------
# Setup the window layout with the 'grid' geometry manager.
# The value of the "sticky" option is a string of 0 or more of the
# compass directions N S E W, specifying which edges of the cell the
# widget should be "stuck" to.
mainframe.grid( row = 0, column = 0, sticky = (Tk.N, Tk.W, Tk.E, Tk.S) )
controlframe.grid( in_ = mainframe, row = 1, column = 1,
rowspan = 3, sticky = (Tk.N, Tk.S) )
#-------------------------------------------------------------------
# Grid all the widgets - This is the layout of the window
# This application has 5 columns and 4 rows
# Column 1 row 1 has the controlframe with its own grid manager
# col 0 | col 1 | col 2 | col 3 | col 4
# row 0 Basin List | <----------- Message Text ----------->
# row 1 \/ | Controls | Model Time | Start | End
# row 2 \/ | \/ | <---------- Map --------->
# row 3 \/ | \/ | \/
#
#-------------------------------------------------------------------
self.basinListBox.grid( column = 0, row = 0, rowspan = 4,
sticky = (Tk.N,Tk.S,Tk.W,Tk.E) )
scrollBar.grid( column = 0, row = 0, rowspan = 4,
sticky = (Tk.E,Tk.N,Tk.S) )
self.msgText.grid( column = 1, row = 0, columnspan = 4,
sticky = (Tk.N,Tk.S,Tk.W,Tk.E) )
msgScrollBar.grid( column = 4, row = 0,
sticky = (Tk.E,Tk.N,Tk.S) )
currentTimeLabel.grid ( column = 2, row = 1 )
self.startTimeEntry.grid ( column = 3, row = 1 )
self.endTimeEntry.grid ( column = 4, row = 1 )
#-------------------------------------------------------------
# controlframe.grid is set above
self.mapOptionMenu.grid( in_ = controlframe, row = 0 )
initButton.grid ( in_ = controlframe, row = 1 )
runButton.grid ( in_ = controlframe, row = 2 )
pauseButton.grid ( in_ = controlframe, row = 3 )
stopButton.grid ( in_ = controlframe, row = 4 )
ttk.Separator( orient = Tk.HORIZONTAL ).grid( in_ = controlframe,
row = 5, pady = 5,
sticky = (Tk.E,Tk.W) )
recordVarButton.grid ( in_ = controlframe, row = 6 )
ttk.Separator( orient = Tk.HORIZONTAL ).grid( in_ = controlframe,
row = 7, pady = 5,
sticky = (Tk.E,Tk.W) )
self.plotOptionMenu.grid( in_ = controlframe, row = 8 )
plotRunButton.grid ( in_ = controlframe, row = 9 )
plotArchiveButton.grid ( in_ = controlframe, row = 10 )
ttk.Separator( orient = Tk.HORIZONTAL ).grid( in_ = controlframe,
row = 11, pady = 5,
sticky = (Tk.E,Tk.W) )
plotGaugeSalinityButton.grid( in_ = controlframe, row = 12,
sticky = (Tk.E,Tk.W) )
plotGaugeStageButton.grid ( in_ = controlframe, row = 13,
sticky = (Tk.E,Tk.W) )
#-------------------------------------------------------------
self.canvas.get_tk_widget().grid ( column = 2, row = 2,
columnspan = 3, rowspan = 2,
sticky = (Tk.N,Tk.S,Tk.W,Tk.E) )
# For each widget in the mainframe, set some padding around
# the widget to space things out and look better
for child in mainframe.winfo_children():
child.grid_configure( padx = 2, pady = 2 )
# Add a Sizegrip to make resizing easier.
#ttk.Sizegrip( mainframe ).grid( column = 99, row = 99,
# sticky = (Tk.N,Tk.S,Tk.E,Tk.W))
# Setup the resize control with the 'grid' geometry manager.
# Every column and row has a "weight" grid option associated with it,
# which tells it how much it should grow if there is extra room in
# the master to fill. By default, the weight of each column or row
# is 0, meaning don't expand to fill space. Here we set the weight
# to 1 telling the widget to expand and fill space as the window
# is resized.
# Make Sure to set on the root window!!!
self.Tk_root.columnconfigure( 0, weight = 1 )
self.Tk_root.rowconfigure ( 0, weight = 1 )
mainframe.columnconfigure( 0, weight = 0 )
mainframe.columnconfigure( 1, weight = 0 )
mainframe.columnconfigure( 2, weight = 1 )
#mainframe.columnconfigure( 3, weight = 1 )
#mainframe.columnconfigure( 4, weight = 1 )
mainframe.rowconfigure ( 0, weight = 1 )
mainframe.rowconfigure ( 1, weight = 0 )
mainframe.rowconfigure ( 2, weight = 1 )
mainframe.rowconfigure ( 3, weight = 1 )
# MapPlotVarUpdate() will refresh the legend on mapPlotVariable changes
self.mapPlotVariable.trace( 'w', self.MapPlotVarUpdate )
self.RenderShoals( init = True )
self.PlotLegend( "FloridaBayModel" )
self.canvas.draw()
#------------------------------------------------------------------
#
#------------------------------------------------------------------
def Message ( self, msg ) :
'''Display message in msgText box or on console, log to run_info.'''
if not self.model.args.noGUI :
self.msgText.insert( Tk.END, msg )
self.msgText.see ( Tk.END )
else :
print( msg, end = '' )
self.model.run_info.append( msg )
#------------------------------------------------------------------
#
#------------------------------------------------------------------
def MapPlotVarUpdate ( self, *args ) :
'''User has changed mapPlotVariable, update the legend.'''
if self.model.args.DEBUG_ALL :
print( '-> MapPlotVarUpdate: ', args[0], ', ', args[2] )
self.PlotLegend( "MapPlotVarUpdate" )
self.canvas.draw()
#-----------------------------------------------------------
#
#-----------------------------------------------------------
def InitPlotVars( self ):
'''Create map of plotVariables and Tk.IntVar() to associate
with the checkButtons accessed in GetRecordVariables(). These
Tk.IntVars are held in the plotVar_IntVars map, and read in
SetRecordVariables() to determine which variables will be
recorded into the basin.plot_variables maps for eventual
plotting and archiving'''
if self.model.args.DEBUG_ALL :
print( '-> InitPlotVars' )
self.plotVar_IntVars.clear()
for plotVariable in constants.BasinPlotVariable :
if not self.model.args.noGUI :
self.plotVar_IntVars[ plotVariable ] = Tk.IntVar()
else :
# Use the IntVar() class defined below
self.plotVar_IntVars[ plotVariable ] = IntVar()
# Set Salinity, Stage, Flow, Volume, Rain, ET as defaults
self.plotVar_IntVars[ 'Salinity' ].set( 1 )
self.plotVar_IntVars[ 'Stage' ].set( 1 )
self.plotVar_IntVars[ 'Flow' ].set( 1 )
self.plotVar_IntVars[ 'Volume' ].set( 1 )
self.plotVar_IntVars[ 'Rain' ].set( 1 )
self.plotVar_IntVars[ 'Evaporation' ].set( 1 )
self.plotVar_IntVars[ 'Runoff' ].set( 1 )
# Initialize the basin.plot_variables
for plotVariable, intVar in self.plotVar_IntVars.items() :
for basin in self.model.Basins.values() :
basin.plot_variables.clear()
if intVar.get() :
basin.plot_variables[ plotVariable ] = []
#-----------------------------------------------------------
#
#-----------------------------------------------------------
def GetRecordVariables( self ):
'''Pop up checkbuttons to select basin variables to record'''
if self.model.args.DEBUG_ALL :
print( '-> GetRecordVariables' )
#-------------------------------------------------------
# A top level pop up widget
top = Tk.Toplevel()
top.wm_title( 'Variables' )
top.minsize( width = 150, height = 100 )
top.grid()
setButton = ttk.Button( top, text = "Set",
style = 'BAM.TButton',
command = self.SetRecordVariables )
closeButton = ttk.Button( top, text = "Close",
style = 'BAM.TButton',
command = lambda: top.destroy() )
checkButtons = odict()
for plotVariable in constants.BasinPlotVariable :
checkButtons[ plotVariable ] = \
ttk.Checkbutton( top, text = plotVariable,
style = 'BAM.TCheckbutton',
variable = self.plotVar_IntVars[plotVariable])
for checkButton in checkButtons.values() :
checkButton.grid( sticky = Tk.W,
padx = 30, pady = 3 )
setButton.grid ( padx = 15, pady = 2 )
closeButton.grid( padx = 15, pady = 2 )
#-----------------------------------------------------------
#
#-----------------------------------------------------------
def SetRecordVariables( self ):
'''Callback method for Set button in GetRecordVariables().
Sets the basin.plot_variables entries based on the selected
plotVariable checkboxes in GetRecordVariables()'''
if self.model.args.DEBUG_ALL :
print( '-> SetRecordVariables' )
for plotVariable, intVar in self.plotVar_IntVars.items() :
print( plotVariable, ' : ', intVar, '=', intVar.get() )
# Reset time and basins
InitTimeBasins( self.model )
msg ='*** All records erased, time reset to start time, basins reset.\n'
self.Message( msg )
for plotVariable, intVar in self.plotVar_IntVars.items() :
for basin in self.model.Basins.values() :
basin.plot_variables.clear()
if intVar.get() :
basin.plot_variables[ plotVariable ] = []
#----------------------------------------------------------------
#
#----------------------------------------------------------------
def PlotRunData( self ) :
'''Plot data for selected basins from the current simulation.'''
if self.model.args.DEBUG_ALL :
print( '-> PlotRunData', flush = True )
# Get a list of Basins from the Listbox
BasinList = self.GetBasinListbox()
if len( BasinList ) == 0 :
msg = '\nPlotRunData: No basins are selected.\n'
self.Message( msg )
return
# Get the plotVariable type from the plotOptionMenu
plotVariable = self.plotVariable.get()
# Get the data
dataList = []
basinNames = []
for Basin in BasinList :
if plotVariable not in Basin.plot_variables.keys() :
msg = '\nPlotRunData: ' + plotVariable + ' data ' +\
'is not present for basin ' + Basin.name + '.\n'
self.Message( msg )
return
dataList.append( Basin.plot_variables[ plotVariable ] )
basinNames.append( Basin.name )
self.PlotData( self.model.times, dataList, basinNames, plotVariable,
period_record_days = self.model.simulation_days )
#----------------------------------------------------------------
#
#----------------------------------------------------------------
def PlotArchiveData( self ) :
'''Plot data from a previous run stored on disk.'''
if self.model.args.DEBUG :
print( '-> PlotArchiveData', flush = True )
# Get a list of Basins from the Listbox
BasinList = self.GetBasinListbox()
if len( BasinList ) == 0 :
msg = '\nPlotArchiveData: No basins are selected.\n'
self.Message( msg )
return
self.last_plot_dir = self.plot_dir
if self.model.args.DEBUG :
print( self.plot_dir )
# Get the data into
all_times = []
times = []
basinNames = []
dataList = []
# Get the plotVariable type from the plotOptionMenu
plotVariable = self.plotVariable.get()
for Basin in BasinList :
basinNames.append( Basin.name )
# Read the basin .csv data to get [times] and [data]
file_name = self.plot_dir + '/' + \
Basin.name + self.model.args.runID + '.csv'
try :
fd = open( file_name, 'r' )
except OSError as err :
msg = "\nPlotArchiveData: OS error: {0}\n".format( err )
self.Message( msg )
return
rows = fd.readlines()
fd.close()
# Time, Stage (m), Flow (m^3/t), Salinity (ppt), Volume (m^3)
# 2000-01-01 00:00:00, 0.0, 0.0, 37.0, 52475622.557
# 2000-01-01 01:00:00, -0.0, 45.772, 37.0, 52459564.487
variables = rows[ 0 ].split(',')
for i in range( len( variables ) ) :
variables[ i ] = variables[ i ].strip()
# column index for Time
time_col_i = variables.index( 'Time' )
# column index for plotVariable
try :
unit_str = constants.PlotVariableUnit[ plotVariable ]
data_col_i = variables.index( plotVariable + ' ' + unit_str )
except ValueError as err :
msg = "\nPlotArchiveData: {0}\n".format( err )
self.Message( msg )
return
# Get all times in file from first Basin in BasinList
if Basin == BasinList[ 0 ] :
for i in range( 1, len( rows ) ) :
words = rows[ i ].split(',')
time_i = datetime.strptime( words[ time_col_i ].strip(),
'%Y-%m-%d %H:%M:%S' )
all_times.append( time_i )
# Find index in dates for start_time & end_time
start_i, end_i = GetTimeIndex( plotVariable, all_times,
self.model.start_time,
self.model.end_time )
# Populate only data needed for the simulation timeframe
times = all_times[ start_i : end_i + 1 ]
data = []
for i in range( start_i, end_i + 1 ) :
row = rows[ i + 1 ]
words = row.split(',')
value_string = words[ data_col_i ]
if 'NA' in value_string :
data.append( npNaN )
else :
data.append( float( value_string ) )
# If data is all NA don't plot
if npall( isnan( data ) ) :
msg = "\nPlotArchiveData: " + plotVariable + ' for basin ' +\
Basin.name + ' does not exist.\n'
self.Message( msg )
else :
dataList.append( data )
period_record = times[ len( times ) - 1 ] - times[ 0 ] # timedelta
self.PlotData( times, dataList, basinNames, plotVariable,
period_record_days = period_record.days,
path = ' from: ' + self.plot_dir )
#----------------------------------------------------------------
#
#----------------------------------------------------------------
def PlotGaugeSalinityData( self ) :
'''Plot salinity data from gauge observations.'''
if self.model.args.DEBUG :
print( '-> PlotGaugeSalinityData', flush = True )
BasinList = self.GetBasinListbox()
if len( BasinList ) == 0 :
msg = '\nPlotGaugeSalinityData: No basins are selected.\n'
self.Message( msg )
return
basin_names = [ Basin.name for Basin in BasinList ]
# Read the salinity .csv gauge data to get [times] and [data]
if not self.model.salinity_data :
GetBasinSalinityData( self.model )
# plotVariables are salinity stations IDs : 'MD', 'GB'...
plotVariables = []
for Basin in BasinList :
if Basin.salinity_station :
plotVariables.append( Basin.salinity_station )
# Get times[] from model.salinity_data.keys()
times = [ datetime( year = key_tuple[0],
month = key_tuple[1],
day = key_tuple[2] )
for key_tuple in self.model.salinity_data.keys() ]
# Get data
dataList = []
for plotVariable in plotVariables :
data = []
for key in self.model.salinity_data.keys() :
data.append( self.model.salinity_data[key][plotVariable] )
dataList.append( data )
if not dataList :
msg = 'No salinity gauge data for these basins.\n'
self.Message( msg )
return
period_record = times[ -1 ] - times[ 0 ] # timedelta
self.PlotData( times, dataList,
basinNames = basin_names,
plotVariable = 'Salinity',
period_record_days = period_record.days,
title = 'Gauge: ',
path = ' from: ' + self.model.args.salinityFile )
#----------------------------------------------------------------
#
#----------------------------------------------------------------
def PlotGaugeStageData( self ) :
'''Plot stage data from gauge observations.'''
if self.model.args.DEBUG :
print( '-> PlotGaugeStageData', flush = True )
BasinList = self.GetBasinListbox()
if len( BasinList ) == 0 :
msg = '\nPlotGaugeStageData: No basins are selected.\n'
self.Message( msg )
return
basin_names = [ Basin.name for Basin in BasinList ]
# Read the stage .csv gauge data to get [times] and [data]
if not self.model.stage_data :
GetBasinStageData( self.model )
# plotVariables are stations IDs : 'MD', 'GB'...
# which are the same as the salinity_station
plotVariables = []
for Basin in BasinList :
if Basin.salinity_station :
plotVariables.append( Basin.salinity_station )
# Get times[] from model.salinity_data.keys()
times = [ datetime( year = key_tuple[0],
month = key_tuple[1],
day = key_tuple[2] )
for key_tuple in self.model.stage_data.keys() ]
# Get data
dataList = []
for plotVariable in plotVariables :
data = []
for key in self.model.stage_data.keys() :
try :
data.append( self.model.stage_data[ key ][ plotVariable ] )
except KeyError :
msg = 'No stage gauge data for ' + plotVariable + '.\n'
self.Message( msg )
break
if data :
dataList.append( data )
if not dataList :
msg = 'No stage gauge data for these basins.\n'
self.Message( msg )
return
period_record = times[ -1 ] - times[ 0 ] # timedelta
self.PlotData( times, dataList,
basinNames = basin_names,
plotVariable = 'Stage',
period_record_days = period_record.days,
title = 'Gauge: ',
path = ' from: ' + self.model.args.basinStage )
#----------------------------------------------------------------
#
#----------------------------------------------------------------
def PlotData( self, time, dataList, basinNames, plotVariable,
period_record_days, title = 'Basins: ', path = '' ) :
        '''Plot timeseries of plotVariable for the given basins in a Toplevel pop-up window.'''
if self.model.args.DEBUG_ALL :
print( '-> PlotData', flush = True )
            for basin_name in basinNames :
                print( '\t', basin_name, '\t: ', plotVariable )
if not len( dataList ) or not len( time ):
return
#-------------------------------------------------------
# A top level pop up widget
top = Tk.Toplevel()
top.wm_title( title + plotVariable + path )
colors = iter( cm.rainbow( linspace( 0, 1, len( dataList ) ) ) )
color = next( colors )
figure = Figure( figsize = ( 8, 5 ), dpi = 100 )
axes = figure.add_subplot( 111, label = "PlotData" )
axes.plot( time, dataList[ 0 ], label = basinNames[ 0 ],
linewidth = 2, color = color )
for i in range( 1, len( dataList ) ) :
color = next( colors )
axes.plot( time, dataList[ i ],
label = basinNames[ i ],
linewidth = 2, color = color )
axes.set_xlabel( 'Date' )
axes.set_ylabel( plotVariable + ' ' +\
constants.PlotVariableUnit[ plotVariable ] )
axes.fmt_xdata = DateFormatter('%Y-%m-%d')
# matplotlib does not default ticks well... arghhh
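        # Pick major/minor date tick spacing from the span being plotted:
        # day ticks for short records, month ticks up to about a year, then
        # year ticks with month minors for multi-year records.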
if period_record_days < 15 :
axes.xaxis.set_major_locator ( DayLocator() )
axes.xaxis.set_major_formatter( DateFormatter('%d') )
elif period_record_days < 91 :
axes.xaxis.set_major_locator ( MonthLocator() )
axes.xaxis.set_major_formatter( DateFormatter('%m-%d') )
axes.xaxis.set_minor_locator ( DayLocator(bymonthday=[7,14,21]))
axes.xaxis.set_minor_formatter( DateFormatter('%d') )
elif period_record_days < 181 :
axes.xaxis.set_major_locator ( MonthLocator() )
axes.xaxis.set_major_formatter( DateFormatter('%b-%d') )
axes.xaxis.set_minor_locator ( DayLocator(bymonthday=[15]))
axes.xaxis.set_minor_formatter( DateFormatter('%d') )
elif period_record_days < 366 :
axes.xaxis.set_major_locator ( MonthLocator() )
axes.xaxis.set_major_formatter( DateFormatter('%b') )
elif period_record_days < 731 :
axes.xaxis.set_major_locator ( YearLocator() )
axes.xaxis.set_major_formatter( DateFormatter('%Y') )
axes.xaxis.set_minor_locator ( MonthLocator(bymonth=[3,5,7,9,11]))
axes.xaxis.set_minor_formatter( DateFormatter('%b') )
elif period_record_days < 1826 :
axes.xaxis.set_major_locator ( YearLocator() )
axes.xaxis.set_major_formatter( DateFormatter('%Y') )
axes.xaxis.set_minor_locator ( MonthLocator(bymonth=[7]) )
axes.xaxis.set_minor_formatter( DateFormatter('%b') )
else :
axes.xaxis.set_major_locator ( YearLocator() )
axes.xaxis.set_major_formatter( DateFormatter('%Y') )
legend = axes.legend( loc = 'upper center', fontsize = 9,
frameon = False, labelspacing = None )
#figure.autofmt_xdate( bottom = 0.2, rotation = 90 )
figure.set_tight_layout( True ) # tight_layout()
# Tk.DrawingArea
canvas = FigureCanvasTkAgg( figure, master = top )
try :
canvas.draw()
except RuntimeError as err :
msg = "\nPlotData: {0}. \n".format( err ) +\
" Try setting the start/end time to cover the data record.\n"
self.Message( msg )
top.destroy()
return
canvas.get_tk_widget().pack( side = Tk.TOP, fill = Tk.BOTH,
expand = True )
toolbar = NavigationToolbar2Tk( canvas, top )
toolbar.update()
canvas._tkcanvas.pack( side = Tk.TOP, fill = Tk.BOTH, expand = True )
#----------------------------------------------------------------
#
#----------------------------------------------------------------
def RenderBasins( self, init = False ):
        '''Draw the basin polygons on the map, or re-color them on subsequent calls.'''
for Basin in self.model.Basins.values() :
if Basin.boundary_basin :
continue
basin_xy = Basin.basin_xy
if basin_xy is None :
continue
basin_name = Basin.name
if init :
# Initialize Basin.color with salinity color
Basin.SetBasinMapColor( 'Salinity',
self.model.args.salinity_legend_bounds )
if not Basin.Axes_fill :
PolygonList = self.figure_axes.fill(
basin_xy[:,0], basin_xy[:,1],
fc = Basin.color,
ec = 'white',
zorder = -1,
picker = True,
label = basin_name ) # NOTE: this is a list...!
Basin.Axes_fill = PolygonList[ 0 ]
else :
# Don't call fill() again if not init, it creates a new Polygon
Basin.Axes_fill.set_color( Basin.color )
#----------------------------------------------------------------
#
#----------------------------------------------------------------
def RenderShoals( self, init = False ):
        '''Draw the shoal lines on the map on init; afterwards just reset their color.'''
for Shoal in self.model.Shoals.values():
line_xy = Shoal.line_xy
if line_xy is None :
continue
shoal_number = Shoal.name
if init :
Line2D_List = self.figure_axes.plot( line_xy[:,0],
line_xy[:,1],
#color,
linewidth = 3,
label = shoal_number,
picker = True )
Shoal.Axes_plot = Line2D_List[ 0 ]
else :
Shoal.Axes_plot.set_color( (1, 1, 1) )
#----------------------------------------------------------------
#
#----------------------------------------------------------------
def PlotLegend( self, label, init = True ) :
        '''Draw or update the horizontal colorbar legend for the selected map plot variable.'''
if self.model.args.DEBUG_ALL:
print( '-> PlotLegend', flush = True )
# Add an axes at position rect [left, bottom, width, height]
# where all quantities are in fractions of figure width and height.
# Just returns the existing axis if it already exists
legendAxis = self.figure.add_axes( [ 0.05, 0.95, 0.7, 0.03 ],
label = 'PlotLegend' + label )
legend_color_map = ListedColormap( self.colors )
#-----------------------------------------------------------
# Set appropriate legend and data type for map plot
plotVariable = self.mapPlotVariable.get()
legend_bounds = None
legend_label = None
if plotVariable == 'Salinity' :
legend_bounds = self.model.args.salinity_legend_bounds
legend_label = 'Salinity (ppt)'
elif plotVariable == 'Stage' :
legend_bounds = self.model.args.stage_legend_bounds
legend_label = 'Stage (m)'
elif plotVariable == 'Temperature' or plotVariable == 'Phosphate' or \
plotVariable == 'Nitrate' or plotVariable == 'Ammonium' or \
plotVariable == 'Oxygen' or plotVariable == 'TOC' :
msg = '\nInvalid map legend variable selected, showing Stage.\n'
self.Message( msg )
legend_label = 'Stage (m)'
legend_bounds = self.model.args.stage_legend_bounds
else :
msg = '\nError. Failed to find map legend type, showing Stage.\n'
self.Message( msg )
legend_label = 'Stage (m)'
legend_bounds = self.model.args.stage_legend_bounds
#-----------------------------------------------------------
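        # BoundaryNorm maps each interval between consecutive legend_bounds
        # entries onto one of the ten discrete colors in self.colors.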
norm = BoundaryNorm( legend_bounds, legend_color_map.N )
if init :
self.colorbar = ColorbarBase(
legendAxis,
cmap = legend_color_map,
norm = norm,
ticklocation = 'bottom',
ticks = legend_bounds,
#boundaries = legend_bounds,
spacing = 'proportional', #'uniform',
orientation = 'horizontal' )
else:
self.colorbar.set_norm( norm )
self.colorbar.update_ticks( legend_bounds )
self.colorbar.set_label( legend_label, size = 12 )
self.colorbar.ax.tick_params( labelsize = 10 )
#----------------------------------------------------------------
#
#----------------------------------------------------------------
def MouseClick( self, event ):
'''For event.mouseevent see:
http://matplotlib.org/api/backend_bases_api.html
#matplotlib.backend_bases.MouseEvent'''
if self.model.args.DEBUG_ALL:
print( '-> MouseClick' )
print( 'event.mouseevent: ', event.mouseevent )
print( 'event.mouseevent.button: ', event.mouseevent.button )
print( 'event.artist: ', event.artist )
left_click = False
middle_click = False
right_click = False
BasinObj = None
if event.mouseevent.button == 1 :
left_click = True
elif event.mouseevent.button == 2 :
middle_click = True
elif event.mouseevent.button == 3 :
right_click = True
#--------------------------------------------------------
# Left Click selects an object, either Basin or Shoal
# and prints its info
if left_click :
# Shoals are instantiated as Line2D
if isinstance( event.artist, Line2D ):
line = event.artist
# find the Shoal object
for shoal_number, Shoal in self.model.Shoals.items() :
if Shoal.Axes_plot == None :
continue
                    if Shoal.Axes_plot.get_label() == line.get_label() :
Shoal.Print( shoal_number )
break
# Basins are instantiated as Polygon
elif isinstance( event.artist, Polygon ):
patch = event.artist
# Find the Basin object
Basin = None
for Basin in self.model.Basins.values() :
if Basin.Axes_fill == None :
continue
if Basin.Axes_fill.get_label() == patch.get_label() :
Basin.Print()
break
# Select this basin in the basinListBox
self.basinListBox.selection_clear( first = 1,
last = max( self.basinListBoxMap.values() ) )
self.basinListBox.selection_set(
self.basinListBoxMap[ Basin.name ] )
self.basinListBox.see( self.basinListBoxMap[ Basin.name ] )
#--------------------------------------------------------
        # Middle Click selects a Basin and prints its info
elif middle_click :
# Basins are instantiated as Polygon
if isinstance( event.artist, Polygon ):
patch = event.artist
# Find the Basin object
for Basin in self.model.Basins.values() :
if Basin.Axes_fill == None :
continue
if Basin.Axes_fill.get_label() == patch.get_label() :
Basin.Print()
break
#--------------------------------------------------------
# Right Click selects a Basin and pops up its Shoal list
# Selecting a listbox item prints the shoal information
elif right_click :
# Basins are instantiated as Polygon
if isinstance( event.artist, Polygon ):
patch = event.artist
# Find the Basin object
for Basin in self.model.Basins.values() :
if Basin.Axes_fill == None :
continue
if Basin.Axes_fill.get_label() == patch.get_label() :
BasinObj = Basin
if self.model.args.DEBUG_ALL :
print( '<<<< Basin Right Click:',
Basin.name, ' Shoals:', Basin.shoal_nums,
'>>>>', flush = True )
break
if BasinObj :
# A top level pop up widget
top = Tk.Toplevel()
top.title( BasinObj.name + ' Shoals' )
# Shoal Listbox
self.shoalListBox = Tk.Listbox( top, height = 11, width = 30,
selectmode = Tk.EXTENDED,
font = constants.textFont )
# Insert shoal numbers into the Listbox
# The listvariable = [] option won't work if
# there is whitespace in a name, so insert them manually
i = 1
for shoal_number in BasinObj.shoal_nums :
Basin_A_key = self.model.Shoals[ shoal_number ].Basin_A_key
Basin_B_key = self.model.Shoals[ shoal_number ].Basin_B_key
shoalInfo = str( shoal_number ) + ' ' +\
self.model.Basins[ Basin_A_key ].name + ' : ' +\
self.model.Basins[ Basin_B_key ].name
self.shoalListBox.insert( i, shoalInfo )
i = i + 1
# Create a vertical scroll bar for the Listbox
# Call the Listbox yview func when the user moves the scrollbar
scrollBar = ttk.Scrollbar( top, orient = Tk.VERTICAL,
command = self.shoalListBox.yview )
# Tell the Listbox it will scroll according to the scrollBar
self.shoalListBox.configure( yscrollcommand = scrollBar.set )
# Colorize alternating lines of the listbox
for i in range( 0, len( BasinObj.shoal_nums ), 2):
self.shoalListBox.itemconfigure( i, background = '#f0f0ff' )
# Can use pack here since this is a standalone widget that
# doesn't interact with a grid geometry
scrollBar.pack( side = Tk.LEFT, expand = True, fill = Tk.Y )
self.shoalListBox.pack( side = Tk.RIGHT, expand = True,
fill = Tk.BOTH )
# Tell Listbox to call ProcessShoalListbox()
self.shoalListBox.bind( '<<ListboxSelect>>',
self.ProcessShoalListbox )
#----------------------------------------------------------------
#
#----------------------------------------------------------------
def ProcessShoalListbox( self, *args ):
'''Read the Shoal listbox selection, Print the Shoal info.'''
# \n separated items in one string
selected = self.shoalListBox.selection_get()
shoal_list = selected.split( '\n' ) # A list of strings
if self.model.args.DEBUG_ALL:
print( '-> ProcessShoalListbox() ', len( shoal_list ), '\n',
shoal_list, flush = True )
# Find the Shoal objects and store in Shoal_list
Shoal_list = []
for shoal_info in shoal_list :
words = shoal_info.split()
shoal_number = int( words[ 0 ] )
Shoal = self.model.Shoals[ shoal_number ]
Shoal_list.append( Shoal )
Shoal.Print( shoal_number )
# return Shoal_list
#----------------------------------------------------------------
#
#----------------------------------------------------------------
def ProcessBasinListbox( self, *args ):
'''Print the Basin listbox selections'''
Basin_list = self.GetBasinListbox()
for Basin in Basin_list :
Basin.Print()
#----------------------------------------------------------------
#
#----------------------------------------------------------------
def GetBasinListbox( self ):
'''Read the Basin listbox selection.
Return a list of Basin objects.'''
# Get the names of selected basins
# \n separated items in one string
try :
selected = self.basinListBox.selection_get()
except Tk._tkinter.TclError :
# Nothing is selected
return []
basin_name_list = selected.split( '\n' ) # A list of strings
if self.model.args.DEBUG_ALL :
print( '-> GetBasinListbox() ', len( basin_name_list ), '\n',
basin_name_list, flush = True )
# Find the Basin objects and store in Basin_list
Basin_list = []
for basin_name in basin_name_list :
for Basin in self.model.Basins.values() :
# Since the Basins keys are basin numbers, match the names
if Basin.name == basin_name :
Basin_list.append( Basin )
return Basin_list
#-----------------------------------------------------------
#
#-----------------------------------------------------------
def ShowAboutInfo( self ):
messagebox.showinfo( message = self.model.Version )
#-----------------------------------------------------------
#
#-----------------------------------------------------------
def OpenInitFile( self ):
        '''Open a basin initialization file and call InitTimeBasins.'''
if self.model.args.DEBUG_ALL :
print( '-> OpenInitFile(): ', flush = True )
input_file = filedialog.askopenfilename(
initialdir = self.model.args.path + 'data/init/',
initialfile = 'Basin_Initial_Values.csv',
filetypes = [('Basin Init Files', '*.csv')],
multiple = False,
title = 'Basin Initialization File' )
if not input_file :
return
        # Since we store the path and files separately, but input_file
# contains the whole path, strip off the model base path.
# Since askopenfilename returns the entire path that may not
# have the same prefix specified in args.path (since args.path
# may be referring to a symbolic link), strip off everything
# prior to data/init : this is stupid since it now requires
# this file to reside in data/init...
input_file = input_file[ input_file.rfind( 'data/init/' ) : ]
self.model.args.basinInit = input_file
InitTimeBasins( self.model )
#-----------------------------------------------------------
#
#-----------------------------------------------------------
def EditFile( self ):
'''Edit a file with the current editor.'''
if self.model.args.DEBUG_ALL:
print( '-> EditFile(): ', flush = True )
edit_file = filedialog.askopenfilename(
initialdir = self.model.args.path,
# initialfile = '',
filetypes = [ ('Data', '*.csv'),
('Source', '*.py' ),
('R', '*.R' ),
('All', '*' ) ],
multiple = False )
if not edit_file :
return
cmdLine = self.model.args.editor + ' ' + edit_file.replace(' ', '\ ')
try :
sp = Popen( cmdLine, shell = True )
except OSError as err:
msg = "\nEditFile: OS error: {0}\n".format( err )
self.Message( msg )
except ValueError as err:
msg = "\nEditFile: Value error: {0}\n".format( err )
self.Message( msg )
        except:
            errMsg = "\nEditFile Error: {0}\n".format( sys.exc_info()[0] )
            self.Message( errMsg )
            raise Exception( errMsg )
#-----------------------------------------------------------
#
#-----------------------------------------------------------
def GetPlotDir( self ):
'''Set the directory for disk/archive plots.'''
if self.model.args.DEBUG_ALL :
            print( '-> GetPlotDir(): ', flush = True )
# Tk.filedialog.askdirectory clears the listbox selection, save it
BasinList = self.GetBasinListbox()
if path_exists( self.last_plot_dir ) :
initial_plot_dir = self.last_plot_dir
else :
initial_plot_dir = self.model.args.homeDir
# Get the file path and runid
archive_dir = filedialog.askdirectory(
initialdir = initial_plot_dir,
title = 'Plot Archive Directory',
mustexist = True )
# Reset the listbox selection
for Basin in BasinList :
self.basinListBox.selection_set(self.basinListBoxMap[ Basin.name ])
if not archive_dir :
return
self.plot_dir = archive_dir
#-----------------------------------------------------------
#
#-----------------------------------------------------------
def GetOutputDir( self ):
'''Set the directory for basin output files.'''
if self.model.args.DEBUG_ALL :
            print( '-> GetOutputDir(): ', flush = True )
# Tk.filedialog.askdirectory clears the listbox selection, save it
BasinList = self.GetBasinListbox()
if path_exists( self.model.args.basinOutputDir ) :
initial_out_dir = self.model.args.basinOutputDir
else :
initial_out_dir = self.model.args.homeDir
# Get the file path and runid
output_dir = filedialog.askdirectory(
initialdir = initial_out_dir,
title = 'Basin Output Directory',
mustexist = False )
# Reset the listbox selection
for Basin in BasinList :
self.basinListBox.selection_set(self.basinListBoxMap[ Basin.name ])
if not output_dir :
return
self.model.args.basinOutputDir = output_dir
self.Message( 'Set basin output directory to: ' +\
self.model.args.basinOutputDir + '\n' )
#-----------------------------------------------------------
#
#-----------------------------------------------------------
def CheckTimeEntry( self, newTime, widgetName ):
'''Validate the time entry for an update to start_time or end_time.
Times must be on hour boundaries (zero minutes) since the tidal
data is aligned on the hour.'''
if self.model.args.DEBUG_ALL :
print( '-> CheckTimeEntry(): ', newTime, flush = True )
try :
if ':' in newTime :
format_string = '%Y-%m-%d %H:%M'
else :
format_string = '%Y-%m-%d'
time = datetime.strptime( newTime, format_string )
except ValueError :
# Reset text to original values, return False
if widgetName == str( self.startTimeEntry ) :
start_text = \
str( self.model.start_time.strftime( '%Y-%m-%d %H:%M' ) )
self.start_text.set( start_text )
elif widgetName == str( self.endTimeEntry ) :
end_text = str(self.model.end_time.strftime( '%Y-%m-%d %H:%M' ))
self.end_text.set( end_text )
return False
# Align the time to an hour boundary
time = time - timedelta(minutes = time.minute, seconds = time.second)
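        # e.g. an entry of '2000-01-01 13:45' is truncated to 2000-01-01 13:00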
# Save both initial times so comparison in InitTimeBasins is valid
self.model.previous_start_time = self.model.start_time
self.model.previous_end_time = self.model.end_time
if widgetName == str( self.startTimeEntry ) :
start_text = str( time.strftime( '%Y-%m-%d %H:%M' ) )
self.start_text.set( start_text )
self.model.start_time = time
elif widgetName == str( self.endTimeEntry ) :
end_text = str( time.strftime( '%Y-%m-%d %H:%M' ) )
self.end_text.set( end_text )
self.model.end_time = time
InitTimeBasins( self.model )
if self.model.args.DEBUG_ALL :
print( ' New time: ', str( time ), flush = True )
return True
#---------------------------------------------------------------
#
#---------------------------------------------------------------
class IntVar :
'''Surrogate for Tk.IntVar() when non-GUI option invoked.
Provides set() and get() methods.'''
def __init__( self, value = 0 ):
self.value = value
def set( self, value ) :
self.value = value
def get( self ) :
return self.value
| gpl-3.0 |
xzh86/scikit-learn | sklearn/feature_selection/__init__.py | 244 | 1088 | """
The :mod:`sklearn.feature_selection` module implements feature selection
algorithms. It currently includes univariate filter selection methods and the
recursive feature elimination algorithm.
"""
from .univariate_selection import chi2
from .univariate_selection import f_classif
from .univariate_selection import f_oneway
from .univariate_selection import f_regression
from .univariate_selection import SelectPercentile
from .univariate_selection import SelectKBest
from .univariate_selection import SelectFpr
from .univariate_selection import SelectFdr
from .univariate_selection import SelectFwe
from .univariate_selection import GenericUnivariateSelect
from .variance_threshold import VarianceThreshold
from .rfe import RFE
from .rfe import RFECV
__all__ = ['GenericUnivariateSelect',
'RFE',
'RFECV',
'SelectFdr',
'SelectFpr',
'SelectFwe',
'SelectKBest',
'SelectPercentile',
'VarianceThreshold',
'chi2',
'f_classif',
'f_oneway',
'f_regression']
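# A minimal usage sketch (X and y are hypothetical feature/target arrays):
# keep the 10 features with the highest chi-squared score.
# >>> from sklearn.feature_selection import SelectKBest, chi2
# >>> X_new = SelectKBest(chi2, k=10).fit_transform(X, y)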
| bsd-3-clause |
sergpolly/Thermal_adapt_scripts | BOOTSTRAPS/TODO_Optimal_orgs_bootstrap_proteinsCAIsupport.py | 2 | 5010 | import pandas as pd
import os
import subprocess as sub
import re
import sys
from Bio import SeqUtils
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
root_path = "."
# # archaea/genomes
# data_path = os.path.join(root_path,"ftp.ncbi.nih.gov/genomes/all")
# asm_path = os.path.join(root_path,"archaea_links")
# result_path = os.path.join("..","archaea250")
# we'll calculate protein level CAI and store em in individual files ...
path_CAI = os.path.join(root_path,'genomes_with_CAI')
# # read table with organisms and their taxonid ...
dat = pd.read_csv(os.path.join(root_path,"assembly_description.dat"))
aacids = sorted(list('CMFILVWYAGTSNQDEHRKP'))
def init_iter_dat():
iteration_dat = {}
    # we'll populate it with GC,topt, aacids - everything that's needed for the thermal adaptation trends calculation ...
iteration_dat['GC'] = []
iteration_dat['topt'] = []
for aa in aacids:
iteration_dat[aa] = []
return iteration_dat
# consider organisms with the detailed protein description available ...
##### excluding Halophiles and empty entries ...
indcs = (dat.subdivision!='Halobacteria')&(dat.subdivision!='Nanohaloarchaea')
#####
valid_dat_subset = dat[indcs]
# taking into account only those with detailed protein info ...
valid_dat_subset = valid_dat_subset[valid_dat_subset['protein_details']]
# valid_dat_subset = dat[dat['protein_details']]
#####
# reset index just in case ...
valid_dat_subset = valid_dat_subset.reset_index()
PERCENTILE = 0.1
# FRACTION = 0.4
num_iterations = 30
slopes_generated = {}
ribocheck = re.compile('ribosomal protein')
# let's first identify significant organisms ...
signif_organisms = []
for asm in valid_dat_subset['assembly'].get_values():
# open a file with the analysed organismal proteins...
protein_fname = os.path.join(path_CAI,'%s_genes.dat'%asm)
# load the data ...
protein_dat = pd.read_csv(protein_fname)
####################
ribosomal = protein_dat['gene_product'].apply(lambda x: ribocheck.search(x) is not None)
ribosomal = protein_dat[ribosomal]
    # upper quartile of CAI over all proteins vs lower quartile over ribosomal proteins
    perc75tot = protein_dat['cai'].quantile(q=0.75)
    perc25rib = ribosomal['cai'].quantile(q=0.25)
    # if there is evidence for translational adaptation, then ...
    if perc75tot<perc25rib:
signif_organisms.append(True)
else:
signif_organisms.append(False)
#####################################
valid_dat_subset['signif_organisms'] = signif_organisms
signif_organisms_subset = valid_dat_subset[valid_dat_subset['signif_organisms']]
signif_organisms_subset = signif_organisms_subset.reset_index()
#####################################
for iteration in xrange(num_iterations):
# sample_indicies = np.random.choice(valid_dat_subset.index,int(dat_size*FRACTION))
# let's create local tiny copy of the dat DataFrame ...
    # we'll populate it with GC,topt, aacids - everything that's needed for the thermal adaptation trends calculation ...
iteration_dat = init_iter_dat()
print
print "iteration %d"%iteration
print
for asm, topt in signif_organisms_subset[['assembly','topt']].get_values():
# open a file with the analysed organismal proteins...
protein_fname = os.path.join(path_CAI,'%s_genes.dat'%asm)
# load the data ...
protein_dat = pd.read_csv(protein_fname)
protein_dat_size = protein_dat.index.size
subsample_size = int(protein_dat.index.size*PERCENTILE)
####################
        # now instead of taking proteins with high CAI, we take a random subset of proteins ...
prot_sample_indicies = np.random.choice(protein_dat.index,subsample_size)
prot_subsample = protein_dat.loc[prot_sample_indicies]
print "taking %d random proteins from some organism: average cai (total vs subs): %.2f vs %.2f"%(subsample_size,protein_dat['cai'].mean(),prot_subsample['cai'].mean())
cai_proteome = ''.join(prot_subsample['prot_seq'])
cai_proteome_len = float(len(cai_proteome))
cai_genome = ''.join(prot_subsample['gene_seq'])
#
iteration_dat['topt'].append(topt)
iteration_dat['GC'].append(SeqUtils.GC(cai_genome))
#
for aa in aacids:
# ACHTUNG ACHTUNG ACHTUNG ACHTUNG ...
# multiply by 100.0 or not ?!
iteration_dat[aa].append(100.0*cai_proteome.count(aa)/cai_proteome_len)
# now, as soon as the iteration_dat is ready to go, let's just get amino acid trends out of it ...
slopes_generated[iteration] = []
for aa in aacids:
a,b,r,pval,_ = st.linregress(iteration_dat['topt'],iteration_dat[aa])
slopes_generated[iteration].append(a)
# print counter,'out of ',totcount
slopes_generated = pd.DataFrame(slopes_generated,index=aacids)
slopes_generated.to_csv('BOOTSTRAPED_prots_signifORGS_supportCAI_arch.dat')
# plt.clf(); plt.imshow(slopes_generated.get_values(),interpolation='nearest'); plt.show()
| mit |
Gorgel/minkpy | minkowski_files/MINKOWSKI2_PY/Minkowski_python/minktest.py | 1 | 1299 | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import ctypes
import c2raytools as c2t
# the data we want to analyse, a 3D array of floats
uv = c2t.read_cbin('/home/gorgel/Documents/C2ray/dtbox_smth7.96.cbin')
# the size of the data we want to analyse, a 1D array of three integers (same as the -x -y -z options)
usizev = np.array([504,504,504])
# integer: number of bins to use (same as -b option)
numbins = int(10)
#float: lowest value of threshold (same as -l option)
low = float(0)
# float: highest values of threshold (same as -h option)
high = float(10)
output_array = np.zeros((3,numbins+1,5), dtype=np.float32)
# vsumv - the output, a 3D array of floats. The size is (3,numbins+1,5)
def mink(input_array, array_size, nr_bins, low, high, output_array):
lib_file = '/home/gorgel/Dropbox/simon/plugg/masterarbete/minkowski_files/MINKOWSKI2_PY/testpython/Minkowski_python/minkowski_python.so'
lib = ctypes.cdll.LoadLibrary(lib_file)
func = lib.minkowski
return func(ctypes.c_void_p(input_array.ctypes.data), ctypes.c_void_p(array_size.ctypes.data),\
ctypes.c_int(nr_bins), ctypes.c_float(low) , ctypes.c_float(high) ,ctypes.c_void_p(output_array.ctypes.data))
A = mink(uv, usizev, numbins, low, high, output_array)
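# Hypothetical inspection of the result: the layout of the (3, numbins+1, 5)
# output array is defined by the C library, so the column index used below is
# an assumption; the threshold axis is rebuilt from the low/high/numbins
# values passed to mink().
# >>> thresholds = np.linspace(low, high, numbins + 1)
# >>> for functional in range(3):
# ...     plt.plot(thresholds, output_array[functional, :, 0], label='V%d' % functional)
# >>> plt.legend(); plt.xlabel('threshold'); plt.show()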
| gpl-2.0 |
blutooth/gp-svi | examples/black_box_svi.py | 3 | 3262 | from __future__ import absolute_import
from __future__ import print_function
import matplotlib.pyplot as plt
import autograd.numpy as np
import autograd.numpy.random as npr
import autograd.scipy.stats.multivariate_normal as mvn
import autograd.scipy.stats.norm as norm
from autograd import grad
from optimizers import adam
def black_box_variational_inference(logprob, D, num_samples):
"""Implements http://arxiv.org/abs/1401.0118, and uses the
local reparameterization trick from http://arxiv.org/abs/1506.02557"""
def unpack_params(params):
# Variational dist is a diagonal Gaussian.
mean, log_std = params[:D], params[D:]
return mean, log_std
def gaussian_entropy(log_std):
return 0.5 * D * (1.0 + np.log(2*np.pi)) + np.sum(log_std)
rs = npr.RandomState(0)
def variational_objective(params, t):
"""Provides a stochastic estimate of the variational lower bound."""
mean, log_std = unpack_params(params)
samples = rs.randn(num_samples, D) * np.exp(log_std) + mean
lower_bound = gaussian_entropy(log_std) + np.mean(logprob(samples, t))
return -lower_bound
gradient = grad(variational_objective)
return variational_objective, gradient, unpack_params
if __name__ == '__main__':
# Specify an inference problem by its unnormalized log-posterior.
D = 2
def log_posterior(x, t):
"""An example 2D intractable distribution:
a Gaussian evaluated at zero with a Gaussian prior on the log-variance."""
mu, log_sigma = x[:, 0], x[:, 1]
prior = norm.logpdf(log_sigma, 0, 1.35)
likelihood = norm.logpdf(mu, 0, np.exp(log_sigma))
return prior + likelihood
# Build variational objective.
objective, gradient, unpack_params = \
black_box_variational_inference(log_posterior, D, num_samples=2000)
# Set up plotting code
def plot_isocontours(ax, func, xlimits=[-2, 2], ylimits=[-4, 2], numticks=101):
x = np.linspace(*xlimits, num=numticks)
y = np.linspace(*ylimits, num=numticks)
X, Y = np.meshgrid(x, y)
zs = func(np.concatenate([np.atleast_2d(X.ravel()), np.atleast_2d(Y.ravel())]).T)
Z = zs.reshape(X.shape)
plt.contour(X, Y, Z)
ax.set_yticks([])
ax.set_xticks([])
# Set up figure.
fig = plt.figure(figsize=(8,8), facecolor='white')
ax = fig.add_subplot(111, frameon=False)
plt.ion()
plt.show(block=False)
def callback(params, t, g):
print("Iteration {} lower bound {}".format(t, -objective(params, t)))
plt.cla()
target_distribution = lambda x : np.exp(log_posterior(x, t))
plot_isocontours(ax, target_distribution)
mean, log_std = unpack_params(params)
variational_contour = lambda x: mvn.pdf(x, mean, np.diag(np.exp(2*log_std)))
plot_isocontours(ax, variational_contour)
plt.draw()
plt.pause(1.0/30.0)
print("Optimizing variational parameters...")
init_mean = -1 * np.ones(D)
init_log_std = -5 * np.ones(D)
init_var_params = np.concatenate([init_mean, init_log_std])
variational_params = adam(gradient, init_var_params, step_size=0.1, num_iters=2000, callback=callback)
| mit |
tzulitai/flink | flink-python/pyflink/table/utils.py | 7 | 2811 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.table.types import DataType, LocalZonedTimestampType
def pandas_to_arrow(schema, timezone, field_types, series):
import pyarrow as pa
def create_array(s, t):
try:
return pa.Array.from_pandas(s, mask=s.isnull(), type=t)
except pa.ArrowException as e:
error_msg = "Exception thrown when converting pandas.Series (%s) to " \
"pyarrow.Array (%s)."
raise RuntimeError(error_msg % (s.dtype, t), e)
arrays = [create_array(
tz_convert_to_internal(series[i], field_types[i], timezone),
schema.types[i]) for i in range(0, len(schema))]
return pa.RecordBatch.from_arrays(arrays, schema)
def arrow_to_pandas(timezone, field_types, batches):
import pyarrow as pa
table = pa.Table.from_batches(batches)
return [tz_convert_from_internal(c.to_pandas(date_as_object=True), t, timezone)
for c, t in zip(table.itercolumns(), field_types)]
def tz_convert_from_internal(s, t: DataType, local_tz):
"""
Converts the timestamp series from internal according to the specified local timezone.
Returns the same series if the series is not a timestamp series. Otherwise,
returns a converted series.
"""
if type(t) == LocalZonedTimestampType:
return s.dt.tz_localize(local_tz)
else:
return s
def tz_convert_to_internal(s, t: DataType, local_tz):
"""
Converts the timestamp series to internal according to the specified local timezone.
"""
if type(t) == LocalZonedTimestampType:
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
if is_datetime64_dtype(s.dtype):
return s.dt.tz_localize(None)
elif is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(local_tz).dt.tz_localize(None)
return s
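# Usage sketch (hypothetical values, assuming LocalZonedTimestampType() can be
# built with its defaults): round-tripping a timezone-aware pandas Series
# through the internal (timezone-naive) representation.
# >>> import pandas as pd
# >>> s = pd.Series(pd.to_datetime(['2020-01-01 00:00:00+00:00']))
# >>> internal = tz_convert_to_internal(s, LocalZonedTimestampType(), 'Asia/Shanghai')
# >>> # internal holds naive timestamps expressed in Asia/Shanghai local time
# >>> restored = tz_convert_from_internal(internal, LocalZonedTimestampType(), 'Asia/Shanghai')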
| apache-2.0 |
MicheleMaris/GaussFit | __init__.py | 1 | 36001 | __DESCRIPTION__="""2d Gauss Fit of a single gaussian
The model function would be
R=[
[ cos(psi_ell),sin(psi_ell)]
[-sin(psi_ell),cos(psi_ell)]
]
C=diag(lambda0,lambda1)
R0 = (x0;y0)
is
log(y)=-0.5*(A*x**2+2*B*x*y+C*y**2+K+E*x+F*y)
the fwhm of two axis comes from eigenvectors of matrix
AA=[[A,B],[B,C]]
the center x0,y0 from
(x0;y0)=inv(AA)*(E;F)
the zero point
"""
class MapProfile :
""" Handles the profile of a GRD Map """
def __init__(self,psi_deg_array,radius_array) :
"""MPF=MapProfile(psi_deg_array,radius_array)
psi_deg_array=list of angles along wich to compute profiles
radius_array=list of radii to sample the profiles
"""
import copy
from collections import OrderedDict
import numpy as np
self.M=OrderedDict()
self.psi_deg=copy.deepcopy(psi_deg_array)
self.psi=np.deg2rad(self.psi_deg)
self.radius=copy.deepcopy(radius_array)
self.M['_x']=self._template()
self.M['_y']=self._template()
self.M['_cos_psi']=self._template()
self.M['_sin_psi']=self._template()
for i in range(len(self.psi)) :
self.M['_x'][i]=self.radius*np.cos(self.psi[i])
self.M['_y'][i]=self.radius*np.sin(self.psi[i])
self.M['_cos_psi'][i]=np.cos(self.psi[i])
self.M['_sin_psi'][i]=np.sin(self.psi[i])
def _template(self,dtype='float'):
import numpy as np
return np.zeros([len(self.psi),len(self.radius)],dtype=dtype)
def __getitem__(self,this) :
try :
return self.M[this]
except :
return None
def __setitem__(self,this,that) :
self.M[this]=that
    def keys(self) :
return self.M.keys()
def fill(self,name,GRDMap,argument) :
"extracts profiles of a map of given argument along a list of directions"
self.M[name]=self._template()
for ipsi in range(len(self.psi)) :
            self.M[name][ipsi]=GRDMap.bilinearXY(argument,self.M['_x'][ipsi],self.M['_y'][ipsi])
def fwhm(self,name,returnItp=False,threshold=0.5,returnStats=True) :
"""extracts the fwhm (beware it assumes profiles can be sorted)
if returnItp=True (default False) returns also the value of the profile at the threshold point
        if returnStats=True (default True) returns statistics (returnItp is then ignored) as:
min, max, sqrt(min*max), sqrt(max/min),mean,rotation,amplitude
"""
import numpy as np
pp=np.zeros(len(self.psi))
tt=np.zeros(len(self.psi))
for ipsi in range(len(self.psi)) :
yv=self[name][ipsi]
idx=np.argsort(yv)
yv=self[name][ipsi][idx]
xv=self.radius[idx]
pp[ipsi]=np.interp(threshold,yv,xv)
tt[ipsi]=np.interp(pp[ipsi],self.radius,self[name][ipsi])
pp=2*pp
if returnStats :
Min=pp.min()
Max=pp.max()
A=(np.cos(self.psi)*(pp-pp.mean())).sum()
B=(np.sin(self.psi)*(pp-pp.mean())).sum()
return [Min,Max,(Min*Max)**0.5,(Max/Min)**0.5,pp.mean(),np.rad2deg(np.arctan2(A,B)),(A**2+B**2)**0.5]
if returnItp :
return pp,tt
return pp
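# Usage sketch (hypothetical objects): `beam` stands for any map object that
# exposes bilinearXY(argument, x, y), which is all MapProfile.fill() needs.
# >>> import numpy as np
# >>> prof = MapProfile(np.arange(0., 360., 10.), np.linspace(0., 2., 201))
# >>> prof.fill('power', beam, 'amplitude')
# >>> stats = prof.fwhm('power') # [min, max, sqrt(min*max), sqrt(max/min), mean, rotation, amplitude]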
class NoCentNoZer :
def __init__(self,U,V,Y,Yth=None,doNotScaleAxis=True) :
import numpy as np
import numpy.linalg as linalg
if Yth == None :
self.YTh = 10**(-0.3)
else :
self.YTh = Yth*1
self.YTh
self.peak=Y.max()
lmap=Y/self.peak
lmap.shape=lmap.size
idx = np.where(lmap>=self.YTh)[0]
lmap = np.log(lmap[idx])
u=U*1
v=V*1
u.shape=u.size
v.shape=v.size
if doNotScaleAxis :
self.uscal=1.
self.vscal=1.
else :
self.uscal=u.max()-u.min()
self.vscal=v.max()-v.min()
u=u[idx]/self.uscal
v=v[idx]/self.vscal
self.n=len(idx)
self.S=np.zeros([3,3])
self.VV=np.zeros(3)
self.S[0,0]= 0.25*(u**4).sum()
self.S[0,1]= 0.5*((u**3)*v).sum()
self.S[0,2]= 0.25*(u**2*v**2).sum()
self.S[1,1]=(u**2*v**2).sum()
self.S[1,2]=0.5*(u*v**3).sum()
self.S[2,2]=0.25*(v**4).sum()
for r in range(len(self.VV)) :
for c in range(len(self.VV)) :
if r > c :
self.S[r,c]=self.S[c,r]*1
self.VV[0] = -0.5*(lmap*u**2).sum()
self.VV[1] = -(lmap*v*u).sum()
self.VV[2] = -0.5*(lmap*v**2).sum()
self.pars=np.dot(linalg.inv(self.S),self.VV)
self.inv=linalg.inv(self.S)
self.det=linalg.det(self.S)
self.y=lmap
self.u=u
self.v=v
self.res = (self.pars[0]*self.u**2+2*self.pars[1]*self.u*self.v+self.pars[2]*self.v**2)
self.ksq = self.res**2
self.A=np.zeros([2,2])
self.A[0][0]=self.pars[0]/self.uscal**2
self.A[0][1]=self.pars[1]/self.uscal/self.vscal
self.A[1][0]=self.pars[1]/self.uscal/self.vscal
self.A[1][1]=self.pars[2]/self.vscal**2
self.heighen_val,hv=linalg.eigh(self.A)
        self.semiaxis_fwhm=2.*np.sqrt(2.*np.log(2.)/self.heighen_val)*180./np.pi
self.fwhm=(self.semiaxis_fwhm[0]*self.semiaxis_fwhm[1])**0.5
self.ellipticity=self.semiaxis_fwhm.max()/self.semiaxis_fwhm.min()
self.rot=np.transpose(hv/linalg.det(hv))
self.psi_ell=np.arctan2(self.rot[0][1],self.rot[0][0])*180./np.pi
self.gauss_peak=self.peak
def mdl(self,U,V) :
acc = self.pars[0]*(U/self.uscal)**2
        acc += 2*self.pars[1]*(U/self.uscal)*(V/self.vscal)
        acc += self.pars[2]*(V/self.vscal)**2
        return -0.5*acc
class NoCent :
def __init__(self,U,V,Y,YTh=None,doNotScaleAxis=True,allowed_radius_deg=None) :
import numpy as np
import numpy.linalg as linalg
if YTh == None :
self.YTh = 1e-3
else :
self.YTh = YTh*1
self.peak=Y.max()
lmap=Y/self.peak
lmap.shape=lmap.size
#
radius=np.rad2deg((U**2+V**2)**0.5)
radius.shape=radius.size
if allowed_radius_deg == None :
idx = np.where(lmap>=self.YTh)[0]
print "Select by YTH",len(idx),lmap[idx].min(),lmap.max()
self.allowed_radius=radius[idx].max()
else :
idx = np.where(radius<=allowed_radius_deg)[0]
self.allowed_radius=allowed_radius_deg
self.YTh=lmap[idx].min()
#
lmap = np.log(lmap[idx])
u=U*1
v=V*1
u.shape=u.size
v.shape=v.size
if doNotScaleAxis :
self.uscal=1.
self.vscal=1.
else :
self.uscal=u.max()-u.min()
self.vscal=v.max()-v.min()
u=u[idx]/self.uscal
        v=v[idx]/self.vscal
self.n=len(idx)
self.N=len(idx)
self.S=np.zeros([4,4])
self.VV=np.zeros(4)
self.S[0,0]= 0.25*(u**4).sum()
self.S[0,1]= 0.5*((u**3)*v).sum()
self.S[0,2]= 0.25*(u**2*v**2).sum()
self.S[0,3]= -0.5*(u**2).sum()
self.S[1,1]=(u**2*v**2).sum()
self.S[1,2]=0.5*(u*v**3).sum()
self.S[1,3]=-(u*v).sum()
self.S[2,2]=0.25*(v**4).sum()
self.S[2,3]=-0.5*(v**2).sum()
self.S[3,3]=float(len(idx))
for r in range(len(self.VV)) :
for c in range(len(self.VV)) :
if r > c :
self.S[r,c]=self.S[c,r]*1
self.VV[0] = -0.5*(lmap*u**2).sum()
self.VV[1] = -(lmap*v*u).sum()
self.VV[2] = -0.5*(lmap*v**2).sum()
self.VV[3] = (lmap).sum()
self.pars=np.dot(linalg.inv(self.S),self.VV)
self.inv=linalg.inv(self.S)
self.det=linalg.det(self.S)
self.scaled_data=lmap
self.u=u
self.v=v
self.bf_model=-0.5*(self.pars[0]*self.u**2+2*self.pars[1]*self.u*self.v+self.pars[2]*self.v**2)+self.pars[3]
self.res = np.exp(self.bf_model)-np.exp(self.scaled_data)
self.ksq = (self.res**2).sum()
self.A=np.zeros([2,2])
self.A[0][0]=self.pars[0]/self.uscal**2
self.A[0][1]=self.pars[1]/self.uscal/self.vscal
self.A[1][0]=self.pars[1]/self.uscal/self.vscal
self.A[1][1]=self.pars[2]/self.vscal**2
#removes the regularizzation
self.Pars={}
self.Pars['A']=self.pars[0]/self.uscal**2
self.Pars['B']=self.pars[1]/self.uscal/self.vscal
self.Pars['C']=self.pars[2]/self.vscal**2
self.Pars['D']=np.nan
self.Pars['E']=np.nan
self.Pars['F']=self.pars[3]
#
self.R0=np.zeros(2)
self.heighen_val,hv=linalg.eigh(self.A)
self.semiaxis_fwhm=2.*np.sqrt(2.*(np.log(2.)-self.pars[3])/self.heighen_val)*180./np.pi
self.fwhm_min=self.semiaxis_fwhm.min()
self.fwhm_max=self.semiaxis_fwhm.max()
self.fwhm=(self.semiaxis_fwhm.prod())**0.5
self.ellipticity=self.fwhm_max/self.fwhm_min
self.rot=np.transpose(hv/linalg.det(hv))
self.psi_ell=np.arctan2(self.rot[0][1],self.rot[0][0])*180./np.pi
self.zero=self.pars[3]*1
self.gauss_peak=self.peak*np.exp(self.zero)
self.DataTh=self.YTh
def mdl(self,U,V) :
acc = self.pars[0]*(U/self.uscal)**2
        acc += 2*self.pars[1]*(U/self.uscal)*(V/self.vscal)
acc += self.pars[2]*(V/self.vscal)**2
return -0.5*acc+self.zero
def __str__(self) :
l=[]
l.append("N : "+str(self.n))
#l.append("allowed_radius : "+str(self.allowed_radius))
#l.append("xscal : "+str(self.xscal))
#l.append("yscal : "+str(self.yscal))
#l.append(" : ")
l.append("peak : "+str(self.gauss_peak))
l.append("fwhm : "+str(self.fwhm))
#l.append("fwhm_min : "+str(self.fwhm_min))
#l.append("fwhm_max : "+str(self.fwhm_max))
l.append("ellipticity :"+str(self.ellipticity))
l.append("psi_ell :"+str(self.psi_ell))
return "\n".join(l)
class NoBackground_Deprecated :
def __init__(self,U,V,Y,Yth=None) :
import numpy as np
import numpy.linalg as linalg
if Yth == None :
self.YTh = 10**(-0.3)
else :
self.YTh = Yth*1
self.YTh
self.peak=Y.max()
lmap=Y/self.peak
lmap.shape=lmap.size
idx = np.where(lmap>=self.YTh)[0]
lmap = np.log(lmap[idx])
u=U*1
v=V*1
u.shape=u.size
v.shape=v.size
self.uscal=u.max()-u.min()
self.vscal=v.max()-v.min()
u=u[idx]/self.uscal
        v=v[idx]/self.vscal
self.n=len(idx)
self.S=np.zeros([6,6])
self.VV=np.zeros(6)
self.S[0,0]= 0.25*(u**4).sum()
self.S[0,1]= 0.5*((u**3)*v).sum()
self.S[0,2]= 0.25*(u**2*v**2).sum()
self.S[0,3]= -0.5*(u**2).sum()
self.S[0,4]= -0.5*(u**3).sum()
self.S[0,5]= -0.5*(v*u**2).sum()
self.S[1,1]=(u**2*v**2).sum()
self.S[1,2]=0.5*(u*v**3).sum()
self.S[1,3]=-(u*v).sum()
self.S[1,4]= -(v*u**2).sum()
self.S[1,5]= -(u*v**2).sum()
self.S[2,2]=0.25*(v**4).sum()
self.S[2,3]=-0.5*(v**2).sum()
self.S[2,4]= -0.5*(u*v**2).sum()
self.S[2,5]= -0.5*(v**4).sum()
self.S[3,3]=float(len(idx))
self.S[3,4]= -0.5*(u).sum()
self.S[3,5]= -0.5*(v).sum()
self.S[4,4]= -0.5*(u**2).sum()
self.S[4,5]= -0.5*(u*v).sum()
self.S[5,5]= -0.5*(v**2).sum()
for r in range(len(self.VV)) :
for c in range(len(self.VV)) :
if r > c :
self.S[r,c]=self.S[c,r]*1
self.VV[0] = -0.5*(lmap*u**2).sum()
self.VV[1] = -(lmap*v*u).sum()
self.VV[2] = -0.5*(lmap*v**2).sum()
self.VV[3] = (lmap).sum()
self.VV[4] = -0.5*(lmap*u).sum()
self.VV[5] = -0.5*(lmap*v).sum()
self.pars=np.dot(linalg.inv(self.S),self.VV)
self.inv=linalg.inv(self.S)
self.det=linalg.det(self.S)
self.y=lmap
self.u=u
self.v=v
self.res = (self.pars[0]*self.u**2+2*self.pars[1]*self.u*self.v+self.pars[2]*self.v**2+self.pars[3])
self.ksq = self.res**2
self.A=np.zeros([2,2])
self.A[0][0]=self.pars[0]/self.uscal**2
self.A[0][1]=self.pars[1]/self.uscal/self.vscal
self.A[1][0]=self.pars[1]/self.uscal/self.vscal
self.A[1][1]=self.pars[2]/self.vscal**2
self.Vde=np.zeros(2)
self.Vde[0]=self.pars[4]/self.uscal
self.Vde[1]=self.pars[5]/self.vscal
self.R0=np.arcsin(np.dot(linalg.inv(self.A),self.Vde))*180./np.pi*3600.
self.heighen_val,hv=linalg.eigh(self.A)
self.semiaxis_fwhm=2.*np.sqrt(2.*(np.log(2.)-self.pars[3])/self.heighen_val)*180./np.pi
self.fwhm=(self.semiaxis_fwhm[0]*self.semiaxis_fwhm[1])**0.5
self.ellipticity=self.semiaxis_fwhm.max()/self.semiaxis_fwhm.min()
self.rot=np.transpose(hv/linalg.det(hv))
self.psi_ell=np.arctan2(self.rot[0][1],self.rot[0][0])*180./np.pi
a=np.dot(linalg.inv(self.A),self.Vde)
self.zero=self.pars[3]+0.5*(self.A[0][0]*a[0]*a[0]+2.*a[0]*a[1]*self.A[0][1]+a[1]*a[1]*self.A[1][1])
self.gauss_peak=self.peak*np.exp(self.zero)
def mdl(self,U,V) :
acc = self.pars[0]*(U/self.uscal)**2
        acc += 2*self.pars[1]*(U/self.uscal)*(V/self.vscal)
acc += self.pars[2]*(V/self.vscal)**2
return -0.5*acc+self.pars[3]
class Model :
def __init__(self,xmin,xmax,nx,ymin,ymax,ny) :
import numpy as np
a=np.linspace(xmin,xmax,nx)
self.dX=a[1]-a[0]
self.X=np.tile(np.linspace(xmin,xmax,nx),(ny,1))
self.Y=np.transpose(np.tile(np.linspace(ymin,ymax,ny),(nx,1)))
a=np.linspace(ymin,ymax,ny)
self.dY=a[1]-a[0]
self.R=None
self.D=None
self.fwhm = None
self.fwhm_min = None
self.fwhm_max = None
self.gauss_peak = None
self.ellipticity = None
self.psi_ell = None
self.peak = None
self.R = None
def __str__(self) :
        if self.D is None :
return ''
l=[]
l.append("gauss_peak : "+str(self.gauss_peak))
l.append("fwhm : "+str(self.fwhm))
l.append("fwhm_min : "+str(self.fwhm_min))
l.append("fwhm_max : "+str(self.fwhm_max))
l.append("ellipticity :"+str(self.ellipticity))
l.append("psi_ell :"+str(self.psi_ell))
l.append("X0 :"+str(self.R0[0]))
l.append("Y0 :"+str(self.R0[1]))
return "\n".join(l)
def __call__(self,*arg,**karg) :
"""call(NoBackground_Base)
call(peak,x0,y0,psi_ell,fwhm,ellipticity,MinMax=False)
MinMax = False : fwhm=p1, ellipticity=p2, fwhm_min and fwhm_max are derived
MinMax = True : fwhm_min=p1, fwhm_max=p2, fwhm and ellipticity are derived
"""
import numpy as np
if len(arg) == 0 :
return
elif len(arg) == 1 :
try :
self.gauss_peak=arg[0].gauss_peak
self.R0=arg[0].R0
self.psi_ell=arg[0].psi_ell
self.fwhm=arg[0].fwhm
self.fwhm_min=arg[0].fwhm_min
self.fwhm_max=arg[0].fwhm_max
self.ellipticity=arg[0].ellipticity
except :
return
else :
MinMax=False
try :
MinMax=karg['MinMax']==True
except :
MinMax=False
self.gauss_peak=float(arg[0])
self.R0=np.zeros(2)
self.R0[0]=float(arg[1])
self.R0[1]=float(arg[2])
self.psi_ell=float(arg[3])
if MinMax :
self.fwhm_min=float(arg[4])
self.fwhm_max=float(arg[5])
self.fwhm = (self.fwhm_min*self.fwhm_max)**0.5
self.ellipticity = self.fwhm_max/self.fwhm_min
else :
self.fwhm=float(arg[4])
self.ellipticity=float(arg[5])
self.fwhm_min=self.fwhm/self.ellipticity**0.5
self.fwhm_max=self.fwhm*self.ellipticity**0.5
self.mdl()
def mdl(self) :
import numpy as np
x=self.X-self.R0[0]
y=self.Y-self.R0[1]
self.R=(x**2+y**2)**0.5
cp=np.cos(self.psi_ell/180.*np.pi)
sp=np.sin(self.psi_ell/180.*np.pi)
u=(cp*x-sp*y)**2/self.fwhm_max**2
u+=(sp*x+cp*y)**2/self.fwhm_min**2
u*=-8.*np.log(2.)/2.
self.D=self.gauss_peak*np.exp(u)
def imshow(self) :
try :
from matplotlib import pyplot as plt
except :
return
import numpy as np
plt.imshow(self.D,origin='lower')
plt.colorbar()
class NoBackground_Base :
def __init__(self,*arg,**karg) :
"NoBackground_Base(X,Y,D,DataTh=-np.inf,AllowedRadius=np.inf,Weight=None)"
import numpy as np
import numpy.linalg as linalg
if len(arg) < 3 :
return
try :
doNotScaleAxis=float(karg['doNotScaleAxis'])
except :
doNotScaleAxis=True
try :
self.DataTh=float(karg['DataTh'])
except :
self.DataTh=-np.inf
try :
self.AllowedRadius=float(karg['AllowedRadius'])
except :
self.AllowedRadius = np.inf
self.peak=arg[2].max()
# data are regularized
lmap=arg[2]/self.peak
lmap.shape=lmap.size
x=arg[0]*1
y=arg[1]*1
radius=(x**2+y**2)**0.5
radius.shape=radius.size
idx = np.where((lmap>=self.DataTh)*(radius<=self.AllowedRadius))[0]
lmap = np.log(lmap[idx])
try :
self.Weight=karg['Weight']*1
self.Weight.shape=arg[2].size
self.Weight=self.Weight[idx]
self.Weight*=1/self.Weight.sum()
except :
self.Weight=1
self.in_shape=arg[2].shape
self.in_size=arg[2].size
self.logdata_min=lmap.min()
self.tagged=np.zeros(arg[2].shape,dtype='int')
self.tagged.shape=arg[2].size
self.tagged[idx]=1
self.tagged.shape=arg[2].shape
x.shape=x.size
y.shape=y.size
if doNotScaleAxis :
self.xscal=1.
self.yscal=1.
else :
self.xscal=x.max()-x.min()
self.yscal=y.max()-y.min()
x=x[idx]/self.xscal
y=y[idx]/self.yscal
self.N=len(idx)
self.S=np.zeros([6,6])
self.VV=np.zeros(6)
#unknown are A,B,C,D,E,F
#
self.S[0,0]= 0.25*(self.Weight*x**4).sum()
self.S[0,1]= 0.5*((self.Weight*x**3)*y).sum()
self.S[0,2]= 0.25*(self.Weight*x**2*y**2).sum()
self.S[0,3]= 0.25*(self.Weight*x**3).sum()
self.S[0,4]= 0.25*(self.Weight*x**2*y).sum()
self.S[0,5]= -0.5*(self.Weight*x**2).sum()
#
self.S[1,1]= (self.Weight*x**2*y**2).sum()
self.S[1,2]= 0.5*(self.Weight*x*y**3).sum()
self.S[1,3]= 0.5*(self.Weight*x**2*y).sum()
self.S[1,4]= 0.5*(self.Weight*x*y**2).sum()
self.S[1,5]= -(self.Weight*x*y).sum()
#
self.S[2,2]= 0.25*(self.Weight*y**4).sum()
self.S[2,3]= 0.25*(self.Weight*x*y**2).sum()
self.S[2,4]= 0.25*(self.Weight*y**3).sum()
self.S[2,5]= -0.5*(self.Weight*y**2).sum()
#
self.S[3,3]= 0.25*(self.Weight*x**2).sum()
self.S[3,4]= 0.25*(self.Weight*x*y).sum()
self.S[3,5]= -0.5*((self.Weight*x).sum()).min()
#
self.S[4,4]= 0.25*(self.Weight*y**2).sum()
        self.S[4,5]= -0.5*((self.Weight*y).sum()).min()
#
self.S[5,5]= float(len(idx))
for r in range(len(self.VV)) :
for c in range(len(self.VV)) :
if r > c :
self.S[r,c]=self.S[c,r]*1
self.VV[0] = -0.5*(self.Weight*lmap*x**2).sum()
self.VV[1] = -(self.Weight*lmap*x*y).sum()
self.VV[2] = -0.5*(self.Weight*lmap*y**2).sum()
self.VV[3] = -0.5*(self.Weight*lmap*x).sum()
self.VV[4] = -0.5*(self.Weight*lmap*y).sum()
self.VV[5] = ((self.Weight*lmap).sum()).min()
#
self.inv=linalg.inv(self.S)
self.det=linalg.det(self.S)
self.pars=np.dot(linalg.inv(self.S),self.VV)
self.ld=lmap
self.x=x
self.y=y
self.res = self.mdl(x,y)-lmap
self.ksq_log = (self.res**2).sum()
#removes the regularizzation
self.Pars={}
self.Pars['A']=self.pars[0]/self.xscal**2
self.Pars['B']=self.pars[1]/self.xscal/self.yscal
self.Pars['C']=self.pars[2]/self.yscal**2
self.Pars['D']=self.pars[3]/self.xscal
self.Pars['E']=self.pars[4]/self.yscal
self.Pars['F']=self.pars[5]+np.log(self.peak)
# find the invC matrix
self.MinvC=np.zeros([2,2])
self.MinvC[0][0]=self.Pars['A']*1.
self.MinvC[0][1]=self.Pars['B']*1.
self.MinvC[1][0]=self.Pars['B']*1.
self.MinvC[1][1]=self.Pars['C']*1.
# find the V0 vector
self.V0=np.zeros(2)
self.V0[0]=self.Pars['D']*1.
self.V0[1]=self.Pars['E']*1.
# find the center
self.MC = np.zeros([2,2])
self.MC[0][0]=self.Pars['C']*1.
self.MC[0][1]=-self.Pars['B']*1.
self.MC[1][0]=-self.Pars['B']*1.
self.MC[1][1]=self.Pars['A']*1.
self.MC=self.MC/(self.Pars['A']*self.Pars['C']-self.Pars['B']**2)
self.R0=np.zeros(2)
self.R0[0]=self.Pars['C']*self.Pars['D']-self.Pars['B']*self.Pars['E']
self.R0[1]=-self.Pars['B']*self.Pars['D']+self.Pars['A']*self.Pars['E']
self.R0 = -0.5*self.R0/(self.Pars['A']*self.Pars['C']-self.Pars['B']**2)
# find the allowed radius
self.allowed_radius=(((self.x*self.xscal-self.R0[0])**2+(self.y*self.yscal-self.R0[1])**2)**0.5).max()
# find the eigenvalues and eighenvectors
self.heighen_val,self.heighen_vec=linalg.eigh(self.MinvC)
semiaxis_fwhm=2.*np.sqrt(2.*(np.log(2.))/self.heighen_val)
self.rot=np.transpose(self.heighen_vec/linalg.det(self.heighen_vec))
for i in range(2) : self.rot[i]*=-1 if self.rot[i][i] < 0 else 1
# extract the gaussian parameters
hv=self.heighen_vec
for i in range(2) : hv[i]*=-1 if hv[i][i] < 0 else 1
self.psi_ell=np.arctan2(hv[1][0],hv[0][0])*180./np.pi
self.fwhm_min=semiaxis_fwhm.min()
self.fwhm_max=semiaxis_fwhm.max()
self.fwhm=(self.fwhm_max*self.fwhm_min)**0.5
self.ellipticity=self.fwhm_max/self.fwhm_min
#self.zero=self.Pars['F']-0.5/4.*self.Pars['A']*self.Pars['D']**2-1./4.*self.Pars['B']*self.Pars['E']*self.Pars['D']-0.5/4.*self.Pars['C']*self.Pars['E']**2
self.zero=self.Pars['F']+0.5*(self.Pars['A']*self.R0[0]**2+2*self.Pars['B']*self.R0[0]*self.R0[1]+self.Pars['C']*self.R0[1]**2)
self.gauss_at_center=np.exp(self.zero)
self.gauss_peak=np.exp(self.zero)
self.gauss_ksq=((np.exp(self.res)-self.peak*np.exp(lmap))**2).sum()
def mdl(self,x,y) :
acc = self.pars[0]*x**2
acc += 2.*self.pars[1]*x*y
acc += self.pars[2]*y**2
acc += self.pars[3]*x
acc += self.pars[4]*y
acc *= -0.5
acc += self.pars[5]
return acc
def test_map(self,X,Y,X0,Y0,fwhm_min,fwhm_max,psi_ell,peak) :
import numpy as np
cp=np.cos(psi_ell/180.*np.pi)
sp=np.sin(psi_ell/180.*np.pi)
u=(X-X0)*cp+(Y-Y0)*sp
v=-(X-X0)*sp+(Y-Y0)*cp
smin=fwhm_min/(2.*np.sqrt(2.*np.log(2.)))
smax=fwhm_max/(2.*np.sqrt(2.*np.log(2.)))
return peak*np.exp(-0.5*( (u/smax)**2 + (v/smin)**2))
def __str__(self) :
l=[]
l.append("in_shape : "+str(self.in_shape))
l.append("in_size : "+str(self.in_size))
l.append("DataTh : "+str(self.DataTh))
l.append("AllowedRadius : "+str(self.AllowedRadius))
l.append("N : "+str(self.N))
l.append("allowed_radius : "+str(self.allowed_radius))
l.append("xscal : "+str(self.xscal))
l.append("yscal : "+str(self.yscal))
l.append(" : ")
l.append("peak : "+str(self.gauss_peak))
l.append("fwhm : "+str(self.fwhm))
l.append("fwhm_min : "+str(self.fwhm_min))
l.append("fwhm_max : "+str(self.fwhm_max))
l.append("ellipticity :"+str(self.ellipticity))
l.append("psi_ell :"+str(self.psi_ell))
l.append("X0 :"+str(self.R0[0]))
l.append("Y0 :"+str(self.R0[1]))
return "\n".join(l)
class NoBackground(NoBackground_Base) :
def __init__(self,X,Y,D,DataTh=None,AllowedRadius=None,Weight=None,doNotScaleAxis=True) :
import numpy as np
NoBackground_Base.__init__(self,X,Y,D,DataTh=DataTh,AllowedRadius=AllowedRadius,Weight=Weight,doNotScaleAxis=doNotScaleAxis)
self.R0 = np.arcsin(self.R0)*180./np.pi
self.fwhm = self.fwhm*180./np.pi
self.fwhm_min = self.fwhm_min*180./np.pi
self.fwhm_max = self.fwhm_max*180./np.pi
self.xscal = np.arcsin(self.xscal)*180./np.pi
self.yscal = np.arcsin(self.yscal)*180./np.pi
self.allowed_radius= self.allowed_radius
class gaussCanonicalForm_NoCent :
"""used to convert gauss from Closed Form without Center:
D=0, E=0
"""
def __init__(self,GaussClosedForm) :
        import numpy as np
        import numpy.linalg as linalg
#find the background
self.background=GaussClosedForm.b*1
# find the invC matrix
self.MinvC=np.zeros([2,2])
self.MinvC[0][0]=GaussClosedForm.A*1.
self.MinvC[0][1]=GaussClosedForm.B*1.
self.MinvC[1][0]=GaussClosedForm.B*1.
self.MinvC[1][1]=GaussClosedForm.C*1.
# find the center
self.MC = np.zeros([2,2])
self.MC[0][0]=GaussClosedForm.C*1.
self.MC[0][1]=-GaussClosedForm.B*1.
self.MC[1][0]=-GaussClosedForm.B*1.
self.MC[1][1]=GaussClosedForm.A*1.
self.MC=self.MC/(GaussClosedForm.A*GaussClosedForm.C-GaussClosedForm.B**2)
self.R0=np.zeros(2)
        # the allowed radius cannot be recovered from the closed form parameters alone
        self.allowed_radius=None
# find the eigenvalues and eighenvectors
self.heighen_val,self.heighen_vec=linalg.eigh(self.MinvC)
semiaxis_fwhm=2.*np.sqrt(2.*(np.log(2.))/self.heighen_val)
self.rot=np.transpose(self.heighen_vec/linalg.det(self.heighen_vec))
for i in range(2) : self.rot[i]*=-1 if self.rot[i][i] < 0 else 1
# extract the gaussian parameters
hv=self.heighen_vec
for i in range(2) : hv[i]*=-1 if hv[i][i] < 0 else 1
self.psi_ell=np.arctan2(hv[1][0],hv[0][0])*180./np.pi
self.fwhm_min=semiaxis_fwhm.min()
self.fwhm_max=semiaxis_fwhm.max()
self.fwhm=(self.fwhm_max*self.fwhm_min)**0.5
self.ellipticity=self.fwhm_max/self.fwhm_min
self.zero=GaussClosedForm.F
self.gauss_peak=np.exp(self.zero)
def csv(self,header=False,fsept=', ',fmt='%20.18e') :
"returns a csv table line with the essential information, X0 and Y0 are forced to be 0"
if header :
return fsept.join(['peak','X0','Y0','fwhm','ellipticity','psi_ell','background'])
return fsept.join([fmt%self.gauss_peak,fmt%0,fmt%0,fmt%self.fwhm,fmt%self.ellipticity,fmt%self.psi_ell,fmt%self.background])
class gaussCanonicalForm :
"""used to convert gauss from Closed Form"""
def __init__(self,GaussClosedForm) :
        import numpy as np
        import numpy.linalg as linalg
#find the background
self.background=GaussClosedForm.b*1
# find the invC matrix
self.MinvC=np.zeros([2,2])
self.MinvC[0][0]=GaussClosedForm.A*1.
self.MinvC[0][1]=GaussClosedForm.B*1.
self.MinvC[1][0]=GaussClosedForm.B*1.
self.MinvC[1][1]=GaussClosedForm.C*1.
# find the V0 vector
self.V0=np.zeros(2)
self.V0[0]=GaussClosedForm.D*1.
self.V0[1]=GaussClosedForm.E*1.
# find the center
self.MC = np.zeros([2,2])
self.MC[0][0]=GaussClosedForm.C*1.
self.MC[0][1]=-GaussClosedForm.B*1.
self.MC[1][0]=-GaussClosedForm.B*1.
self.MC[1][1]=GaussClosedForm.A*1.
self.MC=self.MC/(GaussClosedForm.A*GaussClosedForm.C-GaussClosedForm.B**2)
self.R0=np.zeros(2)
self.R0[0]=GaussClosedForm.C*GaussClosedForm.D-GaussClosedForm.B*GaussClosedForm.E
self.R0[1]=-GaussClosedForm.B*GaussClosedForm.D+GaussClosedForm.A*GaussClosedForm.E
self.R0 = -0.5*self.R0/(GaussClosedForm.A*GaussClosedForm.C-GaussClosedForm.B**2)
        # the allowed radius cannot be recovered from the closed form parameters alone
        self.allowed_radius=None
# find the eigenvalues and eighenvectors
self.heighen_val,self.heighen_vec=linalg.eigh(self.MinvC)
semiaxis_fwhm=2.*np.sqrt(2.*(np.log(2.))/self.heighen_val)
self.rot=np.transpose(self.heighen_vec/linalg.det(self.heighen_vec))
for i in range(2) : self.rot[i]*=-1 if self.rot[i][i] < 0 else 1
# extract the gaussian parameters
hv=self.heighen_vec
for i in range(2) : hv[i]*=-1 if hv[i][i] < 0 else 1
self.psi_ell=np.arctan2(hv[1][0],hv[0][0])*180./np.pi
self.fwhm_min=semiaxis_fwhm.min()
self.fwhm_max=semiaxis_fwhm.max()
self.fwhm=(self.fwhm_max*self.fwhm_min)**0.5
self.ellipticity=self.fwhm_max/self.fwhm_min
self.zero=GaussClosedForm.F+0.5*(GaussClosedForm.A*self.R0[0]**2+2*GaussClosedForm.B*self.R0[0]*self.R0[1]+GaussClosedForm.C*self.R0[1]**2)
self.gauss_peak=np.exp(self.zero)
def csv(self,header=False,fsept=', ',fmt='%20.18e') :
"returns a csv table line with the essential information"
if header :
return fsept.join(['peak','X0','Y0','fwhm','ellipticity','psi_ell','background'])
return fsept.join([fmt%self.gauss_peak,fmt%self.R0[0],fmt%self.R0[1],fmt%self.fwhm,fmt%self.ellipticity,fmt%self.psi_ell,fmt%self.background])
class gaussClosedForm :
"""class to handle a gaussian curve in closed form
"""
def __init__(self,A,B,C,D,E,F,b) :
"defines a closed form gaussian for A,B,C,D,E,F,b"
self.A=A
self.B=B
self.C=C
self.D=D
self.E=E
self.F=F
self.b=b
def calc(self,X,Y):
"computes for X and Y"
import numpy as np
acc=self.A*X**2
acc+=self.B*X*Y
acc+=self.C*Y**2
acc+=self.D*X
acc+=self.E*Y
return np.exp(-0.5*acc+self.F)+self.b
def canonization(self) :
"convert closed form parameters to canonical form"
return gaussCanonicalForm(self)
def csv(self,header=False,fsept=', ',fmt='%20.18e') :
"returns a csv table line with the essential information"
if header :
return fsept.join(['A','B','C','D','E','F','b'])
return fsept.join([fmt%self.A,fmt%self.B,fmt%self.C,fmt%self.D,fmt%self.E,fmt%self.F,fmt%self.b])
def __call__(self,XY,A,B,C,D,E,F,b) :
"""call to perform fit with curve_fit
XY = array of X and Y"""
self.A=A
self.B=B
self.C=C
self.D=D
self.E=E
self.F=F
self.b=b
return self.calc(XY[0],XY[1])
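# Usage sketch with scipy.optimize.curve_fit (X, Y, D are hypothetical 1-D
# arrays of coordinates and measured values): __call__ takes the stacked
# coordinates as its first argument, so an instance works as the model function.
# >>> import numpy as np
# >>> from scipy.optimize import curve_fit
# >>> g = gaussClosedForm(0., 0., 0., 0., 0., 0., 0.)
# >>> p0 = [1e-3, 0., 1e-3, 0., 0., 0., 0.] # starting guess for A,B,C,D,E,F,b
# >>> popt, pcov = curve_fit(g, np.array([X, Y]), D, p0=p0)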
class efficient_gaussClosedForm_for_fit :
"""class to compute 'efficiently' a gaussian distribution in closed form"""
def __init__(self,X,Y) :
"defines a closed form gaussian for A,B,C,D,E,F,b"
import copy
self.X=copy.deepcopy(X)
self.Y=copy.deepcopy(Y)
def __call__(self,A,B,C,D,E,F,b) :
"computes for X and Y"
import numpy as np
acc=A*self.X**2
acc+=B*self.X*self.Y
acc+=C*self.Y**2
acc+=D*self.X
acc+=E*self.Y
return np.exp(-0.5*acc+F)+b
class efficient_chisq_gaussClosedForm_for_fit :
"""class to compute 'efficiently' a chisq for a given gaussian distribution in closed form"""
def __init__(self,X,Y,Data) :
"defines a closed form gaussian for A,B,C,D,E,F,b"
import copy
self.X=copy.deepcopy(X)
self.Y=copy.deepcopy(Y)
self.Data=copy.deepcopy(Data)
def gauss(self,A,B,C,D,E,F,b) :
"computes for X and Y"
import numpy as np
acc=A*self.X**2
acc+=B*self.X*self.Y
acc+=C*self.Y**2
acc+=D*self.X
acc+=E*self.Y
return np.exp(-0.5*acc+F)+b
def residual(self,A,B,C,D,E,F,b) :
return (self.gauss(A,B,C,D,E,F,b)-self.Data)
def __call__(self,A,B,C,D,E,F,b) :
return (self.residual(A,B,C,D,E,F,b)**2).sum()
#class super_efficient_gaussClosedForm_forfit :
#def __init__(self,X,Y,V) :
#"defines a closed form gaussian for A,B,C,D,E,F,b"
#self.X=X
#self.Y=Y
#self.V=V
#def __call__(self,A,B,C,D,E,F,b) :
#"computes for X and Y"
#import numpy as np
#acc=A*self.X**2
#acc+=B*self.X*self.Y
#acc+=C*self.Y**2
#acc+=D*self.X
#acc+=E*self.Y
#return ((np.exp(-0.5*acc+F)+b-V)**2).sum()
if __name__=='__main__' :
def TestOut(title,m1,GF,latex=False) :
if latex :
fmt = '{\\bf %11s} & %13e & %13e & %13e\\\\'
print '\\begin{tabular}{lccc}\n\\hline\\hline\n\\multicolumn{4}{c}{%s}\\\\\n\hline'%title
print "\\hline\n&\\multicolumn{1}{c}{{\\bf Input}}&\\multicolumn{1}{c}{{\\bf Fit}}&\\multicolumn{1}{c}{{\\bf Residual}}\\\\ \n \hline"
else :
fmt = '%11s : %13e %13e %13e'
print title
for k in ['gauss_peak','fwhm','fwhm_min','fwhm_max','ellipticity','psi_ell','R0'] :
name=k
if latex :
name='\\_'.join(name.split('_'))
if k=='R0' :
print fmt%('X0',m1.__dict__[k][0],GF.__dict__[k][0],GF.__dict__[k][0]-m1.__dict__[k][0])
print fmt%('Y0',m1.__dict__[k][1],GF.__dict__[k][1],GF.__dict__[k][1]-m1.__dict__[k][1])
else :
print fmt%(name,m1.__dict__[k],GF.__dict__[k],GF.__dict__[k]-m1.__dict__[k])
if latex :
print '\\hline\\hline\n &&&\\\\ \n \\end{tabular}'
print
else :
print
print "\nA test\n"
latex=True
m1=Model(-1.5e-2,1.5e-2,301,-1.5e-2,1.5e-2,301)
pxl=m1.dX
m1(1.,0.,0.,0.,30.*pxl,1.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
TestOut('Simmetric, centered',m1,GF,latex=latex)
m1(1.,0,0.1*pxl,0.,30.*pxl,1.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
TestOut('Simmetric, North',m1,GF,latex=latex)
    m1(1.,-0.1*pxl,0.,0.,30.*pxl,1.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
TestOut('Simmetric, West',m1,GF,latex=latex)
m1(1.,0,-0.1*pxl,0.,30.*pxl,1.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
TestOut('Simmetric, South',m1,GF,latex=latex)
m1(1.,0.1*pxl,0.,0.,30.*pxl,1.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
TestOut('Simmetric, East',m1,GF,latex=latex)
m1(1.,0.,0.,0.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
TestOut('Asimmetric, Center',m1,GF,latex=latex)
m1(1.,0.,0.1*pxl,0.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
TestOut('Asimmetric, North',m1,GF,latex=latex)
    m1(1.,-0.1*pxl,0.,0.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
TestOut('Asimmetric, West',m1,GF,latex=latex)
m1(1.,0,-0.1*pxl,0.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
TestOut('Asimmetric, South',m1,GF,latex=latex)
m1(1.,0.1*pxl,0,0.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
TestOut('Asimmetric, East',m1,GF,latex=latex)
m1(1.,0.*pxl,0*pxl,45.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
TestOut('Asimmetric, Center, Rotated 45 deg',m1,GF,latex=latex)
m1(1.,0.*pxl,0*pxl,90.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
TestOut('Asimmetric, Center, Rotated 90 deg',m1,GF,latex=latex)
m1(1.,0.*pxl,0*pxl,-45.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
TestOut('Asimmetric, Center, Rotated -45 deg',m1,GF,latex=latex)
m1(1.,0.*pxl,0*pxl,-89.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
TestOut('Asimmetric, Center, Rotated -89 deg',m1,GF,latex=latex)
m1(1.,0.1*pxl,-0.1*pxl,-45.,30.*pxl,2.,MinMax=False)
GF=NoBackground_Base(m1.X,m1.Y,m1.D)
TestOut('Asimmetric, South East, Rotated -45 deg',m1,GF,latex=latex)
"""
Example of chisq fitting using iminuit
from numpy import *
from iminuit.util import make_func_code, describe
import iminuit
x=zeros([601,601]) ; y=zeros([601,601])
for k in range(601) : x[:,k]=float(k-300)
for k in range(601) : y[k,:]=float(k-300)
    x.shape=x.size ; y.shape=y.size
    GG=GaussFit.efficient_gaussClosedForm_for_fit(x,y)
    KSQ=GaussFit.efficient_chisq_gaussClosedForm_for_fit(x,y,
        GG(0.001,0.,0.002,0.,0.,0.,1.)+randn(x.size)*0.1)
mKSQ=iminuit.Minuit(KSQ,D=0,E=0,fix_D=True,fix_E=True,A=0.005,B=0.005,C=0.005,F=0.,b=0.9)
mKSQ.migrad()
"""
| gpl-2.0 |
dimarkov/seaborn | seaborn/rcmod.py | 19 | 15751 | """Functions that alter the matplotlib rc dictionary on the fly."""
import numpy as np
import matplotlib as mpl
from . import palettes
_style_keys = (
"axes.facecolor",
"axes.edgecolor",
"axes.grid",
"axes.axisbelow",
"axes.linewidth",
"axes.labelcolor",
"figure.facecolor",
"grid.color",
"grid.linestyle",
"text.color",
"xtick.color",
"ytick.color",
"xtick.direction",
"ytick.direction",
"xtick.major.size",
"ytick.major.size",
"xtick.minor.size",
"ytick.minor.size",
"legend.frameon",
"legend.numpoints",
"legend.scatterpoints",
"lines.solid_capstyle",
"image.cmap",
"font.family",
"font.sans-serif",
)
_context_keys = (
"figure.figsize",
"font.size",
"axes.labelsize",
"axes.titlesize",
"xtick.labelsize",
"ytick.labelsize",
"legend.fontsize",
"grid.linewidth",
"lines.linewidth",
"patch.linewidth",
"lines.markersize",
"lines.markeredgewidth",
"xtick.major.width",
"ytick.major.width",
"xtick.minor.width",
"ytick.minor.width",
"xtick.major.pad",
"ytick.major.pad"
)
def set(context="notebook", style="darkgrid", palette="deep",
font="sans-serif", font_scale=1, color_codes=False, rc=None):
"""Set aesthetic parameters in one step.
Each set of parameters can be set directly or temporarily, see the
referenced functions below for more information.
Parameters
----------
context : string or dict
Plotting context parameters, see :func:`plotting_context`
style : string or dict
Axes style parameters, see :func:`axes_style`
palette : string or sequence
Color palette, see :func:`color_palette`
font : string
Font family, see matplotlib font manager.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
color_codes : bool
If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
rc : dict or None
Dictionary of rc parameter mappings to override the above.
"""
set_context(context, font_scale)
set_style(style, rc={"font.family": font})
set_palette(palette, color_codes=color_codes)
if rc is not None:
mpl.rcParams.update(rc)
def reset_defaults():
"""Restore all RC params to default settings."""
mpl.rcParams.update(mpl.rcParamsDefault)
def reset_orig():
"""Restore all RC params to original settings (respects custom rc)."""
mpl.rcParams.update(mpl.rcParamsOrig)
class _AxesStyle(dict):
"""Light wrapper on a dict to set style temporarily."""
def __enter__(self):
"""Open the context."""
rc = mpl.rcParams
self._orig_style = {k: rc[k] for k in _style_keys}
set_style(self)
return self
def __exit__(self, *args):
"""Close the context."""
set_style(self._orig_style)
class _PlottingContext(dict):
"""Light wrapper on a dict to set context temporarily."""
def __enter__(self):
"""Open the context."""
rc = mpl.rcParams
self._orig_context = {k: rc[k] for k in _context_keys}
set_context(self)
return self
def __exit__(self, *args):
"""Close the context."""
set_context(self._orig_context)
def axes_style(style=None, rc=None):
"""Return a parameter dict for the aesthetic style of the plots.
This affects things like the color of the axes, whether a grid is
enabled by default, and other aesthetic elements.
This function returns an object that can be used in a ``with`` statement
to temporarily change the style parameters.
Parameters
----------
style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
A dictionary of parameters or the name of a preconfigured set.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
style dictionaries. This only updates parameters that are
considered part of the style definition.
Examples
--------
>>> st = axes_style("whitegrid")
>>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
>>> import matplotlib.pyplot as plt
>>> with axes_style("white"):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_style : set the matplotlib parameters for a seaborn theme
plotting_context : return a parameter dict to to scale plot elements
color_palette : define the color palette for a plot
"""
if style is None:
style_dict = {k: mpl.rcParams[k] for k in _style_keys}
elif isinstance(style, dict):
style_dict = style
else:
styles = ["white", "dark", "whitegrid", "darkgrid", "ticks"]
if style not in styles:
raise ValueError("style must be one of %s" % ", ".join(styles))
# Define colors here
dark_gray = ".15"
light_gray = ".8"
# Common parameters
style_dict = {
"figure.facecolor": "white",
"text.color": dark_gray,
"axes.labelcolor": dark_gray,
"legend.frameon": False,
"legend.numpoints": 1,
"legend.scatterpoints": 1,
"xtick.direction": "out",
"ytick.direction": "out",
"xtick.color": dark_gray,
"ytick.color": dark_gray,
"axes.axisbelow": True,
"image.cmap": "Greys",
"font.family": ["sans-serif"],
"font.sans-serif": ["Arial", "Liberation Sans",
"Bitstream Vera Sans", "sans-serif"],
"grid.linestyle": "-",
"lines.solid_capstyle": "round",
}
# Set grid on or off
if "grid" in style:
style_dict.update({
"axes.grid": True,
})
else:
style_dict.update({
"axes.grid": False,
})
# Set the color of the background, spines, and grids
if style.startswith("dark"):
style_dict.update({
"axes.facecolor": "#EAEAF2",
"axes.edgecolor": "white",
"axes.linewidth": 0,
"grid.color": "white",
})
elif style == "whitegrid":
style_dict.update({
"axes.facecolor": "white",
"axes.edgecolor": light_gray,
"axes.linewidth": 1,
"grid.color": light_gray,
})
elif style in ["white", "ticks"]:
style_dict.update({
"axes.facecolor": "white",
"axes.edgecolor": dark_gray,
"axes.linewidth": 1.25,
"grid.color": light_gray,
})
# Show or hide the axes ticks
if style == "ticks":
style_dict.update({
"xtick.major.size": 6,
"ytick.major.size": 6,
"xtick.minor.size": 3,
"ytick.minor.size": 3,
})
else:
style_dict.update({
"xtick.major.size": 0,
"ytick.major.size": 0,
"xtick.minor.size": 0,
"ytick.minor.size": 0,
})
# Override these settings with the provided rc dictionary
if rc is not None:
rc = {k: v for k, v in rc.items() if k in _style_keys}
style_dict.update(rc)
# Wrap in an _AxesStyle object so this can be used in a with statement
style_object = _AxesStyle(style_dict)
return style_object
def set_style(style=None, rc=None):
"""Set the aesthetic style of the plots.
This affects things like the color of the axes, whether a grid is
enabled by default, and other aesthetic elements.
Parameters
----------
style : dict, None, or one of {darkgrid, whitegrid, dark, white, ticks}
A dictionary of parameters or the name of a preconfigured set.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
style dictionaries. This only updates parameters that are
considered part of the style definition.
Examples
--------
>>> set_style("whitegrid")
>>> set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
See Also
--------
axes_style : return a dict of parameters or use in a ``with`` statement
to temporarily set the style.
set_context : set parameters to scale plot elements
set_palette : set the default color palette for figures
"""
style_object = axes_style(style, rc)
mpl.rcParams.update(style_object)
def plotting_context(context=None, font_scale=1, rc=None):
"""Return a parameter dict to scale elements of the figure.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
which are version of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
This function returns an object that can be used in a ``with`` statement
to temporarily change the context parameters.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> c = plotting_context("poster")
>>> c = plotting_context("notebook", font_scale=1.5)
>>> c = plotting_context("talk", rc={"lines.linewidth": 2})
>>> import matplotlib.pyplot as plt
>>> with plotting_context("paper"):
... f, ax = plt.subplots()
... ax.plot(x, y) # doctest: +SKIP
See Also
--------
set_context : set the matplotlib parameters to scale plot elements
axes_style : return a dict of parameters defining a figure style
color_palette : define the color palette for a plot
"""
if context is None:
context_dict = {k: mpl.rcParams[k] for k in _context_keys}
elif isinstance(context, dict):
context_dict = context
else:
contexts = ["paper", "notebook", "talk", "poster"]
if context not in contexts:
raise ValueError("context must be in %s" % ", ".join(contexts))
# Set up dictionary of default parameters
base_context = {
"figure.figsize": np.array([8, 5.5]),
"font.size": 12,
"axes.labelsize": 11,
"axes.titlesize": 12,
"xtick.labelsize": 10,
"ytick.labelsize": 10,
"legend.fontsize": 10,
"grid.linewidth": 1,
"lines.linewidth": 1.75,
"patch.linewidth": .3,
"lines.markersize": 7,
"lines.markeredgewidth": 0,
"xtick.major.width": 1,
"ytick.major.width": 1,
"xtick.minor.width": .5,
"ytick.minor.width": .5,
"xtick.major.pad": 7,
"ytick.major.pad": 7,
}
# Scale all the parameters by the same factor depending on the context
scaling = dict(paper=.8, notebook=1, talk=1.3, poster=1.6)[context]
context_dict = {k: v * scaling for k, v in base_context.items()}
# Now independently scale the fonts
font_keys = ["axes.labelsize", "axes.titlesize", "legend.fontsize",
"xtick.labelsize", "ytick.labelsize", "font.size"]
font_dict = {k: context_dict[k] * font_scale for k in font_keys}
context_dict.update(font_dict)
# Implement hack workaround for matplotlib bug
# See https://github.com/mwaskom/seaborn/issues/344
# There is a bug in matplotlib 1.4.2 that makes points invisible when
# they don't have an edgewidth. It will supposedly be fixed in 1.4.3.
if mpl.__version__ == "1.4.2":
context_dict["lines.markeredgewidth"] = 0.01
# Override these settings with the provided rc dictionary
if rc is not None:
rc = {k: v for k, v in rc.items() if k in _context_keys}
context_dict.update(rc)
# Wrap in a _PlottingContext object so this can be used in a with statement
context_object = _PlottingContext(context_dict)
return context_object
def set_context(context=None, font_scale=1, rc=None):
"""Set the plotting context parameters.
This affects things like the size of the labels, lines, and other
elements of the plot, but not the overall style. The base context
is "notebook", and the other contexts are "paper", "talk", and "poster",
    which are versions of the notebook parameters scaled by .8, 1.3, and 1.6,
respectively.
Parameters
----------
context : dict, None, or one of {paper, notebook, talk, poster}
A dictionary of parameters or the name of a preconfigured set.
font_scale : float, optional
Separate scaling factor to independently scale the size of the
font elements.
rc : dict, optional
Parameter mappings to override the values in the preset seaborn
context dictionaries. This only updates parameters that are
considered part of the context definition.
Examples
--------
>>> set_context("paper")
>>> set_context("talk", font_scale=1.4)
>>> set_context("talk", rc={"lines.linewidth": 2})
See Also
--------
plotting_context : return a dictionary of rc parameters, or use in
a ``with`` statement to temporarily set the context.
set_style : set the default parameters for figure style
set_palette : set the default color palette for figures
"""
context_object = plotting_context(context, font_scale, rc)
mpl.rcParams.update(context_object)
def set_palette(palette, n_colors=None, desat=None, color_codes=False):
"""Set the matplotlib color cycle using a seaborn palette.
Parameters
----------
palette : hls | husl | matplotlib colormap | seaborn color palette
Palette definition. Should be something that :func:`color_palette`
can process.
n_colors : int
Number of colors in the cycle. The default number of colors will depend
on the format of ``palette``, see the :func:`color_palette`
documentation for more information.
desat : float
Proportion to desaturate each color by.
color_codes : bool
If ``True`` and ``palette`` is a seaborn palette, remap the shorthand
color codes (e.g. "b", "g", "r", etc.) to the colors from this palette.
Examples
--------
>>> set_palette("Reds")
>>> set_palette("Set1", 8, .75)
See Also
--------
color_palette : build a color palette or set the color cycle temporarily
in a ``with`` statement.
set_context : set parameters to scale plot elements
set_style : set the default parameters for figure style
"""
colors = palettes.color_palette(palette, n_colors, desat)
mpl.rcParams["axes.color_cycle"] = list(colors)
mpl.rcParams["patch.facecolor"] = colors[0]
if color_codes:
palettes.set_color_codes(palette)
| bsd-3-clause |
pravsripad/jumeg | jumeg/connectivity/con_viz.py | 2 | 71476 | #!/usr/bin/env python
""" Visualization functions for connectivity analysis. """
import sys
import os.path as op
from itertools import cycle
from functools import partial
import numpy as np
import scipy as sci
from mne.viz.utils import plt_show
from mne.viz.circle import (circular_layout, _plot_connectivity_circle_onpick)
import yaml
import time
def sensor_connectivity_3d(raw, picks, con, idx, n_con=20, min_dist=0.05,
scale_factor=0.005, tube_radius=0.001,
title='Sensor connectivity', vmin=None, vmax=None,
out_fname='sensor_connectivity.png'):
""" Function to plot sensor connectivity showing strongest
connections(n_con) excluding sensors that are less than min_dist apart.
https://github.com/mne-tools/mne-python/blob/master/examples/connectivity/plot_sensor_connectivity.py
Parameters
----------
raw : Raw object
Instance of mne.io.Raw
picks : list
Picks to be included.
con : ndarray (n_channels, n_channels)
Connectivity matrix.
idx : list
List of indices of sensors of interest.
n_con : int
Number of connections of interest.
min_dist : float
Minimum distance between sensors allowed.
Note: Please modify scale factor and tube radius to appropriate sizes
if the plot looks scrambled.
"""
# Now, visualize the connectivity in 3D
try:
from enthought.mayavi import mlab
    except ImportError:
from mayavi import mlab
# mlab.figure(size=(600, 600), bgcolor=(0.5, 0.5, 0.5))
mlab.figure(size=(600, 600), bgcolor=(1, 1, 1), fgcolor=(0.5, 0.5, 0.5))
# Plot the sensor location
sens_loc = [raw.info['chs'][picks[i]]['loc'][:3] for i in idx]
sens_loc = np.array(sens_loc)
pts = mlab.points3d(sens_loc[:, 0], sens_loc[:, 1], sens_loc[:, 2],
color=(0.5, 0.5, 0.5), opacity=1, scale_factor=scale_factor)
# do the distance based thresholding first
import itertools
for (i, j) in itertools.combinations(list(range(247)), 2):
# print sci.linalg.norm(sens_loc[i] - sens_loc[j])
if sci.linalg.norm(sens_loc[i] - sens_loc[j]) < min_dist:
            con[i, j] = con[j, i] = 0.
# Get the strongest connections
threshold = np.sort(con, axis=None)[-n_con]
assert threshold > 0.0, 'No surviving connections.'
ii, jj = np.where(con >= threshold)
# Remove close connections
con_nodes = list()
con_val = list()
for i, j in zip(ii, jj):
if sci.linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist:
con_nodes.append((i, j))
con_val.append(con[i, j])
con_val = np.array(con_val)
print(con_val.shape)
# Show the connections as tubes between sensors
if not vmax:
vmax = np.max(con_val)
if not vmin:
vmin = np.min(con_val)
print(vmin, vmax)
for val, nodes in zip(con_val, con_nodes):
x1, y1, z1 = sens_loc[nodes[0]]
x2, y2, z2 = sens_loc[nodes[1]]
points = mlab.plot3d([x1, x2], [y1, y2], [z1, z2], [val, val],
vmin=vmin, vmax=vmax, tube_radius=tube_radius,
colormap='Blues')
points.module_manager.scalar_lut_manager.reverse_lut = False
mlab.scalarbar(title=title, nb_labels=2)
# Add the sensor names for the connections shown
nodes_shown = list(set([n[0] for n in con_nodes] +
[n[1] for n in con_nodes]))
for node in nodes_shown:
x, y, z = sens_loc[node]
mlab.text3d(x, y, z, raw.ch_names[picks[node]], scale=scale_factor,
color=(0, 0, 0))
view = (-88.7, 40.8, 0.76, np.array([-3.9e-4, -8.5e-3, -1e-2]))
mlab.view(*view)
time.sleep(1)
mlab.savefig(out_fname)
time.sleep(1)
mlab.close()
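# Example usage (hedged sketch; the file name and pick selection below are
# assumptions, not part of this module). The connectivity matrix `con` would
# typically come from a spectral connectivity estimate on epochs built from `raw`.
#
#     import mne
#     raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)  # hypothetical file
#     picks = mne.pick_types(raw.info, meg=True, eeg=False)
#     idx = list(range(len(picks)))   # sensors of interest
#     # con: (n_channels, n_channels) connectivity array computed elsewhere
#     sensor_connectivity_3d(raw, picks, con, idx, n_con=20, min_dist=0.05,
#                            out_fname='sensor_connectivity.png')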
def plot_connectivity_circle(con, node_names, indices=None, n_lines=None,
node_angles=None, node_width=None,
node_colors=None, facecolor='black',
textcolor='white', node_edgecolor='black',
linewidth=1.5, colormap='hot', vmin=None,
vmax=None, colorbar=True, title=None, title_pad=15,
colorbar_size=0.2, colorbar_pos=(-0.25, 0.05),
symmetric_cbar=False, fontsize_title=12,
fontsize_names=8, fontsize_colorbar=8, padding=6.,
fig=None, subplot=111, interactive=True,
node_linewidth=2., show=True, arrow=False,
arrowstyle='->,head_length=0.7,head_width=0.4',
ignore_diagonal=True, **kwargs):
"""Visualize connectivity as a circular graph.
Note: This code is based on the circle graph example by Nicolas P. Rougier
http://www.labri.fr/perso/nrougier/coding/.
Parameters
----------
con : np.array
Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
array is provided, "indices" has to be used to define the connection
indices.
node_names : list of str
Node names. The order corresponds to the order in con.
indices : tuple of arrays | None
Two arrays with indices of connections for which the connections
        strengths are defined in con. Only needed if con is a 1D array.
n_lines : int | None
If not None, only the n_lines strongest connections (strength=abs(con))
are drawn.
node_angles : np.array, shape=(len(node_names,)) | None
Array with node positions in degrees. If None, the nodes are equally
spaced on the circle. See mne.viz.circular_layout.
node_width : float | None
Width of each node in degrees. If None, the minimum angle between any
two nodes is used as the width.
node_colors : list of tuples | list of str
List with the color to use for each node. If fewer colors than nodes
are provided, the colors will be repeated. Any color supported by
matplotlib can be used, e.g., RGBA tuples, named colors.
facecolor : str
Color to use for background. See matplotlib.colors.
textcolor : str
Color to use for text. See matplotlib.colors.
node_edgecolor : str
Color to use for lines around nodes. See matplotlib.colors.
linewidth : float
Line width to use for connections.
colormap : str
Colormap to use for coloring the connections.
vmin : float | None
Minimum value for colormap. If None, it is determined automatically.
vmax : float | None
Maximum value for colormap. If None, it is determined automatically.
colorbar : bool
Display a colorbar or not.
title : str
The figure title.
title_pad : float
The offset of the title from the top of the axes, in points.
matplotlib default is None to use rcParams['axes.titlepad'].
colorbar_size : float
Size of the colorbar.
colorbar_pos : 2-tuple
Position of the colorbar.
symmetric_cbar : bool
Symmetric colorbar around 0.
fontsize_title : int
Font size to use for title.
fontsize_names : int
Font size to use for node names.
fontsize_colorbar : int
Font size to use for colorbar.
padding : float
Space to add around figure to accommodate long labels.
fig : None | instance of matplotlib.pyplot.Figure
The figure to use. If None, a new figure with the specified background
color will be created.
subplot : int | 3-tuple
Location of the subplot when creating figures with multiple plots. E.g.
121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
matplotlib.pyplot.subplot.
interactive : bool
When enabled, left-click on a node to show only connections to that
node. Right-click shows all connections.
node_linewidth : float
        Line width for nodes.
show : bool
Show figure if True.
arrow: bool
Include arrows at end of connection.
arrowstyle: str
The style params of the arrow head to be drawn.
ignore_diagonal: bool
        If True, do not plot the values on the diagonal (i.e., hide
        intra-node connections).
Returns
-------
fig : instance of matplotlib.pyplot.Figure
The figure handle.
axes : instance of matplotlib.axes.PolarAxesSubplot
The subplot handle.
Code taken from mne-python v0.14.
URL: https://github.com/mne-tools/mne-python
"""
import matplotlib.pyplot as plt
import matplotlib.path as m_path
import matplotlib.patches as m_patches
n_nodes = len(node_names)
if node_angles is not None:
if len(node_angles) != n_nodes:
raise ValueError('node_angles has to be the same length '
'as node_names')
# convert it to radians
node_angles = node_angles * np.pi / 180
else:
# uniform layout on unit circle
node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
if node_width is None:
# widths correspond to the minimum angle between two nodes
dist_mat = node_angles[None, :] - node_angles[:, None]
dist_mat[np.diag_indices(n_nodes)] = 1e9
node_width = np.min(np.abs(dist_mat))
else:
node_width = node_width * np.pi / 180
if node_colors is not None:
if len(node_colors) < n_nodes:
node_colors = cycle(node_colors)
else:
# assign colors using colormap
node_colors = [plt.cm.Spectral(i / float(n_nodes))
for i in range(n_nodes)]
# handle 1D and 2D connectivity information
if con.ndim == 1:
if indices is None:
raise ValueError('indices has to be provided if con.ndim == 1')
elif con.ndim == 2:
if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
raise ValueError('con has to be 1D or a square matrix')
# we use the lower-triangular part
is_symmetric = np.all(np.abs(con - con.T) < 1e-8)
if is_symmetric:
if ignore_diagonal:
indices = np.tril_indices(n_nodes, -1)
else:
indices = np.tril_indices(n_nodes, 0)
else:
if not arrow:
import warnings
warnings.warn('Since the con matrix is asymmetric it will be '
'treated as a causality matrix and arrow will '
'be set to True.', Warning)
arrow = True
# get off-diagonal indices
if ignore_diagonal:
indices = np.where(~np.eye(con.shape[0], dtype=bool))
else:
indices = np.where(np.ones(con.shape, dtype=bool))
con = con[indices]
else:
raise ValueError('con has to be 1D or a square matrix')
# get the colormap
if isinstance(colormap, str):
colormap = plt.get_cmap(colormap)
# Make figure background the same colors as axes
if fig is None:
fig = plt.figure(figsize=(8, 8), facecolor=facecolor)
# Use a polar axes
if not isinstance(subplot, tuple):
subplot = (subplot,)
axes = plt.subplot(*subplot, polar=True, facecolor=facecolor)
# No ticks, we'll put our own
plt.xticks([])
plt.yticks([])
# Set y axes limit, add additional space if requested
plt.ylim(0, 10 + padding)
# Remove the black axes border which may obscure the labels
axes.spines['polar'].set_visible(False)
con_abs = np.abs(con)
n_nonzero_cons = len(np.where(con_abs)[0])
# Draw lines between connected nodes, only draw the strongest connections
if n_lines is not None and len(con) > n_lines:
if n_nonzero_cons > n_lines:
con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
elif n_nonzero_cons > 0:
con_thresh = np.sort(np.abs(con).ravel())[-n_nonzero_cons]
else:
# there are no significant connections, set minimum threshold to
# avoid plotting everything
con_thresh = 0.001
else:
if n_nonzero_cons > 0:
con_thresh = con_abs[np.where(con_abs)].min()
else:
# there are no significant connections, set minimum threshold to
# avoid plotting everything
con_thresh = 0.001
# get the connections which we are drawing and sort by connection strength
# this will allow us to draw the strongest connections first
con_draw_idx = np.where(con_abs >= con_thresh)[0]
con = con[con_draw_idx]
con_abs = con_abs[con_draw_idx]
indices = [ind[con_draw_idx] for ind in indices]
# now sort them
sort_idx = np.argsort(con_abs)
del con_abs
con = con[sort_idx]
indices = [ind[sort_idx] for ind in indices]
# Get vmin vmax for color scaling
if vmin is None:
if n_nonzero_cons > 0:
vmin = np.min(con)
else:
vmin = 0.
if vmax is None:
if n_nonzero_cons > 0:
vmax = np.max(con)
else:
vmax = 0.2
if symmetric_cbar:
if np.fabs(vmin) > np.fabs(vmax):
vmin = -np.fabs(vmin)
vmax = np.fabs(vmin)
else:
vmin = -np.fabs(vmax)
vmax = np.fabs(vmax)
vrange = vmax - vmin
# We want to add some "noise" to the start and end position of the
# edges: We modulate the noise with the number of connections of the
# node and the connection strength, such that the strongest connections
# are closer to the node center
    nodes_n_con = np.zeros(n_nodes, dtype=int)
for i, j in zip(indices[0], indices[1]):
nodes_n_con[i] += 1
nodes_n_con[j] += 1
# initialize random number generator so plot is reproducible
rng = np.random.mtrand.RandomState(seed=0)
n_con = len(indices[0])
noise_max = 0.25 * node_width
start_noise = rng.uniform(-noise_max, noise_max, n_con)
end_noise = rng.uniform(-noise_max, noise_max, n_con)
nodes_n_con_seen = np.zeros_like(nodes_n_con)
for i, (end, start) in enumerate(zip(indices[0], indices[1])):
nodes_n_con_seen[start] += 1
nodes_n_con_seen[end] += 1
start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
float(nodes_n_con[start]))
end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
float(nodes_n_con[end]))
# scale connectivity for colormap (vmin<=>0, vmax<=>1)
con_val_scaled = (con - vmin) / vrange
# Finally, we draw the connections
# in SCoT, cau_ij represents causal influence from j to i
for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
# Start point
t0, r0 = node_angles[j], 10
# End point
if arrow:
# make shorter to accommodate arrowhead
t1, r1 = node_angles[i], 9
else:
t1, r1 = node_angles[i], 10
if i != j:
# Some noise in start and end point
t0 += start_noise[pos]
t1 += end_noise[pos]
verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
else:
verts = [(t0, r0), (t0 + 20 / 180 * np.pi, 5),
(t1 - 20 / 180 * np.pi, 5), (t1, r1)]
codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
m_path.Path.LINETO]
path = m_path.Path(verts, codes)
color = colormap(con_val_scaled[pos])
if arrow:
# add an arrow to the patch
patch = m_patches.FancyArrowPatch(path=path,
arrowstyle=arrowstyle,
fill=False, edgecolor=color,
mutation_scale=10,
linewidth=linewidth, alpha=1.)
else:
patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
linewidth=linewidth, alpha=1.)
axes.add_patch(patch)
# Draw ring with colored nodes
height = np.ones(n_nodes) * 1.0
bars = axes.bar(node_angles, height, width=node_width, bottom=9,
edgecolor=node_edgecolor, lw=node_linewidth,
facecolor='.9', align='center')
for bar, color in zip(bars, node_colors):
bar.set_facecolor(color)
# Draw node labels
angles_deg = 180 * node_angles / np.pi
for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
if angle_deg >= 270:
ha = 'left'
else:
# Flip the label, so text is always upright
angle_deg += 180
ha = 'right'
axes.text(angle_rad, 10.4, name, size=fontsize_names,
rotation=angle_deg, rotation_mode='anchor',
horizontalalignment=ha, verticalalignment='center',
color=textcolor)
if title is not None:
plt.title(title, color=textcolor, fontsize=fontsize_title,
axes=axes, pad=title_pad)
if colorbar:
sm = plt.cm.ScalarMappable(cmap=colormap,
norm=plt.Normalize(vmin, vmax))
sm.set_array(np.linspace(vmin, vmax))
cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
shrink=colorbar_size,
anchor=colorbar_pos)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
cb.ax.tick_params(labelsize=fontsize_colorbar)
plt.setp(cb_yticks, color=textcolor)
# Add callback for interaction
if interactive:
callback = partial(_plot_connectivity_circle_onpick, fig=fig,
axes=axes, indices=indices, n_nodes=n_nodes,
node_angles=node_angles)
fig.canvas.mpl_connect('button_press_event', callback)
plt_show(show)
return fig, axes
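# Example usage (illustrative sketch with random data; all values below are
# made up and not part of the original module):
#
#     import numpy as np
#     n_nodes = 10
#     node_names = ['node%d' % i for i in range(n_nodes)]
#     con = np.random.rand(n_nodes, n_nodes)
#     con = (con + con.T) / 2.     # symmetric matrix -> undirected circle plot
#     fig, axes = plot_connectivity_circle(con, node_names, n_lines=20,
#                                          facecolor='white', textcolor='black',
#                                          colormap='hot', colorbar=True)
#     fig.savefig('circle_example.png', facecolor='white')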
def plot_grouped_connectivity_circle(yaml_fname, con, orig_labels,
replacer_dict, labels_mode=None,
node_order_size=68, indices=None,
out_fname='circle.png', title=None,
subplot=111, include_legend=False,
n_lines=None, fig=None, show=True,
vmin=None, vmax=None, colormap='hot',
colorbar=False, colorbar_pos=(-0.25, 0.05),
symmetric_cbar=False, bbox_inches=None,
tight_layout=None, cortex_colors=None,
**kwargs):
"""
Plot the connectivity circle grouped and ordered according to
groups in the yaml input file provided.
yaml_fname: str
A file in the yaml format that provides information on how to group
        the labels for the circle plot. Some grouping examples are provided at
jumeg/data/*_grouping.yaml.
con: ndarray
Connectivity matrix to be plotted.
orig_labels : list of str
        The original label names, in the order in which they appear in con.
replacer_dict: dict
A dictionary that provides a one to one match between original label
name and a group name. The group name is plotted at the location of the
original label name provided.
labels_mode : str | None
'blank': Plots no labels on the circle plot,
'replace': Replace original labels with labels in replacer_dict. If
the label is not in replacer_dict it is replaced with a blank.
'replace_no_hemi': Same as 'replace' but without hemisphere indicators.
None: Plots all of the orig_label names provided.
bbox_inches : None | 'tight'
tight_layout : bool
out_fname: str
Filename of the saved figure.
For other keyword arguments please refer to docstring for
plot_connectivity_circle.
    NOTE: yaml order fix helps preserve the order of entries in the yaml file.
"""
import matplotlib.pyplot as plt
# read the yaml file with grouping
if op.isfile(yaml_fname):
with open(yaml_fname, 'r') as f:
labels = yaml.safe_load(f)
else:
print('%s - File not found.' % yaml_fname)
sys.exit()
node_angles, node_colors = _get_group_node_angles_and_colors(labels, orig_labels,
node_order_size, cortex_colors=cortex_colors)
my_labels = _get_circular_plot_labels(labels_mode, orig_labels, replacer_dict)
# Plot the graph using node_order and colours
# orig_labels is the order of nodes in the con matrix (important)
fig, axes = plot_connectivity_circle(con, my_labels, n_lines=n_lines,
facecolor='white', textcolor='black',
node_angles=node_angles, colormap=colormap,
node_colors=node_colors,
node_edgecolor='white', fig=fig,
vmax=vmax, vmin=vmin, colorbar_size=0.2,
colorbar_pos=colorbar_pos,
colorbar=colorbar, symmetric_cbar=symmetric_cbar,
show=show, subplot=subplot, indices=indices,
title=title, **kwargs)
if include_legend:
import matplotlib.patches as mpatches
# yaml order fix
legend_patches = [mpatches.Patch(color=col, label=list(llab.keys())[0])
for col, llab in zip(['g', 'r', 'c', 'y', 'b', 'm'], labels)]
# legend_patches = [mpatches.Patch(color=col, label=key)
# for col, key in zip(['g', 'r', 'c', 'y', 'b', 'm'],
# labels.keys())]
plt.legend(handles=legend_patches, loc=3, ncol=1,
mode=None, fontsize='medium')
if tight_layout:
fig.tight_layout()
if out_fname:
fig.savefig(out_fname, facecolor='white',
dpi=600, bbox_inches=bbox_inches)
return fig
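# Example usage (hedged sketch; the yaml grouping file, label list and
# replacer_dict below are assumptions -- see jumeg/data/*_grouping.yaml for
# real grouping files):
#
#     import numpy as np
#     yaml_fname = 'aparc_cortex_based_grouping.yaml'   # hypothetical path
#     orig_labels = [...]                               # 68 aparc label names
#     con = np.random.rand(68, 68)
#     replacer_dict = {'bankssts': 'temporal'}          # original name -> group name
#     fig = plot_grouped_connectivity_circle(yaml_fname, con, orig_labels,
#                                            replacer_dict, labels_mode='replace',
#                                            n_lines=50, out_fname='grouped.png')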
def _get_circular_plot_labels(labels_mode, orig_labels, replacer_dict):
"""
Parameters:
-----------
labels_mode : str | None
'blank': Plots no labels on the circle plot,
'replace': Replace original labels with labels in replacer_dict. If
the label is not in replacer_dict it is replaced with a blank.
'replace_no_hemi': Same as 'replace' but without hemisphere indicators.
None: Plots all of the orig_label names provided.
orig_labels : list of str
        Label names, in the order in which they appear in con.
replacer_dict :
Dictionary to replace the individual label names with cortex
names.
Returns:
--------
my_labels : list of str
The label names used in the circular plot.
"""
# labels mode decides the labels printed for each of the nodes
if labels_mode == 'blank':
# show nothing, only the empty circle plot
my_labels = ['' for _ in orig_labels]
elif labels_mode == 'replace':
if not isinstance(replacer_dict, dict):
raise RuntimeError("labels_mode='replace' and replacer_dict not set.")
replaced_labels = []
for myl in orig_labels:
if myl.split('-lh')[0] in list(replacer_dict.keys()):
replaced_labels.append(replacer_dict[myl.split('-lh')[0]] + '-lh')
elif myl.split('-rh')[0] in list(replacer_dict.keys()):
replaced_labels.append(replacer_dict[myl.split('-rh')[0]] + '-rh')
else:
replaced_labels.append('')
my_labels = replaced_labels
elif labels_mode == 'replace_no_hemi':
if not isinstance(replacer_dict, dict):
raise RuntimeError("labels_mode='replace_no_hemi' and replacer_dict not set.")
replaced_labels = []
for myl in orig_labels:
if myl.split('-lh')[0] in list(replacer_dict.keys()):
replaced_labels.append(replacer_dict[myl.split('-lh')[0]])
elif myl.split('-rh')[0] in list(replacer_dict.keys()):
replaced_labels.append(replacer_dict[myl.split('-rh')[0]])
else:
replaced_labels.append('')
my_labels = replaced_labels
else:
# show all the node labels as originally given
my_labels = orig_labels
return my_labels
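# Example of the label replacement logic above (values are made up for
# illustration only):
#
#     orig_labels   = ['bankssts-lh', 'bankssts-rh', 'insula-lh']
#     replacer_dict = {'bankssts': 'temporal'}
#     _get_circular_plot_labels('replace', orig_labels, replacer_dict)
#     # -> ['temporal-lh', 'temporal-rh', '']
#     _get_circular_plot_labels('replace_no_hemi', orig_labels, replacer_dict)
#     # -> ['temporal', 'temporal', '']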
def _get_group_node_angles_and_colors(labels, orig_labels, node_order_size, cortex_colors=None):
if cortex_colors is None:
cortex_colors = ['m', 'b', 'y', 'c', 'r', 'g',
'g', 'r', 'c', 'y', 'b', 'm']
######################################################################
# Get labels in left and right hemisphere
######################################################################
label_names = list()
for lab in labels:
# label_names.extend(labels[lab])
label_names += list(lab.values())[0] # yaml order fix
lh_labels = [name + '-lh' for name in label_names if name + '-lh' in orig_labels]
rh_labels = [name + '-rh' for name in label_names if name + '-rh' in orig_labels]
######################################################################
# Get number of labels per group in a list
######################################################################
group_numbers = []
# left first in reverse order, then right hemi labels
for i in reversed(range(len(labels))):
cortical_region = list(labels[i].keys())[0]
actual_num_lh = len([rlab for rlab in labels[i][cortical_region] if rlab + '-lh' in lh_labels])
# print(cortical_region, actual_num_lh)
group_numbers.append(actual_num_lh)
for i in range(len(labels)):
cortical_region = list(labels[i].keys())[0]
actual_num_rh = len([rlab for rlab in labels[i][cortical_region] if rlab + '-rh' in rh_labels])
# print(cortical_region, actual_num_rh)
group_numbers.append(actual_num_rh)
assert np.sum(group_numbers) == len(orig_labels), 'Mismatch in number of labels when computing group boundaries.'
######################################################################
# assign a color and angle to each label based on the group
######################################################################
node_order = list()
node_order.extend(reversed(lh_labels)) # reverse the order
node_order.extend(rh_labels)
    assert len(node_order) == node_order_size, 'Node order length does not match node_order_size.'
node_angles, node_colors = _get_node_angles_and_colors(group_numbers, cortex_colors,
node_order, orig_labels)
return node_angles, node_colors
def _get_node_angles_and_colors(group_numbers, cortex_colors, node_order, orig_labels):
# the respective no. of regions in each cortex
group_boundaries = np.cumsum([0] + group_numbers)[:-1]
label_colors = []
for ind, rep in enumerate(group_numbers):
label_colors += [cortex_colors[ind]] * rep
    assert len(label_colors) == len(node_order), 'Number of colours does not match the number of nodes.'
# the order of the node_colors must match that of orig_labels
# therefore below reordering is necessary
node_colors = [label_colors[node_order.index(orig)] for orig in orig_labels]
node_angles = circular_layout(orig_labels, node_order, start_pos=90,
group_boundaries=group_boundaries)
return node_angles, node_colors
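# Illustrative example of the inputs expected above (made-up labels):
# group_numbers lists nodes per group (left-hemisphere groups reversed, then
# right), cortex_colors gives one colour per group, and the returned colours
# are reordered to match orig_labels.
#
#     group_numbers = [2, 1, 1, 2]
#     cortex_colors = ['m', 'b', 'b', 'm']
#     node_order    = ['a-lh', 'b-lh', 'c-lh', 'c-rh', 'a-rh', 'b-rh']
#     orig_labels   = ['a-lh', 'a-rh', 'b-lh', 'b-rh', 'c-lh', 'c-rh']
#     angles, colors = _get_node_angles_and_colors(group_numbers, cortex_colors,
#                                                  node_order, orig_labels)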
def plot_generic_grouped_circle(yaml_fname, con, orig_labels,
node_order_size,
out_fname='circle.png', title=None,
subplot=111, include_legend=False,
n_lines=None, fig=None, show=True,
vmin=None, vmax=None,
colorbar=False, **kwargs):
"""
Plot the connectivity circle grouped and ordered according to
groups in the yaml input file provided. This is not specific to aparc and
does not automatically split the labels into left and right hemispheres.
orig_labels : list of str
        Label names, in the order in which they appear in con.
NOTE: The order of entries in the yaml file is not preserved.
"""
import matplotlib.pyplot as pl
# read the yaml file with grouping
if op.isfile(yaml_fname):
with open(yaml_fname, 'r') as f:
labels = yaml.safe_load(f)
else:
print('%s - File not found.' % yaml_fname)
sys.exit()
cortex_colors = ['m', 'b', 'y', 'c', 'r', 'g']
# make list of label_names (without individual cortex locations)
label_names = list()
for lab in labels:
label_names.extend(labels[lab])
# here label_names are the node_order
node_order = label_names
    assert len(node_order) == node_order_size, 'Node order length does not match node_order_size.'
# the respective no. of regions in each cortex
group_numbers = [len(labels[key]) for key in list(labels.keys())]
node_angles, node_colors = _get_node_angles_and_colors(group_numbers, cortex_colors,
node_order, orig_labels)
# Plot the graph using node_order and colours
    # orig_labels is the order of nodes in the con matrix (important)
plot_connectivity_circle(con, orig_labels, n_lines=n_lines,
facecolor='white', textcolor='black',
node_angles=node_angles,
node_colors=node_colors,
node_edgecolor='white', fig=fig,
fontsize_names=8, vmax=vmax, vmin=vmin,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
colorbar=colorbar, show=show, subplot=subplot,
title=title, **kwargs)
if include_legend:
import matplotlib.patches as mpatches
legend_patches = [mpatches.Patch(color=col, label=key)
for col, key in zip(['g', 'r', 'c', 'y', 'b', 'm'],
list(labels.keys()))]
pl.legend(handles=legend_patches, loc=(0.02, 0.02), ncol=1,
mode=None, fontsize='small')
if out_fname:
pl.savefig(out_fname, facecolor='white', dpi=600)
def get_vmin_vmax_causality(vmin, vmax, cau):
"""
Get the minimum and maximum off-diagonal values that
are different from 0.
Parameters:
-----------
vmin : None | float
If vmin is None, the minimum value is taken
from the data.
vmax : None | float
If vmax is None, the maximum value is taken
from the data.
cau : np.array of shape (n_rois, n_rois)
The causality data.
Returns:
--------
vmin : float
The minimum value.
vmax : float
The maximum value.
"""
# off diagonal elements
cau_od = cau[np.where(~np.eye(cau.shape[0], dtype=bool))]
if vmax is None:
vmax = cau_od.max()
if vmin is None:
if cau_od.max() == 0:
# no significant connections found
vmin = 0
vmax = 0.2
else:
# get minimum value that is different from 0
vmin = cau_od[np.where(cau_od)].min()
return vmin, vmax
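# Example (illustrative): off-diagonal zeros are ignored when picking vmin.
#
#     import numpy as np
#     cau = np.array([[0., 0.3, 0.],
#                     [0.1, 0., 0.2],
#                     [0., 0., 0.]])
#     get_vmin_vmax_causality(None, None, cau)   # -> (0.1, 0.3)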
def plot_grouped_causality_circle(caus, yaml_fname, label_names, n_lines=None,
labels_mode='cortex_only', title='Causal Metric',
out_fname='causality_circle.png', colormap='Blues',
figsize=(10, 6), show=False, colorbar=False, fig=None,
vmin=None, vmax=None, tight_layout=False, **kwargs):
vmin, vmax = get_vmin_vmax_causality(vmin, vmax, caus)
if not fig:
import matplotlib.pyplot as plt
fig = plt.figure(num=None, figsize=figsize)
fig = plot_grouped_connectivity_circle(yaml_fname, caus, label_names,
out_fname=out_fname, labels_mode=labels_mode,
node_order_size=len(label_names), show=show,
title=title, fig=fig, subplot=(1, 1, 1),
vmin=vmin, vmax=vmax, n_lines=n_lines,
colormap=colormap, colorbar=colorbar,
arrow=True, tight_layout=tight_layout, **kwargs)
return fig
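# Example usage (hedged sketch; yaml_fname, label_names and replacer_dict are
# assumptions). Causality matrices are asymmetric, so arrows are drawn. Note
# that the underlying plot_grouped_connectivity_circle expects replacer_dict,
# which is forwarded here through **kwargs.
#
#     caus = np.abs(np.random.rand(68, 68))      # e.g. causality scores
#     fig = plot_grouped_causality_circle(caus, yaml_fname, label_names,
#                                         n_lines=25, labels_mode='replace',
#                                         replacer_dict=replacer_dict,
#                                         out_fname='causality_circle.png',
#                                         colormap='Blues', colorbar=True)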
def plot_degree_circle(degrees, yaml_fname, orig_labels_fname,
node_order_size=68, fig=None, subplot=111,
color='b', cmap='Blues', tight_layout=False,
alpha=0.5, fontsize_groups=6, textcolor_groups='k',
radsize=1., degsize=1, show_group_labels=True,
out_fname='degree.png', show=True):
"""
    Given degree values of various nodes of a network, plot a grouped circle
    plot with the degree of each node shown as a scatter of blobs around the
    circle.
"""
cortex_colors = ['m', 'b', 'y', 'c', 'r', 'g',
'g', 'r', 'c', 'y', 'b', 'm']
n_nodes = len(degrees)
with open(orig_labels_fname, 'r') as f:
orig_labels = yaml.safe_load(f)['label_names']
assert n_nodes == len(orig_labels), 'Mismatch in node names and number.'
# read the yaml file with grouping of the various nodes
if op.isfile(yaml_fname):
with open(yaml_fname, 'r') as f:
labels = yaml.safe_load(f)
else:
print('%s - File not found.' % yaml_fname)
sys.exit()
# make list of label_names (without individual cortex locations)
label_names = [list(lab.values())[0] for lab in labels]
label_names = [la for l in label_names for la in l]
lh_labels = [name + '-lh' for name in label_names]
rh_labels = [name + '-rh' for name in label_names]
# save the plot order
node_order = list()
node_order.extend(lh_labels[::-1]) # reverse the order
node_order.extend(rh_labels)
    assert len(node_order) == node_order_size, 'Node order length does not match node_order_size.'
# the respective no. of regions in each cortex
# yaml fix order change
group_numbers = [len(list(key.values())[0]) for key in labels]
group_numbers = group_numbers[::-1] + group_numbers
node_angles, node_colors = _get_node_angles_and_colors(group_numbers, cortex_colors,
node_order, orig_labels)
# prepare group label positions
group_labels = [list(lab.keys())[0] for lab in labels]
grp_lh_labels = [name + '-lh' for name in group_labels]
grp_rh_labels = [name + '-rh' for name in group_labels]
all_group_labels = grp_lh_labels + grp_rh_labels
# save the group order
group_node_order = list()
group_node_order.extend(grp_lh_labels[::-1])
group_node_order.extend(grp_rh_labels)
n_groups = len(group_node_order)
group_node_angles = circular_layout(group_node_order, group_node_order,
start_pos=90.)
import matplotlib.pyplot as plt
if fig is None:
fig = plt.figure()
# use a polar axes
if not isinstance(subplot, tuple):
subplot = (subplot,)
ax = plt.subplot(*subplot, polar=True, facecolor='white')
# first plot the circle showing the degree
theta = np.deg2rad(node_angles)
radii = np.ones(len(node_angles)) * radsize
c = ax.scatter(theta, radii, c=node_colors, s=degrees * degsize,
cmap=None, alpha=alpha)
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([])
ax.spines['polar'].set_visible(False)
# add group labels
if show_group_labels:
for i in range(group_node_angles.size):
# to modify the position of the labels
theta = group_node_angles[i] + np.pi/n_groups
ax.text(np.deg2rad(theta), radsize + radsize/5., group_node_order[i],
rotation=theta-90.,
size=fontsize_groups, horizontalalignment='center',
verticalalignment='center', color=textcolor_groups)
# to draw lines
# ax.bar(np.deg2rad(theta), 1, bottom=0., width=(np.pi/180), color='r')
# ax.text(np.deg2rad(theta), 1.2, all_group_labels[i])
if tight_layout:
fig.tight_layout()
if out_fname:
fig.savefig(out_fname, dpi=300.)
if show:
plt.show()
return fig, ax
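# Example usage (hedged sketch; both yaml files are assumptions). `degrees`
# holds one centrality value per node, e.g. the node degree of a thresholded
# network, and is scaled by `degsize` for display.
#
#     degrees = con_binary.sum(axis=0)           # one value per node
#     fig, ax = plot_degree_circle(degrees, yaml_fname, orig_labels_fname,
#                                  node_order_size=68, degsize=10,
#                                  out_fname='degree.png', show=False)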
def plot_lines_and_blobs(con, degrees, yaml_fname, orig_labels_fname,
replacer_dict,
node_order_size=68, fig=None, subplot=111,
color='b', cmap='Blues', tight_layout=False,
alpha=0.5, fontsize_groups=6, textcolor_groups='k',
radsize=1., degsize=1, labels_mode=None,
linewidth=1.5, n_lines=50, node_width=None,
arrow=False, out_fname='lines_and_blobs.png',
vmin=None, vmax=None, figsize=None,
fontsize_colorbar=8, textcolor='black',
fontsize_title=12, title=None, fontsize_names=6,
show_node_labels=False, colorbar=True,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
show=True, **kwargs):
'''
Plot connectivity circle plot with a centrality index per node shown as
    blobs along the circumference of the circle, hence the lines and the blobs.
'''
import yaml
if isinstance(orig_labels_fname, str):
with open(orig_labels_fname, 'r') as f:
orig_labels = yaml.safe_load(f)['label_names']
else:
orig_labels = orig_labels_fname
n_nodes = len(degrees)
assert n_nodes == len(orig_labels), 'Mismatch in node names and number.'
assert n_nodes == len(con), 'Mismatch in n_nodes for con and degrees.'
# read the yaml file with grouping of the various nodes
if op.isfile(yaml_fname):
with open(yaml_fname, 'r') as f:
labels = yaml.safe_load(f)
else:
print('%s - File not found.' % yaml_fname)
sys.exit()
# make list of label_names (without individual cortex locations)
label_names = [list(lab.values())[0] for lab in labels]
label_names = [la for l in label_names for la in l]
lh_labels = [name + '-lh' for name in label_names if name + '-lh' in orig_labels]
rh_labels = [name + '-rh' for name in label_names if name + '-rh' in orig_labels]
# Save the plot order and create a circular layout
node_order = list()
node_order.extend(lh_labels[::-1]) # reverse the order
node_order.extend(rh_labels)
    assert len(node_order) == node_order_size, 'Node order length does not match node_order_size.'
group_bound = [0]
# left first in reverse order, then right hemi labels
for i in range(len(labels))[::-1]:
cortical_region = list(labels[i].keys())[0]
actual_num_lh = [rlab for rlab in labels[i][cortical_region] if rlab + '-lh' in lh_labels]
group_bound.append(len(actual_num_lh))
for i in range(len(labels)):
cortical_region = list(labels[i].keys())[0]
actual_num_rh = [rlab for rlab in labels[i][cortical_region] if rlab + '-rh' in rh_labels]
group_bound.append(len(actual_num_rh))
assert np.sum(group_bound) == len(orig_labels), 'Mismatch in number of labels when computing group boundaries.'
# the respective no. of regions in each cortex
# group_bound = [len(list(key.values())[0]) for key in labels] # yaml order fix
# group_bound = [0] + group_bound[::-1] + group_bound
group_boundaries = [sum(group_bound[:i+1]) for i in range(len(group_bound))]
# remove the first element of group_bound
# make label colours such that each cortex is of one colour
group_bound.pop(0)
# remove the last total sum of the list
group_boundaries.pop()
from mne.viz.circle import circular_layout
node_angles = circular_layout(orig_labels, node_order, start_pos=90,
group_boundaries=group_boundaries)
if node_width is None:
# widths correspond to the minimum angle between two nodes
dist_mat = node_angles[None, :] - node_angles[:, None]
dist_mat[np.diag_indices(n_nodes)] = 1e9
node_width = np.min(np.abs(dist_mat))
else:
node_width = node_width * np.pi / 180
# prepare group label positions
group_labels = [list(lab.keys())[0] for lab in labels]
grp_lh_labels = [name + '-lh' for name in group_labels]
grp_rh_labels = [name + '-rh' for name in group_labels]
all_group_labels = grp_lh_labels + grp_rh_labels
# save the group order
group_node_order = list()
group_node_order.extend(grp_lh_labels[::-1])
group_node_order.extend(grp_rh_labels)
n_groups = len(group_node_order)
group_node_angles = circular_layout(group_node_order, group_node_order,
start_pos=90.)
import matplotlib.pyplot as plt
import matplotlib.path as m_path
import matplotlib.patches as m_patches
if fig is None:
fig = plt.figure(figsize=figsize)
# use a polar axes
if not isinstance(subplot, tuple):
subplot = (subplot,)
ax = plt.subplot(*subplot, polar=True, facecolor='white')
cortex_colors = ['m', 'b', 'y', 'c', 'r', 'g',
'g', 'r', 'c', 'y', 'b', 'm']
label_colors = []
for ind, rep in enumerate(group_bound):
label_colors += [cortex_colors[ind]] * rep
    assert len(label_colors) == len(node_order), 'Number of colours does not match the number of nodes.'
# the order of the node_colors must match that of orig_labels
# therefore below reordering is necessary
reordered_colors = [label_colors[node_order.index(orig)]
for orig in orig_labels]
# first plot the circle showing the degree
theta = np.deg2rad(node_angles)
radii = np.ones(len(node_angles)) * radsize
c = ax.scatter(theta, radii, c=reordered_colors, s=degrees * degsize,
cmap=cmap, alpha=alpha)
ax.grid(False)
ax.set_xticks([])
ax.set_yticks([])
ax.spines['polar'].set_visible(False)
# handle 1D and 2D connectivity information
if con.ndim == 1:
if indices is None:
raise ValueError('indices has to be provided if con.ndim == 1')
elif con.ndim == 2:
if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
raise ValueError('con has to be 1D or a square matrix')
# we use the lower-triangular part
indices = np.tril_indices(n_nodes, -1)
con = con[indices]
else:
raise ValueError('con has to be 1D or a square matrix')
# Draw lines between connected nodes, only draw the strongest connections
if n_lines is not None and len(con) > n_lines:
con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
else:
con_thresh = 0.
# get the connections which we are drawing and sort by connection strength
# this will allow us to draw the strongest connections first
con_abs = np.abs(con)
con_draw_idx = np.where(con_abs >= con_thresh)[0]
con = con[con_draw_idx]
con_abs = con_abs[con_draw_idx]
indices = [ind[con_draw_idx] for ind in indices]
# now sort them
sort_idx = np.argsort(con_abs)
con_abs = con_abs[sort_idx]
con = con[sort_idx]
indices = [ind[sort_idx] for ind in indices]
# get the colormap
if isinstance(cmap, str):
colormap = plt.get_cmap(cmap)
# Get vmin vmax for color scaling
if vmin is None:
vmin = np.min(con[np.abs(con) >= con_thresh])
if vmax is None:
vmax = np.max(con)
vrange = vmax - vmin
# We want to add some "noise" to the start and end position of the
# edges: We modulate the noise with the number of connections of the
# node and the connection strength, such that the strongest connections
# are closer to the node center
    nodes_n_con = np.zeros(n_nodes, dtype=int)
for i, j in zip(indices[0], indices[1]):
nodes_n_con[i] += 1
nodes_n_con[j] += 1
# initialize random number generator so plot is reproducible
rng = np.random.mtrand.RandomState(seed=0)
n_con = len(indices[0])
noise_max = 0.25 * node_width
start_noise = rng.uniform(-noise_max, noise_max, n_con)
end_noise = rng.uniform(-noise_max, noise_max, n_con)
nodes_n_con_seen = np.zeros_like(nodes_n_con)
for i, (start, end) in enumerate(zip(indices[0], indices[1])):
nodes_n_con_seen[start] += 1
nodes_n_con_seen[end] += 1
start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
float(nodes_n_con[start]))
end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
float(nodes_n_con[end]))
# scale connectivity for colormap (vmin<=>0, vmax<=>1)
con_val_scaled = (con - vmin) / vrange
# convert to radians for below code
node_angles = node_angles * np.pi / 180
# Finally, we draw the connections
for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
# Start point
t0, r0 = node_angles[i], radsize - 0.05
# End point
if arrow:
# make shorter to accommodate arrowhead
t1, r1 = node_angles[j], radsize - 1.
else:
t1, r1 = node_angles[j], radsize - 0.05
# Some noise in start and end point
t0 += start_noise[pos]
t1 += end_noise[pos]
verts = [(t0, r0), (t0, radsize/2.), (t1, radsize/2.), (t1, r1)]
codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
m_path.Path.LINETO]
path = m_path.Path(verts, codes)
color = colormap(con_val_scaled[pos])
if arrow:
            # add an arrow to the patch; `arrowstyle` is not a named parameter
            # of this function, so take it from kwargs with a default to avoid
            # a NameError
            arrowstyle = kwargs.get('arrowstyle',
                                    '->,head_length=0.7,head_width=0.4')
            patch = m_patches.FancyArrowPatch(path=path,
                                              arrowstyle=arrowstyle,
                                              fill=False, edgecolor=color,
                                              mutation_scale=10,
                                              linewidth=linewidth, alpha=1.)
else:
patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
linewidth=linewidth, alpha=1.)
ax.add_patch(patch)
# labels mode decides the labels printed for each of the nodes
if labels_mode == 'blank':
# show nothing, only the empty circle plot
my_labels = ['' for orig in orig_labels]
elif labels_mode == 'cortex_only':
if isinstance(replacer_dict, dict):
# show only the names of cortex areas on one representative node
replacer = replacer_dict
else:
raise RuntimeError('Replacer dict with cortex names not set, \
cannot choose cortex_only labels_mode.')
replaced_labels = []
for myl in orig_labels:
if myl.split('-lh')[0] in list(replacer.keys()):
replaced_labels.append(replacer[myl.split('-lh')[0]] + '-lh')
elif myl.split('-rh')[0] in list(replacer.keys()):
replaced_labels.append(replacer[myl.split('-rh')[0]] + '-rh')
else:
replaced_labels.append('')
my_labels = replaced_labels
else:
# show all the node labels as originally given
my_labels = orig_labels
# draw node labels
if show_node_labels:
angles_deg = 180 * node_angles / np.pi
for name, angle_rad, angle_deg in zip(my_labels, node_angles,
angles_deg):
if angle_deg >= 270:
ha = 'left'
else:
# Flip the label, so text is always upright
angle_deg += 180
ha = 'right'
ax.text(angle_rad, radsize + 0.2, name, size=fontsize_names,
rotation=angle_deg, rotation_mode='anchor',
horizontalalignment=ha, verticalalignment='center',
color='k')
# # add group labels
# if show_group_labels:
# for i in range(group_node_angles.size):
# # to modify the position of the labels
# theta = group_node_angles[i] + np.pi/n_groups
# ax.text(np.deg2rad(theta), radsize + radsize/5., group_node_order[i],
# rotation=theta-90.,
# size=fontsize_groups, horizontalalignment='center',
# verticalalignment='center', color=textcolor_groups)
if colorbar:
sm = plt.cm.ScalarMappable(cmap=colormap,
norm=plt.Normalize(vmin, vmax))
sm.set_array(np.linspace(vmin, vmax))
cb = plt.colorbar(sm, ax=ax, use_gridspec=False,
shrink=colorbar_size,
anchor=colorbar_pos)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
cb.ax.tick_params(labelsize=fontsize_colorbar)
plt.setp(cb_yticks, color=textcolor)
if title is not None:
plt.title(title, color=textcolor, fontsize=fontsize_title,
axes=ax)
if show:
plt.show()
# if tight_layout:
# fig.tight_layout()
# if out_fname:
# fig.savefig(out_fname, dpi=300.)
return fig, ax
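# Example usage (hedged sketch; file names, con and degrees are assumptions):
#
#     fig, ax = plot_lines_and_blobs(con, degrees, yaml_fname, orig_labels_fname,
#                                    replacer_dict, node_order_size=68,
#                                    n_lines=50, degsize=10,
#                                    labels_mode='cortex_only', colorbar=True,
#                                    out_fname='lines_and_blobs.png', show=False)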
def _plot_connectivity_circle_group_bars(con, node_names,
indices=None, n_lines=None,
node_angles=None, node_width=None,
node_colors=None, facecolor='black',
textcolor='white', node_edgecolor='black',
linewidth=1.5, colormap='hot', vmin=None,
vmax=None, colorbar=True, title=None,
group_node_order=None, group_node_angles=None,
group_node_width=None, group_colors=None,
fontsize_groups=8,
colorbar_size=0.2, colorbar_pos=(-0.3, 0.1),
fontsize_title=12, fontsize_names=8,
fontsize_colorbar=8, padding=6.,
fig=None, subplot=111,
node_linewidth=2., show=True):
"""Visualize connectivity as a circular graph.
Circle connectivity plot with external labels ring with group names.
Note: This code is based on the circle graph example by Nicolas P. Rougier
http://www.labri.fr/perso/nrougier/coding/.
Parameters
----------
con : array
Connectivity scores. Can be a square matrix, or a 1D array. If a 1D
array is provided, "indices" has to be used to define the connection
indices.
node_names : list of str
Node names. The order corresponds to the order in con.
indices : tuple of arrays | None
Two arrays with indices of connections for which the connections
        strengths are defined in con. Only needed if con is a 1D array.
n_lines : int | None
If not None, only the n_lines strongest connections (strength=abs(con))
are drawn.
node_angles : array, shape=(len(node_names,)) | None
Array with node positions in degrees. If None, the nodes are equally
spaced on the circle. See mne.viz.circular_layout.
node_width : float | None
Width of each node in degrees. If None, the minimum angle between any
two nodes is used as the width.
node_colors : list of tuples | list of str
List with the color to use for each node. If fewer colors than nodes
are provided, the colors will be repeated. Any color supported by
matplotlib can be used, e.g., RGBA tuples, named colors.
group_node_order : list of str
Group node names in correct order.
group_node_angles : array, shape=(len(group_node_order,)) | None
Array with node positions in degrees. If None, the nodes are equally
spaced on the circle. See mne.viz.circular_layout.
group_node_width : float | None
Width of each group node in degrees. If None, the minimum angle between
any two nodes is used as the width.
group_colors : None
List with colours to use for each group node.
fontsize_groups : int
The font size of the text used for group node labels.
facecolor : str
Color to use for background. See matplotlib.colors.
textcolor : str
Color to use for text. See matplotlib.colors.
node_edgecolor : str
Color to use for lines around nodes. See matplotlib.colors.
linewidth : float
Line width to use for connections.
colormap : str
Colormap to use for coloring the connections.
vmin : float | None
Minimum value for colormap. If None, it is determined automatically.
vmax : float | None
Maximum value for colormap. If None, it is determined automatically.
colorbar : bool
Display a colorbar or not.
title : str
The figure title.
colorbar_size : float
Size of the colorbar.
colorbar_pos : 2-tuple
Position of the colorbar.
fontsize_title : int
Font size to use for title.
fontsize_names : int
Font size to use for node names.
fontsize_colorbar : int
Font size to use for colorbar.
padding : float
Space to add around figure to accommodate long labels.
fig : None | instance of matplotlib.pyplot.Figure
The figure to use. If None, a new figure with the specified background
color will be created.
subplot : int | 3-tuple
Location of the subplot when creating figures with multiple plots. E.g.
121 or (1, 2, 1) for 1 row, 2 columns, plot 1. See
matplotlib.pyplot.subplot.
node_linewidth : float
        Line width for nodes.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib.pyplot.Figure
The figure handle.
axes : instance of matplotlib.axes.PolarAxesSubplot
The subplot handle.
Modified from mne-python v0.14.
"""
import matplotlib.pyplot as plt
import matplotlib.path as m_path
import matplotlib.patches as m_patches
n_nodes = len(node_names)
n_groups = len(group_node_order)
if group_node_angles is not None:
if len(group_node_angles) != n_groups:
raise ValueError('group_node_angles has to be the same length '
'as group_node_order')
# convert it to radians
group_node_angles = group_node_angles * np.pi / 180
if node_angles is not None:
if len(node_angles) != n_nodes:
raise ValueError('node_angles has to be the same length '
'as node_names')
# convert it to radians
node_angles = node_angles * np.pi / 180
else:
# uniform layout on unit circle
node_angles = np.linspace(0, 2 * np.pi, n_nodes, endpoint=False)
if node_width is None:
# widths correspond to the minimum angle between two nodes
dist_mat = node_angles[None, :] - node_angles[:, None]
dist_mat[np.diag_indices(n_nodes)] = 1e9
node_width = np.min(np.abs(dist_mat))
else:
node_width = node_width * np.pi / 180
if node_colors is not None:
if len(node_colors) < n_nodes:
node_colors = cycle(node_colors)
else:
# assign colors using colormap
node_colors = [plt.cm.Spectral(i / float(n_nodes))
for i in range(n_nodes)]
# handle 1D and 2D connectivity information
if con.ndim == 1:
if indices is None:
raise ValueError('indices has to be provided if con.ndim == 1')
elif con.ndim == 2:
if con.shape[0] != n_nodes or con.shape[1] != n_nodes:
raise ValueError('con has to be 1D or a square matrix')
# we use the lower-triangular part
indices = np.tril_indices(n_nodes, -1)
con = con[indices]
else:
raise ValueError('con has to be 1D or a square matrix')
# get the colormap
if isinstance(colormap, str):
colormap = plt.get_cmap(colormap)
# Make figure background the same colors as axes
if fig is None:
fig = plt.figure(figsize=(8, 8), facecolor=facecolor)
# Use a polar axes
if not isinstance(subplot, tuple):
subplot = (subplot,)
axes = plt.subplot(*subplot, polar=True, facecolor=facecolor)
# No ticks, we'll put our own
plt.xticks([])
plt.yticks([])
    # Set y axes limit, add additional space if requested
# plt.ylim(0, 10 + padding)
# increase space to allow for external group names
plt.ylim(0, 18 + padding)
# Remove the black axes border which may obscure the labels
axes.spines['polar'].set_visible(False)
# Draw lines between connected nodes, only draw the strongest connections
if n_lines is not None and len(con) > n_lines:
con_thresh = np.sort(np.abs(con).ravel())[-n_lines]
else:
con_thresh = 0.
# get the connections which we are drawing and sort by connection strength
# this will allow us to draw the strongest connections first
con_abs = np.abs(con)
con_draw_idx = np.where(con_abs >= con_thresh)[0]
con = con[con_draw_idx]
con_abs = con_abs[con_draw_idx]
indices = [ind[con_draw_idx] for ind in indices]
# now sort them
sort_idx = np.argsort(con_abs)
con_abs = con_abs[sort_idx]
con = con[sort_idx]
indices = [ind[sort_idx] for ind in indices]
# Get vmin vmax for color scaling
if vmin is None:
vmin = np.min(con[np.abs(con) >= con_thresh])
if vmax is None:
vmax = np.max(con)
vrange = vmax - vmin
# We want to add some "noise" to the start and end position of the
# edges: We modulate the noise with the number of connections of the
# node and the connection strength, such that the strongest connections
# are closer to the node center
    nodes_n_con = np.zeros(n_nodes, dtype=int)
for i, j in zip(indices[0], indices[1]):
nodes_n_con[i] += 1
nodes_n_con[j] += 1
    # initialize random number generator so plot is reproducible
rng = np.random.mtrand.RandomState(seed=0)
n_con = len(indices[0])
noise_max = 0.25 * node_width
start_noise = rng.uniform(-noise_max, noise_max, n_con)
end_noise = rng.uniform(-noise_max, noise_max, n_con)
nodes_n_con_seen = np.zeros_like(nodes_n_con)
for i, (start, end) in enumerate(zip(indices[0], indices[1])):
nodes_n_con_seen[start] += 1
nodes_n_con_seen[end] += 1
start_noise[i] *= ((nodes_n_con[start] - nodes_n_con_seen[start]) /
float(nodes_n_con[start]))
end_noise[i] *= ((nodes_n_con[end] - nodes_n_con_seen[end]) /
float(nodes_n_con[end]))
# scale connectivity for colormap (vmin<=>0, vmax<=>1)
con_val_scaled = (con - vmin) / vrange
# Finally, we draw the connections
for pos, (i, j) in enumerate(zip(indices[0], indices[1])):
# Start point
t0, r0 = node_angles[i], 10
# End point
t1, r1 = node_angles[j], 10
# Some noise in start and end point
t0 += start_noise[pos]
t1 += end_noise[pos]
verts = [(t0, r0), (t0, 5), (t1, 5), (t1, r1)]
codes = [m_path.Path.MOVETO, m_path.Path.CURVE4, m_path.Path.CURVE4,
m_path.Path.LINETO]
path = m_path.Path(verts, codes)
color = colormap(con_val_scaled[pos])
# Actual line
patch = m_patches.PathPatch(path, fill=False, edgecolor=color,
linewidth=linewidth, alpha=1.)
axes.add_patch(patch)
# Draw ring with colored nodes
height = np.ones(n_nodes) * 1.0
bars = axes.bar(node_angles, height, width=node_width, bottom=9,
edgecolor=node_edgecolor, lw=node_linewidth,
facecolor='.9', align='center')
for bar, color in zip(bars, node_colors):
bar.set_facecolor(color)
# Draw node labels
angles_deg = 180 * node_angles / np.pi
for name, angle_rad, angle_deg in zip(node_names, node_angles, angles_deg):
if angle_deg >= 270:
ha = 'left'
else:
# Flip the label, so text is always upright
angle_deg += 180
ha = 'right'
axes.text(angle_rad, 10.4, name, size=fontsize_names,
rotation=angle_deg, rotation_mode='anchor',
horizontalalignment=ha, verticalalignment='center',
color=textcolor)
# draw outer ring with group names
group_heights = np.ones(n_groups) * 1.5
group_width = 2 * np.pi/n_groups
# draw ring with group colours
group_bars = axes.bar(group_node_angles, group_heights,
width=group_width, bottom=22,
linewidth=node_linewidth, facecolor='.9',
edgecolor=node_edgecolor)
for gbar, color in zip(group_bars, group_colors):
gbar.set_facecolor(color)
# add group labels
for i in range(group_node_angles.size):
# to modify the position of the labels
theta = group_node_angles[i] + np.pi/n_groups
# theta = group_node_angles[n_groups-1-i] + np.pi/n_groups
plt.text(theta, 22.5, group_node_order[i], rotation=180*theta/np.pi-90,
size=fontsize_groups, horizontalalignment='center',
verticalalignment='center', color=textcolor)
if title is not None:
plt.title(title, color=textcolor, fontsize=fontsize_title,
axes=axes)
if colorbar:
        norm = plt.Normalize(vmin=vmin, vmax=vmax)
sm = plt.cm.ScalarMappable(cmap=colormap, norm=norm)
sm.set_array(np.linspace(vmin, vmax))
cb = plt.colorbar(sm, ax=axes, use_gridspec=False,
shrink=colorbar_size, orientation='horizontal',
anchor=colorbar_pos)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
cb.ax.tick_params(labelsize=fontsize_colorbar)
plt.setp(cb_yticks, color=textcolor)
plt_show(show)
return fig, axes
def plot_labelled_group_connectivity_circle(yaml_fname, con, orig_labels,
node_order_size=68,
out_fname='circle.png', title=None,
facecolor='white', fontsize_names=6,
subplot=111, include_legend=False,
n_lines=None, fig=None, show=True):
'''
Plot the connectivity circle grouped and ordered according to
groups in the yaml input file provided.
'''
import matplotlib.pyplot as plt
# read the yaml file with grouping
if op.isfile(yaml_fname):
with open(yaml_fname, 'r') as f:
labels = yaml.safe_load(f)
else:
print('%s - File not found.' % yaml_fname)
sys.exit()
cortex_colors = ['m', 'b', 'y', 'c', 'r', 'g',
'g', 'r', 'c', 'y', 'b', 'm']
# make list of label_names (without individual cortex locations)
label_names = list()
for lab in labels:
label_names.extend(labels[lab])
lh_labels = [name + '-lh' for name in label_names]
rh_labels = [name + '-rh' for name in label_names]
group_labels = list(labels.keys())
grp_lh_labels = [name + '-lh' for name in group_labels]
grp_rh_labels = [name + '-rh' for name in group_labels]
# Save the plot order and create a circular layout
node_order = list()
node_order.extend(lh_labels[::-1]) # reverse the order
node_order.extend(rh_labels)
    assert len(node_order) == node_order_size, 'Node order length does not match node_order_size.'
# save the group order
group_node_order = list()
group_node_order.extend(grp_lh_labels[::-1])
group_node_order.extend(grp_rh_labels)
from mne.viz.circle import circular_layout
group_node_angles = circular_layout(group_node_order, group_node_order,
start_pos=90.)
# the respective no. of regions in each cortex
group_bound = [len(labels[key]) for key in list(labels.keys())]
group_bound = [0] + group_bound[::-1] + group_bound
group_boundaries = [sum(group_bound[:i+1])
for i in range(len(group_bound))]
# remove the first element of group_bound
# make label colours such that each cortex is of one colour
group_bound.pop(0)
label_colors = []
for ind, rep in enumerate(group_bound):
label_colors += [cortex_colors[ind]] * rep
    assert len(label_colors) == len(node_order), 'Num. of colours does not match num. of nodes.'
# remove the last total sum of the list
group_boundaries.pop()
# obtain the node angles
node_angles = circular_layout(orig_labels, node_order, start_pos=90,
group_boundaries=group_boundaries)
# the order of the node_colors must match that of orig_labels
# therefore below reordering is necessary
reordered_colors = [label_colors[node_order.index(orig)]
for orig in orig_labels]
# Plot the graph using node_order and colours
_plot_connectivity_circle_group_bars(con, orig_labels, n_lines=n_lines,
facecolor=facecolor, textcolor='black',
group_node_order=group_node_order,
group_node_angles=group_node_angles,
group_colors=cortex_colors,
fontsize_groups=6, node_angles=node_angles,
node_colors=reordered_colors, fontsize_names=8,
node_edgecolor='white', fig=fig,
colorbar=False, show=show, subplot=subplot,
title=title)
if include_legend:
import matplotlib.patches as mpatches
legend_patches = [mpatches.Patch(color=col, label=key)
for col, key in zip(['g', 'r', 'c', 'y', 'b', 'm'],
list(labels.keys()))]
plt.legend(handles=legend_patches, loc=(0.02, 0.02), ncol=1,
mode=None, fontsize='small')
if out_fname:
plt.savefig(out_fname, facecolor='white', dpi=300)
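# Example usage (hedged sketch; yaml_fname, con and orig_labels are
# assumptions). This variant draws an outer ring carrying the group names
# instead of replacing the individual node labels.
#
#     plot_labelled_group_connectivity_circle(yaml_fname, con, orig_labels,
#                                             node_order_size=68, n_lines=50,
#                                             include_legend=True, show=False,
#                                             out_fname='labelled_circle.png')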
def plot_fica_grouped_circle(yaml_fname, con, orig_labels, node_order_size,
out_fname='grouped_circle.png', title=None,
facecolor='white', fontsize_names=6,
subplot=111, include_legend=False,
n_lines=None, fig=None, show=True):
'''
Plot the connectivity circle grouped and ordered according to
groups in the yaml input file provided.
This is not specific to 'aparc' parcellation and does not split the labels
into left and right hemispheres.
Note: Currently requires fica_names.txt in jumeg/examples. This needs to
be removed.
'''
import matplotlib.pyplot as plt
# read the yaml file with grouping
if op.isfile(yaml_fname):
with open(yaml_fname, 'r') as f:
labels = yaml.safe_load(f)
else:
print('%s - File not found.' % yaml_fname)
sys.exit()
cortex_colors = ['m', 'b', 'y', 'c', 'r', 'g']
# make list of label_names (without individual cortex locations)
label_names = list()
for lab in labels:
label_names.extend(labels[lab])
group_labels = list(labels.keys())
# Save the plot order and create a circular layout
node_order = label_names
    assert len(node_order) == node_order_size, 'Node order length does not match node_order_size.'
# save the group order
group_node_order = group_labels
from mne.viz.circle import circular_layout
group_node_angles = circular_layout(group_node_order, group_node_order,
start_pos=75.)
# the respective no. of regions in each cortex
group_bound = [len(labels[key]) for key in list(labels.keys())]
group_bound = [0] + group_bound
# group_bound = [0] + group_bound[::-1] + group_bound
group_boundaries = [sum(group_bound[:i+1])
for i in range(len(group_bound))]
# remove the first element of group_bound
# make label colours such that each cortex is of one colour
group_bound.pop(0)
label_colors = []
for ind, rep in enumerate(group_bound):
label_colors += [cortex_colors[ind]] * rep
    assert len(label_colors) == len(node_order), 'Num. of colours does not match num. of nodes.'
# remove the last total sum of the list
group_boundaries.pop()
# obtain the node angles
node_angles = circular_layout(orig_labels, node_order, start_pos=90,
group_boundaries=group_boundaries)
# the order of the node_colors must match that of orig_labels
# therefore below reordering is necessary
reordered_colors = [label_colors[node_order.index(orig)]
for orig in orig_labels]
# Plot the graph using node_order and colours
_plot_connectivity_circle_group_bars(con, orig_labels, n_lines=n_lines,
facecolor=facecolor, textcolor='black',
group_node_order=group_node_order,
group_node_angles=group_node_angles,
group_colors=cortex_colors,
fontsize_groups=6, node_angles=node_angles,
node_colors=reordered_colors, fontsize_names=8,
node_edgecolor='white', fig=fig,
colorbar=False, show=show, subplot=subplot,
title=title)
if include_legend:
import matplotlib.patches as mpatches
legend_patches = [mpatches.Patch(color=col, label=key)
for col, key in zip(['g', 'r', 'c', 'y', 'b', 'm'],
list(labels.keys()))]
plt.legend(handles=legend_patches, loc=(0.02, 0.02), ncol=1,
mode=None, fontsize='small')
if out_fname:
plt.savefig(out_fname, facecolor='white', dpi=300)
| bsd-3-clause |
wathen/PhD | MHD/FEniCS/VectorLaplacian/BDMLaplacianStrong.py | 1 | 4716 | # from MatrixOperations import *
from dolfin import *
import ipdb
import scipy.linalg
import numpy as np
import matplotlib.pyplot as plt
# from MatrixOperations import *
# MO = MatrixOperations()
m = 6
errL2 = np.zeros((m-1,1))
errDIV= np.zeros((m-1,1))
errH1 = np.zeros((m-1,1))
errDG = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
nn = 2
dim = 2
Solving = 'yes'
Saving = 'no'
if Saving == 'yes':
parameters['linear_algebra_backend'] = 'Epetra'
else:
parameters['linear_algebra_backend'] = 'PETSc'
for xx in xrange(1,m):
print xx
nn = 2**xx
NN[xx-1] = nn
# Create mesh and define function space
nn = int(nn)
if dim == 3:
mesh = UnitCubeMesh(nn,nn,nn)
else:
mesh = UnitSquareMesh(nn,nn)
V =FunctionSpace(mesh, "BDM", 1 )
    # creating trial and test functions
v = TestFunction(V)
u = TrialFunction(V)
def boundary(x, on_boundary):
return on_boundary
if dim == 3:
u0 = Expression(("0","0","0"))
else:
u0 = Expression(('x[0]*x[1]','x[0]*x[1]'))
N = FacetNormal(mesh)
# defining boundary conditions
bcs = DirichletBC(V,u0, boundary)
# Creating RHS function
if dim == 3:
f = Expression(('- 2*(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])-2*(x[0]*x[0]-x[0])*(x[2]*x[2]-x[2])-2*(x[1]*x[1]-x[1])*(x[2]*x[2]-x[2])', \
'- 2*(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])-2*(x[0]*x[0]-x[0])*(x[2]*x[2]-x[2])-2*(x[1]*x[1]-x[1])*(x[2]*x[2]-x[2])', \
'- 2*(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])-2*(x[0]*x[0]-x[0])*(x[2]*x[2]-x[2])-2*(x[1]*x[1]-x[1])*(x[2]*x[2]-x[2])'))
else:
f = Expression(('- 2*(x[1]*x[1]-x[1])-2*(x[0]*x[0]-x[0])','-2*(x[0]*x[0]-x[0]) - 2*(x[1]*x[1]-x[1])'))
# f = Expression(("0","0"))
# defining normal component
h = CellSize(mesh)
h_avg =avg(h)
alpha = 10.0
gamma =10.0
a = inner(grad(v), grad(u))*dx \
- inner(avg(grad(v)), outer(u('+'),N('+'))+outer(u('-'),N('-')))*dS \
- inner(outer(v('+'),N('+'))+outer(v('-'),N('-')), avg(grad(u)))*dS \
+ alpha/h_avg*inner(outer(v('+'),N('+'))+outer(v('-'),N('-')),outer(u('+'),N('+'))+outer(u('-'),N('-')))*dS \
- inner(outer(v,N), grad(u))*ds \
- inner(grad(v), outer(u,N))*ds \
+ gamma/h*inner(v,u)*ds
L = inner(v,f)*dx + gamma/h*inner(u0,v)*ds - inner(grad(v),outer(u0,N))*ds
# assembling the system
AA,bb = assemble_system(a,L,bcs)
DoF[xx-1] = bb.array().size
u = Function(V)
if Solving == 'yes':
# tic()
# set_log_level(PROGRESS)
# solver = KrylovSolver("cg","amg")
# solver.parameters["relative_tolerance"] = 1e-10
# solver.parameters["absolute_tolerance"] = 1e-10
# solver.solve(AA,u.vector(),bb)
# set_log_level(PROGRESS)
# print 'time to solve linear system', toc(),'\n\n'
print 'DoF', DoF[xx-1]
solve(a==L,u,bcs)
if dim == 3:
ue = Expression(('x[0]*x[1]*x[2]*(x[1]-1)*(x[2]-1)*(x[0]-1)','x[0]*x[1]*x[2]*(x[1]-1)*(x[2]-1)*(x[0]-1)','x[0]*x[1]*x[2]*(x[1]-1)*(x[2]-1)*(x[0]-1)'))
else:
ue = Expression(('(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])','(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])'))
ue = Expression(('(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])+x[0]*x[1]','(x[1]*x[1]-x[1])*(x[0]*x[0]-x[0])+x[0]*x[1]'))
# ue = Expression(('x[0]*x[1]','x[0]*x[1]'))
e = ue- Function(V,u)
uu= Function(V,u)
errL2[xx-1]=errornorm(ue,Function(V,u),norm_type="L2", degree_rise=4,mesh=mesh)
errDIV[xx-1]=errornorm(ue,Function(V,u),norm_type="Hdiv", degree_rise=4,mesh=mesh)
errH1[xx-1]=errornorm(ue,Function(V,u),norm_type="H1", degree_rise=4,mesh=mesh)
errDG[xx-1] = errL2[xx-1] +errH1[xx-1]
print errL2[xx-1],errDIV[xx-1],errH1[xx-1],errDG[xx-1]
####if Saving == 'yes':
#MO.SaveEpertaMatrix(AA.down_cast().mat(),"A2d")
# plot(u)
# plot(interpolate(ue,V))
# interactive()
plt.loglog(NN,errL2)
plt.title('Error plot for BDM2 elements - L2 convergence = %f' % np.log2(np.average((errL2[0:m-2]/errL2[1:m-1]))))
plt.xlabel('N')
plt.ylabel('L2 error')
plt.figure()
plt.loglog(NN,errDIV)
plt.title('Error plot for BDM2 elements - Hdiv convergence = %f' % np.log2(np.average((errDIV[0:m-2]/errDIV[1:m-1]))))
plt.xlabel('N')
plt.ylabel('Hdiv error')
plt.figure()
plt.loglog(NN,errH1)
plt.title('Error plot for BDM2 elements - H1 convergence = %f' % np.log2(np.average((errH1[0:m-2]/errH1[1:m-1]))))
plt.xlabel('N')
plt.ylabel('H1 error')
plt.figure()
plt.loglog(NN,errDG)
plt.title('Error plot for BDM2 elements - DG convergence = %f' % np.log2(np.average((errDG[0:m-2]/errDG[1:m-1]))))
plt.xlabel('N')
plt.ylabel('H1 error')
plt.show()
| mit |
peterwilletts24/Python-Scripts | plot_scripts/EMBRACE/rad_flux/plot_from_pp_2201_diff.py | 1 | 8618 | """
Load pp, plot and save
"""
import os, sys
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.analysis.cartography
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import scipy.interpolate
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
#experiment_ids = ['dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
save_path='/nfs/a90/eepdw/Mean_State_Plot_Data/Figures/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/unrotate_pole.py')
pp_file = '2201_mean'
degs_crop_top = 1.7
degs_crop_bottom = 2.5
min_contour = -50
max_contour = 50
tick_interval=20
#
# cmap= cm.s3pcpn_l
divisor=10 # for lat/lon rounding
def main():
# Min and max lats lons from smallest model domain (dkbhu) - see spreadsheet
latmin=-6.79
latmax=29.721
lonmin=340.
lonmax=379.98
lat_constraint=iris.Constraint(grid_latitude= lambda la: latmin <= la.point <= latmax)
lon_constraint=iris.Constraint(grid_longitude= lambda lo: lonmin <= lo.point <= lonmax)
# Global LAM not rotated - so different coord constraints
lonmin_g=64.1153327
lonmax_g=101.865817
lon_constraint_g = iris.Constraint(grid_longitude= lambda lo: lonmin_g <= lo.point <= lonmax_g)
# Load global cube
gl = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/djzn/djznw/%s.pp' % pp_file
glob = iris.load_cube(gl, lat_constraint & lon_constraint_g)
#glob = iris.load_cube(gl)
cs_glob = glob.coord_system('CoordSystem')
# Unrotate global cube
lat_g = glob.coord('grid_latitude').points
lon_g = glob.coord('grid_longitude').points
#print lat_g
if isinstance(cs_glob, iris.coord_systems.RotatedGeogCS):
print ' Global Model - djznw - Unrotate pole %s' % cs_glob
lons_g, lats_g = np.meshgrid(lon_g, lat_g)
lons_g,lats_g = iris.analysis.cartography.unrotate_pole(lons_g,lats_g, cs_glob.grid_north_pole_longitude, cs_glob.grid_north_pole_latitude)
lon_g=lons_g[0]
lat_g=lats_g[:,0]
#print lats_g
for i, coord in enumerate (glob.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_glob = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_glob = i
csur_glob=cs_glob.ellipsoid
glob.remove_coord('grid_latitude')
glob.remove_coord('grid_longitude')
glob.add_dim_coord(iris.coords.DimCoord(points=lat_g, standard_name='grid_latitude', units='degrees', coord_system=csur_glob), lat_dim_coord_glob)
glob.add_dim_coord(iris.coords.DimCoord(points=lon_g, standard_name='grid_longitude', units='degrees', coord_system=csur_glob), lon_dim_coord_glob)
experiment_ids = ['djzny', 'djznq', 'djzns', 'djznw', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
#experiment_ids = ['djzny' ]
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
pfile = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/%s.pp' % (expmin1, experiment_id, pp_file)
#pc = iris(pfile)
#pcube = iris.load_cube(pfile, lat_constraint & lon_constraint)
pcube = iris.load_cube(pfile)
#print pcube
#print pc
# Get min and max latitude/longitude and unrotate to get min/max corners to crop plot automatically - otherwise end with blank bits on the edges
# Unrotate cube
lat = pcube.coord('grid_latitude').points
lon = pcube.coord('grid_longitude').points
#print lat
#print 'lat'
#print lon
cs = pcube.coord_system('CoordSystem')
if isinstance(cs, iris.coord_systems.RotatedGeogCS):
print ' %s - Unrotate pole %s' % (experiment_id,cs)
lons, lats = np.meshgrid(lon, lat)
lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon=lons[0]
lat=lats[:,0]
for i, coord in enumerate (pcube.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord = i
if coord.standard_name=='grid_longitude':
lon_dim_coord = i
csur=cs.ellipsoid
pcube.remove_coord('grid_latitude')
pcube.remove_coord('grid_longitude')
pcube.add_dim_coord(iris.coords.DimCoord(points=lat, standard_name='grid_latitude', units='degrees', coord_system=csur), lat_dim_coord)
pcube.add_dim_coord(iris.coords.DimCoord(points=lon, standard_name='grid_longitude', units='degrees', coord_system=csur), lon_dim_coord)
lon_min=np.min(lons_g)
lon_max=np.max(lons_g)
lon_low_tick=lon_min -(lon_min%divisor)
lon_high_tick=math.ceil(lon_max/divisor)*divisor
lat_min=np.min(lats_g)
lat_max=np.max(lats_g)
lat_low_tick=lat_min - (lat_min%divisor)
lat_high_tick=math.ceil(lat_max/divisor)*divisor
print lon_high_tick
print lon_low_tick
pcube_regrid_data = scipy.interpolate.griddata((lats.flatten(), lons.flatten()),pcube.data.flatten(), (lats_g, lons_g), method='linear')
#pcube_regrid = iris.analysis.interpolate.linear(pcube, sample_points)
#print pcube.data.flatten()
pcube_regrid = glob.copy(data=pcube_regrid_data)
pcubediff=pcube_regrid-glob
#print pcube.data[0,0]
#print pcube_regrid_data[0,0]
#print pcubediff.data
#print glob.data[0,0]
plt.figure(figsize=(8,8))
cmap = plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lonmin_g+2,lonmax_g-2,latmin+degs_crop_bottom,latmax-degs_crop_top))
clevs = np.linspace(min_contour, max_contour,256)
cont = iplt.contourf(pcubediff, clevs, cmap=cmap, extend='both')
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'black'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'black'}
cbar = plt.colorbar(cont, orientation='horizontal', pad=0.05, extend='both', format = '%d')
#cbar.set_label('')
cbar.set_label(pcube.units, fontsize=10)
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['%d' % i for i in ticks])
main_title='%s - Difference' % pcube.standard_name.title().replace('_',' ')
model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
model_info = re.sub(r'[(\']', ' ', model_info)
model_info = re.sub(r'[\',)]', ' ', model_info)
print model_info
if not os.path.exists('%s%s/%s' % (save_path, experiment_id, pp_file)): os.makedirs('%s%s/%s' % (save_path, experiment_id, pp_file))
plt.savefig('%s%s/%s/%s_%s_notitle_diff.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
#plt.show()
plt.savefig('%s%s/%s/%s_%s_diff.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file), format='png', bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
| mit |
inodb/revmut | tests/test_find.py | 1 | 1633 | from cStringIO import StringIO
import sys
import os
from nose.tools import ok_, assert_equals
import numpy as np
from numpy.testing import assert_array_almost_equal
from os.path import join as ospj
import pandas as pd
from pandas.util.testing import assert_frame_equal
FILE_PATH = os.path.realpath(__file__)
TEST_DIR_PATH = os.path.dirname(FILE_PATH)
DATA_PATH = os.path.abspath(ospj(TEST_DIR_PATH, "test_data"))
TMP_DIR_PATH = ospj(TEST_DIR_PATH, "nose_tmp_output")
TMP_BASENAME_DIR = ospj(TMP_DIR_PATH, "validation")
PKG_PATH = ospj(TEST_DIR_PATH, '..')
sys.path.append(PKG_PATH)
from revmut.find import find_revertant_mutations
from revmut import utils
class TestRevertantMutation(object):
def setUp(self):
"""Delete temporary dir if it exists then create it"""
self.tearDown()
utils.mkdir_p(TMP_BASENAME_DIR)
def tearDown(self):
"""remove temp output files"""
utils.rm_rf(TMP_DIR_PATH)
def test_find(self):
out = StringIO()
reffa = ospj(DATA_PATH, "human_g1k_v37_chr17.fa")
mutations_tsv = ospj(DATA_PATH, "germline_mutations", "T1_test_mutation.tsv")
search_bam = ospj(DATA_PATH, "T1.bam")
normal_bam = ospj(DATA_PATH, "N1.bam")
find_revertant_mutations(reffa, mutations_tsv, search_bam, normal_bam, out)
out.seek(0)
test = pd.read_csv(out, sep="\t")
truth = pd.read_csv(ospj(DATA_PATH, "output", "T1_test.tsv"), sep="\t")
assert_frame_equal(truth.drop("MAF", axis=1),
test.drop("MAF", axis=1))
assert_array_almost_equal(truth.MAF, test.MAF, decimal=6)
| mit |
Eric89GXL/scikit-learn | sklearn/neural_network/rbm.py | 1 | 10625 | """Restricted Boltzmann Machine
"""
# Main author: Yann N. Dauphin <dauphiya@iro.umontreal.ca>
# Author: Vlad Niculae
# Author: Gabriel Synnaeve
# License: BSD Style.
import time
import numpy as np
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_arrays
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import logistic_sigmoid
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hiddens. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. Enabling it (with a non-zero value) will compute
the log-likelihood of each mini-batch and hence cause a runtime overhead
in the order of 10%.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
`components_` : array-like, shape (n_components, n_features), optional
Weight matrix, where n_features is the number of visible
units and n_components is the number of hidden units.
`intercept_hidden_` : array-like, shape (n_components,), optional
Biases of the hidden units.
`intercept_visible_` : array-like, shape (n_features,), optional
Biases of the visible units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=False)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=False, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
X, = check_arrays(X, sparse_format='csr', dtype=np.float)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
return logistic_sigmoid(safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
p[rng.uniform(size=p.shape) < p] = 1.
return np.floor(p, p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = logistic_sigmoid(np.dot(h, self.components_)
+ self.intercept_visible_)
p[rng.uniform(size=p.shape) < p] = 1.
return np.floor(p, p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.log(1. + np.exp(safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_)).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
rng = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, rng)
v_ = self._sample_visibles(h_, rng)
return v_
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
If verbose=True, pseudo-likelihood estimate for this batch.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(v_neg.T, h_neg).T
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
if self.verbose:
return self.score_samples(v_pos)
def score_samples(self, v):
"""Compute the pseudo-likelihood of v.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy to likelihood).
"""
rng = check_random_state(self.random_state)
fe = self._free_energy(v)
if issparse(v):
v_ = v.toarray()
else:
v_ = v.copy()
i_ = rng.randint(0, v.shape[1], v.shape[0])
v_[np.arange(v.shape[0]), i_] = 1 - v_[np.arange(v.shape[0]), i_]
fe_ = self._free_energy(v_)
return v.shape[1] * logistic_sigmoid(fe_ - fe, log=True)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X, = check_arrays(X, sparse_format='csr', dtype=np.float)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches))
verbose = self.verbose
for iteration in xrange(self.n_iter):
pl = 0.
if verbose:
begin = time.time()
for batch_slice in batch_slices:
pl_batch = self._fit(X[batch_slice], rng)
if verbose:
pl += pl_batch.sum()
if verbose:
pl /= n_samples
end = time.time()
print("Iteration %d, pseudo-likelihood = %.2f, time = %.2fs"
% (iteration, pl, end - begin))
return self
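# --------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# scikit-learn module). It exercises fit() and transform() on a tiny binary
# toy matrix and only runs when this file is executed directly, so importing
# the module is unaffected.
# --------------------------------------------------------------------------
if __name__ == "__main__":
    X_demo = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    rbm_demo = BernoulliRBM(n_components=2, learning_rate=0.1, n_iter=20,
                            random_state=0)
    rbm_demo.fit(X_demo)
    # Hidden-unit activation probabilities P(h=1|v) for each training sample.
    print(rbm_demo.transform(X_demo))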
| bsd-3-clause |
bharcode/MachineLearning | commons_ml/Logistic_Regression/Logistic_Binary_Classification/Scripts/logistic_regression.py | 2 | 5789 | #!/usr/bin/env python
# logistic_regression.py
# Author : Saimadhu
# Date: 19-March-2017
# About: Implementing a Logistic Regression classifier to predict whom a voter will vote for.
# Required Python Packages
import pandas as pd
import numpy as np
import pdb
import plotly.plotly as py
import plotly.graph_objs as go
# import plotly.plotly as py
# from plotly.graph_objs import *
py.sign_in('dataaspirant', 'RhJdlA1OsXsTjcRA0Kka')
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
# Files
DATA_SET_PATH = "../Inputs/anes_dataset.csv"
def dataset_headers(dataset):
"""
To get the dataset header names
:param dataset: loaded dataset into pandas DataFrame
:return: list of header names
"""
return list(dataset.columns.values)
def unique_observations(dataset, header, method=1):
"""
To get unique observations in the loaded pandas DataFrame column
:param dataset:
:param header:
:param method: Method used to find unique observations (default method=1 for pandas, method=0 for numpy)
:return:
"""
try:
if method == 0:
# With Numpy
observations = np.unique(dataset[[header]])
elif method == 1:
# With Pandas
observations = pd.unique(dataset[header].values.ravel())
else:
observations = None
print "Wrong method type, Use 1 for pandas and 0 for numpy"
except Exception as e:
observations = None
print "Error: {error_msg} /n Please check the inputs once..!".format(error_msg=e.message)
return observations
def feature_target_frequency_relation(dataset, f_t_headers):
"""
To get the frequency relation between targets and the unique feature observations
:param dataset:
:param f_t_headers: feature and target header
:return: feature unique observations dictionary of frequency count dictionary
"""
feature_unique_observations = unique_observations(dataset, f_t_headers[0])
unique_targets = unique_observations(dataset, f_t_headers[1])
frequencies = {}
for feature in feature_unique_observations:
frequencies[feature] = {unique_targets[0]: len(
dataset[(dataset[f_t_headers[0]] == feature) & (dataset[f_t_headers[1]] == unique_targets[0])]),
unique_targets[1]: len(
dataset[(dataset[f_t_headers[0]] == feature) & (dataset[f_t_headers[1]] == unique_targets[1])])}
return frequencies
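# Shape of the value returned above, shown on hypothetical data (kept as a
# comment so the script's behaviour is unchanged): for a feature 'educ' with
# classes 1..3 and a binary target 'vote', the result looks like
#   {1: {0: 12, 1: 8}, 2: {0: 30, 1: 25}, 3: {0: 7, 1: 14}}
# i.e. each unique feature value maps to a per-target frequency count.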
def feature_target_histogram(feature_target_frequencies, feature_header):
"""
:param feature_target_frequencies:
:param feature_header:
:return:
"""
keys = feature_target_frequencies.keys()
y0 = [feature_target_frequencies[key][0] for key in keys]
y1 = [feature_target_frequencies[key][1] for key in keys]
trace1 = go.Bar(
x=keys,
y=y0,
name='Clinton'
)
trace2 = go.Bar(
x=keys,
y=y1,
name='Dole'
)
data = [trace1, trace2]
layout = go.Layout(
barmode='group',
title='Feature :: ' + feature_header + ' Clinton Vs Dole votes Frequency',
xaxis=dict(title="Feature :: " + feature_header + " classes"),
yaxis=dict(title="Votes Frequency")
)
fig = go.Figure(data=data, layout=layout)
# plot_url = py.plot(fig, filename=feature_header + ' - Target - Histogram')
py.image.save_as(fig, filename=feature_header + '_Target_Histogram.png')
def train_logistic_regression(train_x, train_y):
"""
Training logistic regression model with train dataset features(train_x) and target(train_y)
:param train_x:
:param train_y:
:return:
"""
logistic_regression_model = LogisticRegression()
logistic_regression_model.fit(train_x, train_y)
return logistic_regression_model
def model_accuracy(trained_model, features, targets):
"""
Get the accuracy score of the model
:param trained_model:
:param features:
:param targets:
:return:
"""
accuracy_score = trained_model.score(features, targets)
return accuracy_score
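# Minimal sketch of how the two helpers above compose (hypothetical arrays,
# kept as comments so nothing extra runs when the script executes):
#   model = train_logistic_regression(train_x, train_y)
#   print model_accuracy(model, test_x, test_y)   # accuracy in [0, 1]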
def main():
"""
Logistic Regression classifier main
:return:
"""
# Load the data set for training and testing the logistic regression classifier
dataset = pd.read_csv(DATA_SET_PATH)
print "Number of Observations :: ", len(dataset)
# Get the first observation
print dataset.head()
headers = dataset_headers(dataset)
print "Data set headers :: {headers}".format(headers=headers)
training_features = ['TVnews', 'PID', 'age', 'educ', 'income']
target = 'vote'
# Train , Test data split
train_x, test_x, train_y, test_y = train_test_split(dataset[training_features], dataset[target], train_size=0.7)
print "train_x size :: ", train_x.shape
print "train_y size :: ", train_y.shape
print "test_x size :: ", test_x.shape
print "test_y size :: ", test_y.shape
print "edu_target_frequencies :: ", feature_target_frequency_relation(dataset, [training_features[3], target])
for feature in training_features:
feature_target_frequencies = feature_target_frequency_relation(dataset, [feature, target])
feature_target_histogram(feature_target_frequencies, feature)
# Training Logistic regression model
trained_logistic_regression_model = train_logistic_regression(train_x, train_y)
train_accuracy = model_accuracy(trained_logistic_regression_model, train_x, train_y)
# Testing the logistic regression model
test_accuracy = model_accuracy(trained_logistic_regression_model, test_x, test_y)
print "Train Accuracy :: ", train_accuracy
print "Test Accuracy :: ", test_accuracy
if __name__ == "__main__":
main() | gpl-2.0 |
efabless/openlane | scripts/compare_regression_design.py | 1 | 5347 | # Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import subprocess
import csv
import pandas as pd
import os
parser = argparse.ArgumentParser(
description="compare one design from a regression result to a benchmark result")
parser.add_argument('--benchmark', '-b', action='store', required=True,
help="The csv file from which to extract the benchmark results")
parser.add_argument('--regression_results', '-r', action='store', required=True,
help="The csv file to be tested")
parser.add_argument('--design', '-d', action='store', required=True,
help="The design to compare for between the two scripts. Same as -design in flow.tcl")
parser.add_argument('--run_path', '-rp', action='store', required=True,
help="The run path, will be used to search for any missing files.")
parser.add_argument('--output_report', '-o', action='store', required=True,
help="The file to print the final report in")
args = parser.parse_args()
benchmark_file = args.benchmark
regression_results_file = args.regression_results
output_report_file = args.output_report
design = args.design
run_path = args.run_path
tolerance = {'general_tolerance':1, 'tritonRoute_violations':2, 'Magic_violations':10, 'antenna_violations':10, 'lvs_total_errors':0}
critical_statistics = ['tritonRoute_violations','Magic_violations', 'antenna_violations','lvs_total_errors']
magic_file_extensions = ['gds','mag','lef','spice']
def compare_vals(benchmark_value, regression_value, param):
if str(benchmark_value) == "-1":
return True
if str(regression_value) == "-1":
return False
tol = 0-tolerance['general_tolerance']
if param in tolerance.keys():
tol = 0-tolerance[param]
if float(benchmark_value) - float(regression_value) >= tol:
return True
else:
return False
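# Worked examples of the tolerance check above (hypothetical numbers, shown as
# comments so the script's behaviour is unchanged):
#   compare_vals(10, 11, 'Magic_violations')   -> True  (within the tolerance of 10)
#   compare_vals(0, 5, 'lvs_total_errors')     -> False (tolerance is 0)
#   compare_vals(-1, 7, 'antenna_violations')  -> True  (benchmark value -1 means "ignore")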
def findIdx(header, label):
for idx in range(len(header)):
if label == header[idx]:
return idx
else:
return -1
def parseCSV(csv_file):
design_out = dict()
csvOpener = open(csv_file, 'r')
csvData = csvOpener.read().split("\n")
headerInfo = csvData[0].split(",")
designPathIdx = findIdx(headerInfo, "design")
if designPathIdx == -1:
print("invalid report. No design paths.")
for i in range(1, len(csvData)):
if len(csvData[i]):
entry = csvData[i].split(",")
designPath=entry[designPathIdx]
if designPath == design:
for idx in range(len(headerInfo)):
if idx != designPathIdx:
design_out[headerInfo[idx]] = entry[idx]
break
return design_out
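# The dict built above maps every CSV header (except the design path column)
# to the matching design's value, all kept as strings, e.g. (illustrative only):
#   {'flow_status': 'flow_completed', 'tritonRoute_violations': '0', ...}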
def criticalMistmatch(benchmark, regression_result):
if len(benchmark) == 0 or len(regression_result) == 0:
return False, "Nothing to compare with"
for stat in critical_statistics:
if compare_vals(benchmark[stat],regression_result[stat],stat):
continue
else:
if str(regression_result[stat]) == "-1":
return True, "The test didn't pass the stage responsible for "+ stat
else:
return True, "The results of " +stat+" mismatched with the benchmark"
return False, "The test passed"
def compareStatus(benchmark,regression_result):
if len(benchmark) == 0 or len(regression_result) == 0:
return False, "Nothing to compare with"
elif "fail" in str(benchmark["flow_status"]):
return False, "The test passed"
elif "fail" in str(regression_result["flow_status"]):
return True, "The flow didn't complete for the user design after magic drc."
else:
return False, "The test passed"
def missingResultingFiles(design):
searchPrefix = str(run_path) + '/results/magic/' + str(design['design_name'])
for ext in magic_file_extensions:
File = searchPrefix+'.'+str(ext)
if os.path.isfile(File) == False:
return True, "File "+File+" is missing from the results directory"
return False, "The test passed"
benchmark = parseCSV(benchmark_file)
regression_result = parseCSV(regression_results_file)
testFail, reasonWhy = criticalMistmatch(benchmark,regression_result)
report = str(design)
if testFail:
report += ",FAILED,"+reasonWhy+"\n"
else:
testFail, reasonWhy = missingResultingFiles(regression_result)
if testFail:
report += ",FAILED,"+reasonWhy+"\n"
else:
testFail, reasonWhy = compareStatus(benchmark,regression_result)
if testFail:
report += ",FAILED,"+reasonWhy+"\n"
else:
report += ",PASSED,"+reasonWhy+"\n"
outputReportOpener = open(output_report_file, 'a+')
outputReportOpener.write(report)
outputReportOpener.close() | apache-2.0 |
Kongsea/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_functions.py | 10 | 18972 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _fill_array(arr, seq, fillvalue=0):
"""
Recursively fills padded arr with elements from seq.
If the length of seq is less than the padded length of arr, fillvalue is used.
Args:
arr: Padded tensor of shape [batch_size, ..., max_padded_dim_len].
seq: Non-padded list of data samples of shape
[batch_size, ..., padded_dim(None)]
fillvalue: Default fillvalue to use.
"""
if arr.ndim == 1:
try:
len_ = len(seq)
except TypeError:
len_ = 0
arr[:len_] = seq
arr[len_:] = fillvalue
else:
for subarr, subseq in six.moves.zip_longest(arr, seq, fillvalue=()):
_fill_array(subarr, subseq, fillvalue)
def _pad_if_needed(batch_key_item, fillvalue=0):
""" Returns padded batch.
Args:
batch_key_item: List of data samples of any type with shape
[batch_size, ..., padded_dim(None)].
fillvalue: Default fillvalue to use.
Returns:
Padded with zeros tensor of same type and shape
[batch_size, ..., max_padded_dim_len].
Raises:
ValueError if data samples have different shapes (except last padded dim).
"""
shapes = [seq.shape[:-1] if len(seq.shape) > 0 else -1
for seq in batch_key_item]
if not all(shapes[0] == x for x in shapes):
raise ValueError("Array shapes must match.")
last_length = [seq.shape[-1] if len(seq.shape) > 0 else 0
for seq in batch_key_item]
if all([x == last_length[0] for x in last_length]):
return batch_key_item
batch_size = len(batch_key_item)
max_sequence_length = max(last_length)
result_batch = np.zeros(
shape=[batch_size] + list(shapes[0]) + [max_sequence_length],
dtype=batch_key_item[0].dtype)
_fill_array(result_batch, batch_key_item, fillvalue)
return result_batch
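# Worked example (hypothetical input, shown as a comment): two 1-D samples of
# unequal length are zero-padded along the last axis to a common length of 3:
#   _pad_if_needed([np.array([1, 2]), np.array([3, 4, 5])])
#   -> array([[1, 2, 0],
#             [3, 4, 5]])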
def _get_integer_indices_for_next_batch(
batch_indices_start, batch_size, epoch_end, array_length,
current_epoch, total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
current_epoch: Integer, the epoch number has been emitted.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [j % array_length for j in
range(batch_indices_start, batch_indices_end)]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
# Now we might have emitted more data for expected epochs. Need to trim.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
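# Worked example (hypothetical values, shown as a comment): with
# array_length=5, batch_size=4, batch_indices_start=3 and epoch_end=4, the
# batch wraps around the end of the array and the returned indices are
# [3, 4, 0, 1]; current_epoch increases by one because the epoch-end index 4
# was emitted.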
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
class _GeneratorFeedFn(object):
"""Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""
def __init__(self,
placeholders,
generator,
batch_size,
random_start=False,
seed=None,
num_epochs=None,
pad_value=None):
first_sample = next(generator())
if len(placeholders) != len(first_sample):
raise ValueError("Expected {} placeholders; got {}.".format(
len(first_sample), len(placeholders)))
self._keys = sorted(list(first_sample.keys()))
self._col_placeholders = placeholders
self._generator_function = generator
self._iterator = generator()
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
self._pad_value = pad_value
random.seed(seed)
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
list_dict = {}
list_dict_size = 0
while list_dict_size < self._batch_size:
try:
data_row = next(self._iterator)
except StopIteration:
self._epoch += 1
self._iterator = self._generator_function()
data_row = next(self._iterator)
for index, key in enumerate(self._keys):
if key not in data_row.keys():
raise KeyError("key mismatch between dicts emitted by GenFun "
"Expected {} keys; got {}".format(
self._keys, data_row.keys()))
list_dict.setdefault(self._col_placeholders[index],
list()).append(data_row[key])
list_dict_size += 1
if self._pad_value is not None:
feed_dict = {key: np.asarray(_pad_if_needed(item, self._pad_value))
for key, item in list(list_dict.items())}
else:
feed_dict = {key: np.asarray(item)
for key, item in list(list_dict.items())}
return feed_dict
def _enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None,
pad_value=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given (`OrderedDict` of) array
or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
`Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
numpy arrays, the first enqueued `Tensor` contains the row number.
Args:
data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
into the queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
pad_value: default value for dynamic padding of data samples, if provided.
Returns:
A queue filled with the rows of the given (`OrderedDict` of) array or
`DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
arrays, a numpy `ndarray`, or a generator producing these.
NotImplementedError: padding and shuffling data at the same time.
NotImplementedError: padding usage with non generator data type.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64] + [
dtypes.as_dtype(col.dtype) for col in data.values()
]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif isinstance(data, tp.FunctionType):
x_first_el = six.next(data())
x_first_keys = sorted(x_first_el.keys())
x_first_values = [x_first_el[key] for key in x_first_keys]
types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
queue_shapes = [col.shape for col in x_first_values]
get_feed_fn = _GeneratorFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [
dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
pad_data = pad_value is not None
if pad_data and get_feed_fn is not _GeneratorFeedFn:
raise NotImplementedError(
"padding is only available with generator usage")
if shuffle and pad_data:
raise NotImplementedError(
"padding and shuffling data at the same time is not implemented")
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(
capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
elif pad_data:
min_after_dequeue = 0 # just for the summary text
queue_shapes = list(map(
lambda x: tuple(list(x[:-1]) + [None]) if len(x) > 0 else x,
queue_shapes))
queue = data_flow_ops.PaddingFIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(
capacity, dtypes=types, shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
if not pad_data:
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
else:
feed_fns.append(
get_feed_fn(
placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs,
pad_value=pad_value))
runner = fqr._FeedingQueueRunner( # pylint: disable=protected-access
queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
summary.scalar(summary_name, full)
return queue
| apache-2.0 |
glorizen/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/dates.py | 54 | 33991 | #!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing
on the shoulders of python :mod:`datetime`, the add-on modules
:mod:`pytz` and :mod:`dateutils`. :class:`datetime` objects are
converted to floating point numbers which represent the number of days
since 0001-01-01 UTC. The helper functions :func:`date2num`,
:func:`num2date` and :func:`drange` are used to facilitate easy
conversion to and from :mod:`datetime` and numeric ranges.
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pytz.sourceforge.net>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <http://labix.org/python-dateutil>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specifed days of the month
* :class:`WeekdayLocator`: Locate days of the week, eg MO, TU
* :class:`MonthLocator`: locate months, eg 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutils.rrule` (`dateutil
<https://moin.conectiva.com.br/DateUtil>`_) which allow almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
Date formatters
---------------
Here are all the date formatters:
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
import re, time, math, datetime
import pytz
# compatibility for 2008c and older versions
try:
import pytz.zoneinfo
except ImportError:
pytz.zoneinfo = pytz.tzinfo
pytz.zoneinfo.UTC = pytz.UTC
import matplotlib
import numpy as np
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
from pytz import timezone
from dateutil.rrule import rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, \
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY
from dateutil.relativedelta import relativedelta
import dateutil.parser
__all__ = ( 'date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'DateLocator', 'RRuleLocator',
'YearLocator', 'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'rrule', 'MO', 'TU', 'WE', 'TH', 'FR',
'SA', 'SU', 'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
UTC = pytz.timezone('UTC')
def _get_rc_timezone():
s = matplotlib.rcParams['timezone']
return pytz.timezone(s)
HOURS_PER_DAY = 24.
MINUTES_PER_DAY = 60.*HOURS_PER_DAY
SECONDS_PER_DAY = 60.*MINUTES_PER_DAY
MUSECONDS_PER_DAY = 1e6*SECONDS_PER_DAY
SEC_PER_MIN = 60
SEC_PER_HOUR = 3600
SEC_PER_DAY = SEC_PER_HOUR * 24
SEC_PER_WEEK = SEC_PER_DAY * 7
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` to the Gregorian date as UTC float days,
preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if hasattr(dt, 'hour'):
base += (dt.hour/HOURS_PER_DAY + dt.minute/MINUTES_PER_DAY +
dt.second/SECONDS_PER_DAY + dt.microsecond/MUSECONDS_PER_DAY
)
return base
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
"""
if tz is None: tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix)
remainder = float(x) - ix
hour, remainder = divmod(24*remainder, 1)
minute, remainder = divmod(60*remainder, 1)
second, remainder = divmod(60*remainder, 1)
microsecond = int(1e6*remainder)
if microsecond<10: microsecond=0 # compensate for rounding errors
dt = datetime.datetime(
dt.year, dt.month, dt.day, int(hour), int(minute), int(second),
microsecond, tzinfo=UTC).astimezone(tz)
if microsecond>999990: # compensate for rounding errors
dt += datetime.timedelta(microseconds=1e6-microsecond)
return dt
class strpdate2num:
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
def datestr2num(d):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`. *d* can be a single string or a
sequence of strings.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d)
return date2num(dt)
else:
return date2num([dateutil.parser.parse(s) for s in d])
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC.
"""
if not cbook.iterable(d): return _to_ordinalf(d)
else: return np.asarray([_to_ordinalf(val) for val in d])
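# Illustrative example (not part of the original module, kept as a comment):
# a naive datetime maps to its proleptic Gregorian ordinal plus a fraction of
# a day for the time, so noon on 2009-01-01 becomes 733408.5:
#   date2num(datetime.datetime(2009, 1, 1, 12, 0))   # -> 733408.5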
def julian2num(j):
'Convert a Julian date (or sequence) to a matplotlib date (or sequence).'
if cbook.iterable(j): j = np.asarray(j)
return j + 1721425.5
def num2julian(n):
'Convert a matplotlib date (or sequence) to a Julian date (or sequence).'
if cbook.iterable(n): n = np.asarray(n)
return n - 1721425.5
def num2date(x, tz=None):
"""
*x* is a float value which gives number of days (fraction part
represents hours, minutes, seconds) since 0001-01-01 00:00:00 UTC.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None: tz = _get_rc_timezone()
if not cbook.iterable(x): return _from_ordinalf(x, tz)
else: return [_from_ordinalf(val, tz) for val in x]
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
step = (delta.days + delta.seconds/SECONDS_PER_DAY +
delta.microseconds/MUSECONDS_PER_DAY)
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
return np.arange(f1, f2, step)
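# Illustrative example (not part of the original module, kept as a comment):
# three days sampled every 12 hours give six ordinal floats; the end point is
# excluded, as with numpy.arange:
#   d1 = datetime.datetime(2009, 1, 1)
#   d2 = datetime.datetime(2009, 1, 4)
#   drange(d1, d2, datetime.timedelta(hours=12))   # 733408.0, 733408.5, ..., 733410.5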
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is an :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None: tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _findall(self, text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i=j+1
return sites
# Dalke: I hope I did this math right. Every 28 years the
# calendar repeats, except through century leap years excepting
# the 400 year leap years. But only if you're using the Gregorian
# calendar.
def strftime(self, dt, fmt):
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year > 1900:
return cbook.unicode_safe(dt.strftime(fmt))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6*(delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year)//28)*28
timetuple = dt.timetuple()
s1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = self._findall(s1, str(year))
s2 = time.strftime(fmt, (year+28,) + timetuple[1:])
sites2 = self._findall(s2, str(year+28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%4d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site+4:]
return cbook.unicode_safe(s)
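# Added illustration (not in the original source): formatting a single tick
# value.  The value handed to the formatter is a date2num float; the expected
# output string assumes the rc timezone is UTC.
def _example_dateformatter():
    import datetime
    fmt = DateFormatter('%Y-%m-%d %H:%M')
    x = date2num(datetime.datetime(2009, 5, 17, 13, 45))
    return fmt(x)                      # '2009-05-17 13:45' with rc timezone UTC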
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None: tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind>=len(self.t) or ind<=0: return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None):
self._locator = locator
self._formatter = DateFormatter("%b %d %Y %H:%M:%S %Z", tz)
self._tz = tz
def __call__(self, x, pos=0):
scale = float( self._locator._get_unit() )
if ( scale == 365.0 ):
self._formatter = DateFormatter("%Y", self._tz)
elif ( scale == 30.0 ):
self._formatter = DateFormatter("%b %Y", self._tz)
elif ( (scale == 1.0) or (scale == 7.0) ):
self._formatter = DateFormatter("%b %d %Y", self._tz)
elif ( scale == (1.0/24.0) ):
self._formatter = DateFormatter("%H:%M:%S %Z", self._tz)
elif ( scale == (1.0/(24*60)) ):
self._formatter = DateFormatter("%H:%M:%S %Z", self._tz)
elif ( scale == (1.0/(24*3600)) ):
self._formatter = DateFormatter("%H:%M:%S %Z", self._tz)
else:
self._formatter = DateFormatter("%b %d %Y %H:%M:%S %Z", self._tz)
return self._formatter(x, pos)
class rrulewrapper:
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
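# Added sketch (not original code): rrulewrapper records its construction
# arguments so that set() can rebuild the underlying dateutil rrule.  WEEKLY
# and TU are assumed to be the dateutil.rrule constants imported near the top
# of this module.
def _example_rrulewrapper():
    rule = rrulewrapper(WEEKLY, byweekday=TU, interval=2)
    rule.set(interval=1)               # rebuilds the rrule with the new interval
    return rule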
class DateLocator(ticker.Locator):
hms0d = {'byhour':0, 'byminute':0,'bysecond':0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None: tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
self.tz = tz
def datalim_to_dt(self):
dmin, dmax = self.axis.get_data_interval()
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
vmin, vmax = self.axis.get_view_interval()
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def nonsingular(self, vmin, vmax):
unit = self._get_unit()
vmin -= 2*unit
vmax += 2*unit
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try: dmin, dmax = self.viewlim_to_dt()
except ValueError: return []
if dmin>dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
self.rule.set(dtstart=dmin-delta, until=dmax+delta)
dates = self.rule.between(dmin, dmax, True)
return date2num(dates)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
if ( freq == YEARLY ):
return 365
elif ( freq == MONTHLY ):
return 30
elif ( freq == WEEKLY ):
return 7
elif ( freq == DAILY ):
return 1
elif ( freq == HOURLY ):
return (1.0/24.0)
elif ( freq == MINUTELY ):
return (1.0/(24*60))
elif ( freq == SECONDLY ):
return (1.0/(24*3600))
else:
# error
return -1 #or should this just return '1'?
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
if dmin>dmax:
dmax, dmin = dmin, dmax
delta = relativedelta(dmax, dmin)
self.rule.set(dtstart=dmin-delta, until=dmax+delta)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin: vmin=dmin
vmax = self.rule.after(dmax, True)
if not vmax: vmax=dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`RRuleLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None):
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if ( self._freq == YEARLY ):
return 365.0
elif ( self._freq == MONTHLY ):
return 30.0
elif ( self._freq == WEEKLY ):
return 7.0
elif ( self._freq == DAILY ):
return 1.0
elif ( self._freq == HOURLY ):
return 1.0/24
elif ( self._freq == MINUTELY ):
return 1.0/(24*60)
elif ( self._freq == SECONDLY ):
return 1.0/(24*3600)
else:
# error
return -1
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
numYears = (delta.years * 1.0)
numMonths = (numYears * 12.0) + delta.months
numDays = (numMonths * 31.0) + delta.days
numHours = (numDays * 24.0) + delta.hours
numMinutes = (numHours * 60.0) + delta.minutes
numSeconds = (numMinutes * 60.0) + delta.seconds
numticks = 5
# self._freq = YEARLY
interval = 1
bymonth = 1
bymonthday = 1
byhour = 0
byminute = 0
bysecond = 0
if ( numYears >= numticks ):
self._freq = YEARLY
elif ( numMonths >= numticks ):
self._freq = MONTHLY
bymonth = range(1, 13)
if ( (0 <= numMonths) and (numMonths <= 14) ):
interval = 1 # show every month
elif ( (15 <= numMonths) and (numMonths <= 29) ):
interval = 3 # show every 3 months
elif ( (30 <= numMonths) and (numMonths <= 44) ):
interval = 4 # show every 4 months
else: # 45 <= numMonths <= 59
interval = 6 # show every 6 months
elif ( numDays >= numticks ):
self._freq = DAILY
bymonth = None
bymonthday = range(1, 32)
if ( (0 <= numDays) and (numDays <= 9) ):
interval = 1 # show every day
elif ( (10 <= numDays) and (numDays <= 19) ):
interval = 2 # show every 2 days
elif ( (20 <= numDays) and (numDays <= 49) ):
interval = 3 # show every 3 days
elif ( (50 <= numDays) and (numDays <= 99) ):
interval = 7 # show every 1 week
else: # 100 <= numDays <= ~150
interval = 14 # show every 2 weeks
elif ( numHours >= numticks ):
self._freq = HOURLY
bymonth = None
bymonthday = None
byhour = range(0, 24) # show every hour
if ( (0 <= numHours) and (numHours <= 14) ):
interval = 1 # show every hour
elif ( (15 <= numHours) and (numHours <= 30) ):
interval = 2 # show every 2 hours
elif ( (30 <= numHours) and (numHours <= 45) ):
interval = 3 # show every 3 hours
elif ( (45 <= numHours) and (numHours <= 68) ):
interval = 4 # show every 4 hours
elif ( (68 <= numHours) and (numHours <= 90) ):
interval = 6 # show every 6 hours
else: # 90 <= numHours <= 120
interval = 12 # show every 12 hours
elif ( numMinutes >= numticks ):
self._freq = MINUTELY
bymonth = None
bymonthday = None
byhour = None
byminute = range(0, 60)
if ( numMinutes > (10.0 * numticks) ):
interval = 10
# end if
elif ( numSeconds >= numticks ):
self._freq = SECONDLY
bymonth = None
bymonthday = None
byhour = None
byminute = None
bysecond = range(0, 60)
if ( numSeconds > (10.0 * numticks) ):
interval = 10
# end if
else:
# do what?
# microseconds as floats, but floats from what reference point?
pass
rrule = rrulewrapper( self._freq, interval=interval, \
dtstart=dmin, until=dmax, \
bymonth=bymonth, bymonthday=bymonthday, \
byhour=byhour, byminute = byminute, \
bysecond=bysecond )
locator = RRuleLocator(rrule, self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
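# Added usage sketch (not original code): the usual pairing of AutoDateLocator
# with AutoDateFormatter on an existing Axes ``ax`` whose x values are
# date2num floats.
def _example_auto_date_axis(ax):
    loc = AutoDateLocator()
    ax.xaxis.set_major_locator(loc)
    ax.xaxis.set_major_formatter(AutoDateFormatter(loc))
    return loc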
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = { 'month' : month,
'day' : day,
'hour' : 0,
'minute' : 0,
'second' : 0,
'tzinfo' : tz
}
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 365
def __call__(self):
dmin, dmax = self.viewlim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
ticks = [dmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year>=ymax: return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None: bymonth=range(1,13)
o = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 30
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
o = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 7
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None: bymonthday=range(1,32)
o = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, o, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None: byhour=range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1/24.
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None: byminute=range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1./(24*60)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None: bysecond=range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1./(24*60*60)
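# Added usage sketch (not original code): explicit daily major ticks and
# 6-hourly minor ticks on an existing Axes ``ax`` plotted against date2num
# floats.
def _example_fixed_date_axis(ax):
    ax.xaxis.set_major_locator(DayLocator())
    ax.xaxis.set_major_formatter(DateFormatter('%b %d'))
    ax.xaxis.set_minor_locator(HourLocator(byhour=range(0, 24, 6)))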
def _close_to_dt(d1, d2, epsilon=5):
'Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.'
delta = d2-d1
mus = abs(delta.days*MUSECONDS_PER_DAY + delta.seconds*1e6 +
delta.microseconds)
assert(mus<epsilon)
def _close_to_num(o1, o2, epsilon=5):
'Assert that float ordinals *o1* and *o2* are within *epsilon* microseconds.'
delta = abs((o2-o1)*MUSECONDS_PER_DAY)
assert(delta<epsilon)
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
spd = 24.*3600.
return 719163 + np.asarray(e)/spd
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
spd = 24.*3600.
return (np.asarray(d)-719163)*spd
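# Added sketch (not original code): the Unix epoch maps to 719163.0 days since
# 0001, and num2epoch inverts epoch2num exactly.
def _example_epoch_roundtrip():
    d = epoch2num(0.0)                 # 1970-01-01 00:00:00 -> 719163.0
    assert num2epoch(d) == 0.0
    return d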
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar: return ret[0]
else: return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span==0: span = 1/24.
minutes = span*24*60
hours = span*24
days = span
weeks = span/7.
months = span/31. # approx
years = span/365.
if years>numticks:
locator = YearLocator(int(years/numticks), tz=tz) # define
fmt = '%Y'
elif months>numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif weeks>numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days>numticks:
locator = DayLocator(interval=int(math.ceil(days/numticks)), tz=tz)
fmt = '%b %d'
elif hours>numticks:
locator = HourLocator(interval=int(math.ceil(hours/numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif minutes>numticks:
locator = MinuteLocator(interval=int(math.ceil(minutes/numticks)), tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
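# Added sketch (not original code): for a span of roughly 90 days the factory
# falls through to the weekly branch and returns a WeekdayLocator paired with
# a '%a, %b %d' DateFormatter.
def _example_ticker_factory():
    locator, formatter = date_ticker_factory(90)
    return locator, formatter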
def seconds(s):
'Return seconds as days.'
return float(s)/SEC_PER_DAY
def minutes(m):
'Return minutes as days.'
return float(m)/MINUTES_PER_DAY
def hours(h):
'Return hours as days.'
return h/24.
def weeks(w):
'Return weeks as days.'
return w*7.
class DateConverter(units.ConversionInterface):
def axisinfo(unit):
'return the unit AxisInfo'
if unit=='date':
majloc = AutoDateLocator()
majfmt = AutoDateFormatter(majloc)
return units.AxisInfo(
majloc = majloc,
majfmt = majfmt,
label='',
)
else: return None
axisinfo = staticmethod(axisinfo)
def convert(value, unit):
if units.ConversionInterface.is_numlike(value): return value
return date2num(value)
convert = staticmethod(convert)
def default_units(x):
'Return the default unit for *x* or None'
return 'date'
default_units = staticmethod(default_units)
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
if __name__=='__main__':
#tz = None
tz = pytz.timezone('US/Pacific')
#tz = UTC
dt = datetime.datetime(1011, 10, 9, 13, 44, 22, 101010, tzinfo=tz)
x = date2num(dt)
_close_to_dt(dt, num2date(x, tz))
#tz = _get_rc_timezone()
d1 = datetime.datetime( 2000, 3, 1, tzinfo=tz)
d2 = datetime.datetime( 2000, 3, 5, tzinfo=tz)
#d1 = datetime.datetime( 2002, 1, 5, tzinfo=tz)
#d2 = datetime.datetime( 2003, 12, 1, tzinfo=tz)
delta = datetime.timedelta(hours=6)
dates = drange(d1, d2, delta)
# MGDTODO: Broken on transforms branch
#print 'orig', d1
#print 'd2n and back', num2date(date2num(d1), tz)
from _transforms import Value, Interval
v1 = Value(date2num(d1))
v2 = Value(date2num(d2))
dlim = Interval(v1,v2)
vlim = Interval(v1,v2)
#locator = HourLocator(byhour=(3,15), tz=tz)
#locator = MinuteLocator(byminute=(15,30,45), tz=tz)
#locator = YearLocator(base=5, month=7, day=4, tz=tz)
#locator = MonthLocator(bymonthday=15)
locator = DayLocator(tz=tz)
locator.set_data_interval(dlim)
locator.set_view_interval(vlim)
dmin, dmax = locator.autoscale()
vlim.set_bounds(dmin, dmax)
ticks = locator()
fmt = '%Y-%m-%d %H:%M:%S %Z'
formatter = DateFormatter(fmt, tz)
#for t in ticks: print formatter(t)
for t in dates: print formatter(t)
| agpl-3.0 |
weissercn/adaptive_binning_chisquared_2sam | adaptive_binning_chisquared_2sam/chi2_adaptive_binning.py | 1 | 13270 | from __future__ import print_function
import sys
import chi2_plots
import random
import ast
import pickle
"""
This script can be used to get the p value for the Miranda method (i.e. an
adaptively binned chi-squared test). It takes input files with column vectors
corresponding to features and labels.
"""
print(__doc__)
import sys
#sys.path.insert(0,'../..')
import os
from scipy import stats
import numpy as np
from sklearn import preprocessing
import matplotlib.pylab as plt
#import matplotlib.pyplot as plt
#import numpy.matlib
#from matplotlib.colors import Normalize
#from sklearn.preprocessing import StandardScaler
##############################################################################
# Setting parameters
#
#orig_name= sys.argv[1]
#number_of_splits_list= ast.literal_eval(sys.argv[2])
#print("number_of_splits_list : ", number_of_splits_list)
#dim_list = ast.literal_eval(sys.argv[3])
#comp_file_list_list = ast.literal_eval(sys.argv[4])
def norm_highD_searchsorted(l_test):
l_test = np.array(l_test).tolist()
l_set = sorted(set(l_test))
pos = [0]*len(l_test)
pos_counter = 0
for item in l_set:
matches = [i for i in range(0,len(l_test)) if l_test[i]==item]
random.shuffle(matches)
for m in matches:
pos[m]= pos_counter
pos_counter+=1
pos = np.array(pos)
pos = pos/np.float(len(l_test)-1)
return pos
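# Added illustration (not part of the original script): the helper above maps
# each value of a 1-D sample to its rank scaled into [0, 1], breaking ties in
# a random order.
def _example_norm_highD_searchsorted():
    pos = norm_highD_searchsorted([0.3, 0.1, 0.2, 0.1])
    assert np.allclose(sorted(pos), [0.0, 1.0 / 3.0, 2.0 / 3.0, 1.0])
    return pos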
def chi2_adaptive_binning_wrapper(orig_title, orig_name, dim_list, comp_file_list_list,number_of_splits_list,systematics_fraction):
sample1_name="original"
sample2_name="modified"
#transform='uniform'
transform='StandardScalar'
#transform='fill01'
DEBUG = False
##############################################################################
for dim_index, dim_data in enumerate(dim_list):
print("We are now in "+str(dim_data) + " Dimensions")
#comp_file_list=[]
comp_file_list = comp_file_list_list[dim_index]
#for i in range(2):
#comp_file_list.append((os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/higher_dimensional_gauss/gauss_data/data_high" +str(dim_data)+"Dgauss_10000_0.5_0.1_0.0_{0}.txt".format(i),os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/higher_dimensional_gauss/gauss_data/data_high"+str(dim_data)+"Dgauss_10000_0.5_0.1_0.01_{0}.txt".format(i)))
#comp_file_list.append((os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_legendre_contrib0__1__10__sample_{0}.txt".format(i),os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/legendre/legendre_data/data_legendre_contrib0__1__9__sample_{0}.txt".format(i)))
#comp_file_list.append((os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_redefined_{1}D_1000_0.6_0.2_0.1_{0}.txt".format(i,dim_data),os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_redefined_{1}D_1000_0.6_0.2_0.075_{0}.txt".format(i,dim_data)))
#print(comp_file_list)
score_dict = {}
for number_of_splits in number_of_splits_list:
score_dict[str(number_of_splits)]=[]
counter = 0
for comp_file_0,comp_file_1 in comp_file_list:
print("Operating of files :"+comp_file_0+" "+comp_file_1)
#extracts data from the files
features_0=np.loadtxt(comp_file_0,dtype='d', ndmin=2)
features_1=np.loadtxt(comp_file_1,dtype='d', ndmin=2)
#only make a plot for the first data set
results_list=chi2_adaptive_binning(features_0,features_1,number_of_splits_list,systematics_fraction,orig_title,orig_name, not counter,DEBUG, transform)
for number_of_splits_index, number_of_splits in enumerate(number_of_splits_list):
score_dict[str(number_of_splits)].append(results_list[number_of_splits_index])
counter+=1
for number_of_splits in number_of_splits_list:
name = orig_name + "_" +str(dim_data) + "D_chi2_" + str(number_of_splits) + "_splits"
title= orig_title+ " " +str(dim_data) + "D " + str(number_of_splits) + " splits"
print("score_dict[{}] : ".format(number_of_splits), score_dict[str(number_of_splits)])
with open(name+"_p_values", "w") as test_statistics_file:
for score in score_dict[str(number_of_splits)]:
test_statistics_file.write(str(score)+"\n")
#if dim_data==2: os.rename("name_"+str(dim_data) + "D_" + str(number_of_splits) + "_splits"+"_bin_definitions_2D.png",name+"_bin_definitions_2D.png")
#if dim_data==1: os.rename("name_"+str(dim_data) + "D_" + str(number_of_splits) + "_splits"+"_bin_definitions_1D.png",name+"_binning_bin_definitions_1D.png")
chi2_plots.histo_plot_pvalue(score_dict[str(number_of_splits)],50,"p value","Frequency",title,name)
def chi2_adaptive_binning(features_0,features_1,number_of_splits_list,systematics_fraction=0.0,title = "title", name="name", PLOT = True, DEBUG = False, transform='StandardScalar'):
"""This function takes in two 2D arrays with all features being columns"""
max_number_of_splits = np.max(number_of_splits_list)
#determine how many data points are in each sample
no_0=features_0.shape[0]
no_1=features_1.shape[0]
print("features_0.shape : ", features_0.shape)
no_dim = features_0.shape[1]
#Give all samples in file 0 the label 0 and in file 1 the feature 1
label_0=np.zeros((no_0,1))
label_1=np.ones((no_1,1))
#Create an array containing samples and features.
data_0=np.c_[features_0,label_0]
data_1=np.c_[features_1,label_1]
features= np.r_[features_0,features_1]
labels= np.r_[label_0, label_1]
data=np.r_[data_0,data_1]
data_same=np.c_[features,labels]
#print("data : ",data)
#print("data_same : ", data_same)
#print("np.sum(data!=data_same) : ",np.sum(data!=data_same))
assert np.sum(data!=data_same)==0
assert (no_dim == data.shape[1]-1)
if no_dim==2:
plt.scatter(features[:,0],features[:,1], 0.1)
plt.savefig('test.png')
plt.clf()
if transform=='StandardScalar':
features = preprocessing.scale(features)
data = np.c_[features,labels]
if transform=='uniform':
#data_new2 = data[:,0]
data_new = norm_highD_searchsorted(data[:,0])
for D in range(1,no_dim):
temp = norm_highD_searchsorted(data[:,D])
data_new = np.c_[data_new,temp]
#data_new2= np.c_[data_new2,data[:,D]]
data_new = np.c_[data_new, np.r_[label_0,label_1]]
#data_new2= np.c_[data_new2,np.r_[label_0,label_1]]
print("data : ", data)
data = data_new
print("data new : ", data)
#print("data_new2 : ", data_new2)
#print("np.sum(data!=data_new2) : ",np.sum(data!=data_new2))
np.random.shuffle(data)
assert (no_dim == data.shape[1]-1)
labels=data[:,-1]
X_values= data[:,:-1]
X_max = np.amax(data,axis=0)[:-1]
X_min = np.amin(data,axis=0)[:-1]
X_total_width = (np.subtract(X_max,X_min))
del data
if transform=='fill01':
#Scaling
X_values = X_values - X_min[None,:]
X_values = X_values / X_total_width[None,:]
if True:
X_min = [0.]*no_dim
X_total_width = [1.]*no_dim
#b = X_values[:,0]
#print("b[b[:]>2].shape[0] : \n", b[b[:]>2].shape[0] )
data = np.concatenate((X_values, labels[:,None]), axis=1)
if no_dim==2:
plt.scatter(data[:,0],data[:,1],0.1)
plt.savefig('test_scaled.png')
#print("X_values.shape : ",X_values.shape)
starting_boundary = []
for i in range(no_dim):
starting_boundary.append([0.0,1.0])
#Each key has the following structure: the number of splits and, for each split, whether the bin was closer to (a) or further away from (b) the origin. The original bin is "0"
#For example "2ab" means it is the bin that was closer to the origin for the first split and further away for the second one.
bin_boundaries_dict = {'0' : np.array(starting_boundary)}
bin_points_dict = {'0' : data}
for split_number in range(1,1+max_number_of_splits):
for bin_key, bin_boundary in bin_boundaries_dict.items():
if str(split_number-1) in bin_key:
variances= np.var(bin_points_dict[bin_key][:,:-1], axis=0)
#print("\nvariances : ", variances)
dim_to_be_sliced = np.argmax(variances)
#print("dim_to_be_sliced : ",dim_to_be_sliced)
#print("bin_points_dict[bin_key] : ",bin_points_dict[bin_key])
#print("bin_points_dict[bin_key][:,dim_to_be_sliced] : ",bin_points_dict[bin_key][:,dim_to_be_sliced])
median = np.median(bin_points_dict[bin_key][:,dim_to_be_sliced])
#print("median : ",median)
a_bin_boundary, b_bin_boundary = bin_boundary.copy(), bin_boundary.copy()
#print("a_bin_boundary : ",a_bin_boundary)
a_bin_boundary[dim_to_be_sliced,1] = median
b_bin_boundary[dim_to_be_sliced,0] = median
bin_boundaries_dict[str(split_number)+bin_key[1:]+'a'] = a_bin_boundary
bin_boundaries_dict[str(split_number)+bin_key[1:]+'b'] = b_bin_boundary
a_points, b_points = [],[]
for event_number in range(bin_points_dict[bin_key].shape[0]):
if bin_points_dict[bin_key][event_number,dim_to_be_sliced] < median: a_points.append(bin_points_dict[bin_key][event_number,:].tolist())
else: b_points.append(bin_points_dict[bin_key][event_number,:].tolist())
bin_points_dict[str(split_number)+bin_key[1:]+'a'] = np.array(a_points)
bin_points_dict[str(split_number)+bin_key[1:]+'b'] = np.array(b_points)
#If a bin contains no particles it should be deleted
if len(a_points)==0:
del bin_points_dict[str(split_number)+bin_key[1:]+'a']
del bin_boundaries_dict[str(split_number)+bin_key[1:]+'a']
if len(b_points)==0:
del bin_points_dict[str(split_number)+bin_key[1:]+'b']
del bin_boundaries_dict[str(split_number)+bin_key[1:]+'b']
if PLOT: pickle.dump( bin_boundaries_dict, open( "bin_boundaries_dict.p", "wb" ) )
bins_sample01_dict= {}
signed_Scp2_dict= {}
results_list = []
for number_of_splits in number_of_splits_list:
print("\nnumber_of_splits : ",number_of_splits,"\nsystematics_fraction : ",systematics_fraction)
bins_sample0, bins_sample1 = [] , []
for bin_key, bin_points in bin_points_dict.items():
if str(number_of_splits) in bin_key:
labels_in_bin = bin_points[:,-1]
#print("labels_in_bin : ",labels_in_bin)
bin_sample0 = np.count_nonzero( labels_in_bin == 0)
bin_sample1 = np.count_nonzero( labels_in_bin == 1)
#print("bin_sample0 : ",bin_sample0)
#print("bin_sample1 : ",bin_sample1)
#simulate uncertainties
if(systematics_fraction*float(bin_sample0)!=0.): bin_sample0 += int(round(np.random.normal(0.,systematics_fraction*float(bin_sample0))))
if(systematics_fraction*float(bin_sample1)!=0.): bin_sample1 += int(round(np.random.normal(0.,systematics_fraction*float(bin_sample1))))
bins_sample01_dict[bin_key]=[bin_sample0,bin_sample1]
signed_Scp2_dict[bin_key] = np.square(float(bin_sample1-bin_sample0))/(float(bin_sample1)+float(bin_sample0)+np.square(float(bin_sample1)*systematics_fraction)+np.square(float(bin_sample0)*systematics_fraction))*np.sign(bin_sample1-bin_sample0)
#print("\n\nbin_sample0 : ",bin_sample0, "\n bins_sample0 : ", bins_sample0 )
#print("type(bin_sample0) : ",type(bin_sample0), " type(bins_sample0) : ",type(bins_sample0))
bins_sample0.append(bin_sample0)
#print(" bins_sample0 : ", bins_sample0, "\n\n" )
bins_sample1.append(bin_sample1)
bins_sample0, bins_sample1 = np.array(bins_sample0,dtype=float), np.array(bins_sample1, dtype=float)
print("bins_sample0 : ",bins_sample0,"\n bins_sample1 : ",bins_sample1)
#element wise subtraction and division
Scp2 = ((bins_sample1-bins_sample0)**2)/ (bins_sample1+bins_sample0+(systematics_fraction*bins_sample1)**2+(systematics_fraction*bins_sample0)**2 )
#Scp2 = np.divide(np.square(np.subtract(bins_sample1,bins_sample0)),np.add(bins_sample1,bins_sample0))
if DEBUG:
print(Scp2)
#nansum ignores all the contributions that are Not A Number (NAN)
Chi2 = np.nansum(Scp2)
if DEBUG:
print("Chi2")
print(Chi2)
dof=bins_sample0.shape[0]-1
pvalue= 1 - stats.chi2.cdf(Chi2,dof)
print("\nThe p value for Scp2 = ",Scp2," and Chi2 = ", Chi2, " is ",pvalue,"\n\n")
if DEBUG:
print(bins_sample0)
print(bins_sample1)
print("Chi2/dof : {0}".format(str(Chi2/dof)))
print("pvalue : {0}".format(str(pvalue)))
results_list.append(pvalue)
if PLOT:
if no_dim==1: chi2_plots.adaptive_binning_1Dplot(bin_boundaries_dict,data,number_of_splits,title+" "+str(no_dim) + "D "+str(number_of_splits)+ " splits ",name+"_"+str(no_dim) + "D_chi2_"+str(number_of_splits)+"_splits")
if no_dim==2: chi2_plots.adaptive_binning_2Dplot(bin_boundaries_dict,signed_Scp2_dict,number_of_splits,X_values,title+" "+str(no_dim) + "D"+str(number_of_splits)+ " splits ",name+"_"+str(no_dim) + "D_chi2_"+str(number_of_splits)+"_splits", X_min= X_min,X_total_width=X_total_width )
if no_dim>1: chi2_plots.adaptive_binning_2D1Dplot(bin_boundaries_dict,bins_sample01_dict,number_of_splits,X_values,title+" "+str(no_dim) + "D"+str(number_of_splits)+ " splits ",name+"_"+str(no_dim) + "D_chi2_"+str(number_of_splits)+"_splits", no_dim)
return results_list
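# Added usage sketch (hypothetical toy data, not part of the original module):
# compare two 2-D Gaussian samples with three adaptive splits and no
# systematic uncertainty; the returned list holds one p value per requested
# number of splits.
def _example_chi2_adaptive_binning():
    rng = np.random.RandomState(42)
    sample_a = rng.normal(0.0, 1.0, size=(1000, 2))
    sample_b = rng.normal(0.1, 1.0, size=(1000, 2))
    return chi2_adaptive_binning(sample_a, sample_b, [3],
                                 systematics_fraction=0.0, PLOT=False)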
| mit |
sinhrks/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 16 | 34896 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
from ..utils import deprecated
MACHINE_EPSILON = np.finfo(np.double).eps
@deprecated("l1_cross_distances is deprecated and will be removed in 0.20.")
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
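# Added illustration (not original code): componentwise L1 distances for three
# one-dimensional points; ij lists the index pairs (0, 1), (0, 2) and (1, 2).
def _example_l1_cross_distances():
    D, ij = l1_cross_distances(np.array([[0.], [1.], [3.]]))
    assert np.allclose(D.ravel(), [1., 3., 2.])
    return D, ij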
@deprecated("GaussianProcess is deprecated and will be removed in 0.20. "
"Use the GaussianProcessRegressor instead.")
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The legacy Gaussian Process model class.
Note that this class is deprecated and will be removed in 0.20.
Use the GaussianProcessRegressor instead.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
Input X and observations y are centered and reduced wrt
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
The 'Welch' optimizer is due to Welch et al.; see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
The present implementation is based on a translation of the DACE
Matlab toolbox; see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is underdetermined: "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
Default assumes evalMSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval // batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
# Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
# Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy 0 < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
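# Added usage sketch (not original code), mirroring the docstring example but
# also requesting the mean squared error of the prediction.
def _example_gaussian_process():
    X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
    y = (X * np.sin(X)).ravel()
    gp = GaussianProcess(theta0=1e-1, thetaL=1e-3, thetaU=1.)
    gp.fit(X, y)
    x_eval = np.atleast_2d(np.linspace(1., 8., 20)).T
    y_pred, mse = gp.predict(x_eval, eval_MSE=True)
    return y_pred, mse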
| bsd-3-clause |
dingocuster/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
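# Hedged addition (not in the original example): report how many ARD coefficients
# remain non-negligible, to make the sparsity induced by the ARD prior explicit.
# The 1e-3 threshold is an arbitrary illustrative choice.
print("ARD coefficients above 1e-3 in magnitude: %d of %d"
      % (int(np.sum(np.abs(clf.coef_) > 1e-3)), n_features))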
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
lmmentel/ase-espresso | docs/source/conf.py | 1 | 10437 | # -*- coding: utf-8 -*-
#
# ase-espresso documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 25 10:09:02 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import inspect
import sphinx_bootstrap_theme
if sys.version_info.major == 3:
from unittest.mock import MagicMock
else:
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['argparse', 'hostlist', 'seaborn',
'matplotlib', 'matplotlib.pyplot', 'matplotlib.colors',
'matplotlib.cm', 'scipy', 'scipy.optimize',
'scipy.interpolate', 'pandas', 'path', 'pexpect']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if not on_rtd:
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
module_dir = os.path.normpath(os.path.join(__location__, "../../"))
sys.path.insert(0, os.path.abspath(module_dir))
autosummary_generate = True
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ase-espresso'
copyright = u'2017, Lukasz Mentel'
author = u'Lukasz Mentel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3.4'
# The full version, including alpha/beta/rc tags.
release = '0.3.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'bootswatch_theme': 'yeti'}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
try:
from espresso import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ase-espressodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ase-espresso.tex', u'ase-espresso Documentation',
u'Lukasz Mentel', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ase-espresso', u'ase-espresso Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ase-espresso', u'ase-espresso Documentation',
author, 'ase-espresso', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 |
cauchycui/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
ibmsoe/tensorflow | tensorflow/python/estimator/inputs/inputs.py | 94 | 1290 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility methods to create simple input_fns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long
from tensorflow.python.estimator.inputs.numpy_io import numpy_input_fn
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn
from tensorflow.python.util.all_util import remove_undocumented
# pylint: enable=unused-import,line-too-long
_allowed_symbols = [
'numpy_input_fn',
'pandas_input_fn'
]
remove_undocumented(__name__, allowed_exception_list=_allowed_symbols)
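# Hedged usage sketch (not part of this module): numpy_input_fn wraps in-memory
# numpy arrays as an Estimator input_fn. The feature name 'x' and the array shapes
# below are illustrative assumptions, not fixed by this module.
#
#   import numpy as np
#   import tensorflow as tf
#
#   features = {'x': np.random.rand(100, 4).astype(np.float32)}
#   labels = np.random.randint(0, 2, size=100)
#   input_fn = tf.estimator.inputs.numpy_input_fn(
#       x=features, y=labels, batch_size=32, num_epochs=None, shuffle=True)
#   # The resulting input_fn can then be passed to estimator.train(input_fn=input_fn).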
| apache-2.0 |
aemerick/galaxy_analysis | misc/missing_mass_analysis.py | 1 | 15245 | from matplotlib import rc
fsize = 17
rc('text', usetex=False)
rc('font', size=fsize)#, ftype=42)
line_width = 3
point_size = 30
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import yt
import numpy as np
import glob
from onezone import star
import os
import sys
from galaxy_analysis.analysis import Galaxy
from galaxy_analysis.utilities import utilities as util
import deepdish as dd
#
# Step 1: load "final" and "initial" simulations
# Step 2: Load all massive star particle remnants, final mass, initial mass, etc.
# Step 3: Run each through the star particle class in onezone
# (make sure sn ejecta for m > 25 is zero)
# Step 4: Compute for each SN ejecta, wind ejecta, and total ejecta
# Step 5: Compute:
# % error = ((Birth - Current) - (sn_ej+wind_ej)) / (sn_ej+wind_ej)
#
# Step 6: Plot cumulative distribution of SNII remnants error
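# Hedged illustration of the Step 5 formula (helper name is illustrative and not
# used elsewhere in this script): the error is the relative difference between the
# mass a particle actually lost (birth mass - current mass) and the mass the
# one-zone model says it should have ejected (SN ejecta + wind ejecta).
def _percent_error_example(birth_mass, current_mass, sn_ej, wind_ej):
    """Return the fractional Step 5 error for scalar or numpy inputs."""
    expected = sn_ej + wind_ej
    return ((birth_mass - current_mass) - expected) / expected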
def generate_model_stars(m, z, abund = ['m_tot','m_metal'], M_o = None,
include_popIII = False, PopIII_crit_z=2.2E-6):
"""
Makes a list of star objects from one zone model
"""
if M_o is None:
M_o = m
all_star = [None]*np.size(m)
if not 'm_tot' in abund:
abund.extend(['m_tot'])
if not 'm_metal' in abund:
abund.extend(['m_metal'])
ele = {}
for k in abund:
        if k == 'm_tot':
            val = 1.0
        elif k == 'm_metal':
            val = z[0]
else:
val = 0.0
ele[k] = val
sum = 0.0
for i in np.arange(np.size(m)):
if include_popIII:
if z[i] < PopIII_crit_z:
ptype = 'popIII'
else:
ptype = 'star'
else:
ptype = 'star'
all_star[i] = star.Star(M=m[i],Z=z[i], abundances=ele, M_o = M_o[i], star_type = ptype)
if ptype == 'popIII':
all_star[i].set_popIII_properties(True)
else:
all_star[i].set_SNII_properties(True)
# if m[i] > 8.0:
# print(s.sn_ejecta_masses['O'])
all_star[i].set_SNIa_properties(check_mass = True)
sum += all_star[i].sn_ejecta_masses['O']
# print("yyyyy", np.sum( [x.sn_ejecta_masses['O'] for x in all_star]), sum)
return all_star
def check_all_masses(ds, data, ds0 = None, time_cut = -1.0):
pt = data['particle_type']
# cut out DM
select = pt >= 11
pt = pt[select]
bm = data['birth_mass'][select].value
pm = data['particle_mass'][select].convert_to_units('Msun').value
z = data['metallicity_fraction'][select].value
elements = util.species_from_fields(ds.field_list)
all_stars = generate_model_stars(bm,z, abund = elements,
include_popIII = ds.parameters['IndividualStarPopIIIFormation'])
lifetime = data['dynamical_time'][select].convert_to_units('Myr')
birth = data['creation_time'][select].convert_to_units('Myr')
age = ds.current_time.convert_to_units('Myr') - birth
model_wind_ejecta = {} # total_wind_ejecta
for k in all_stars[0].wind_ejecta_masses().keys():
model_wind_ejecta[k] = np.array([x.wind_ejecta_masses()[k] for x in all_stars])
model_sn_ejecta = {}
for k in all_stars[0].sn_ejecta_masses.keys():
model_sn_ejecta[k] = np.array([x.sn_ejecta_masses[k] for x in all_stars])
# correct for AGB stars that haven't died
AGB = (bm < 8.0) * (pt == 11)
select = (bm > 8.0) * (pt == 11)
factor = age / lifetime
factor[factor>1.0] = 1.0
time_select = birth > time_cut
#
# Apply correction and zero out SN abundances for stars that have not gone SNe
#
for k in list(model_wind_ejecta.keys()):
model_wind_ejecta[k][AGB] = 0.0
model_sn_ejecta[k][ (pt == 11) ] = 0.0 # regular stars
model_sn_ejecta[k][ (pt == 14) ] = 0.0 # popIII stars
model_wind_ejecta[k][select] = model_wind_ejecta[k][select]*factor[select]
total_model_ejecta = {}
for k in list(model_wind_ejecta.keys()):
total_model_ejecta[k] = np.sum(model_sn_ejecta[k][time_select]) + np.sum(model_wind_ejecta[k][time_select])
#print("xxxxxx", np.sum(model_sn_ejecta['O']), np.sum(model_sn_ejecta['O'][bm>8.0]), np.sum(model_sn_ejecta['O'][bm<8.0]))
    # construct the individual mode dictionary
separate_mode_ejecta = {'AGB' : {}, 'SWind' : {}, 'SNII' : {}, 'SNIa' : {} , 'PopIII' : {}, 'Total' : {}}
for k in list(model_wind_ejecta.keys()):
separate_mode_ejecta['PopIII'][k] = np.sum(model_sn_ejecta[k][(pt==13)*(z<2.2E-6)])
separate_mode_ejecta['SNII'][k] = np.sum(model_sn_ejecta[k][(bm > 8.0)*(z>2.2E-6)])
separate_mode_ejecta['SNIa'][k] = np.sum(model_sn_ejecta[k][(bm < 8.0)*(z>2.2E-6)])
separate_mode_ejecta['SWind'][k] = np.sum(model_wind_ejecta[k][bm > 8.0])
separate_mode_ejecta['AGB'][k] = np.sum(model_wind_ejecta[k][bm < 8.0])
separate_mode_ejecta['Total'][k] = np.sum( [separate_mode_ejecta[x][k] for x in ['AGB','SWind','SNII','SNIa','PopIII'] ])
for k in list(separate_mode_ejecta.keys()):
separate_mode_ejecta[k]['Total Tracked Metals'] = np.sum( [separate_mode_ejecta[k][x] for x in list(separate_mode_ejecta[k].keys()) if (not x in ['m_tot','m_metal','H','He'])] )
if os.path.exists(str(ds) + '_galaxy_data.h5'):
dd_data = dd.io.load(str(ds) + '_galaxy_data.h5')
dd_data['gas_meta_data']['masses']['Type'] = separate_mode_ejecta
dd.io.save( str(ds) + '_galaxy_data.h5',dd_data)
# now do this for the individual abundances on grid:
grid_masses = {}
for k in list(model_wind_ejecta.keys()):
        if k == 'm_tot' or k == 'm_metal':
continue
grid_masses[k] = np.sum(data[k + '_Density'] * ds.mass_unit / ds.length_unit**3 *\
data['cell_volume']).convert_to_units('Msun').value
if not (ds0 is None):
grid_masses[k] = grid_masses[k] - np.sum(ds0[k + '_Density'] * ds0.mass_unit / ds0.length_unit**3 *\
ds0['cell_volume']).convert_to_units('Msun').value
# else:
# grid_masses[k] = grid_masses[k] - 1.0E-10 * np.sum(data['cell_mass'].to('Msun')).value
gal = Galaxy(str(ds))
outflow_masses = gal.boundary_mass_flux
#print total_model_ejecta
#print grid_masses
#print outflow_masses
for k in separate_mode_ejecta.keys():
print(k, separate_mode_ejecta[k]['O'], separate_mode_ejecta[k]['N'])
print(list(grid_masses.keys()))
print("Element Total_on_Grid Total_Outflow Sum_Injected Total_model_mass Percent_error")
for k in list(grid_masses.keys()):
okey = k + '_Density'
error =100 * (outflow_masses[okey] + grid_masses[k] - total_model_ejecta[k] ) / total_model_ejecta[k]
print("%2s %8.8E %8.8E %8.8E %8.8E %4.4f"%(k,grid_masses[k], outflow_masses[okey], grid_masses[k] + outflow_masses[okey],
total_model_ejecta[k], error))
return all_stars, model_sn_ejecta, model_wind_ejecta, total_model_ejecta
def check_wind_ejecta(ds, data):
pt = data['particle_type']
# cut out DM
select = pt >= 11
pt = pt[select]
bm = data['birth_mass'][select].value
pm = data['particle_mass'][select].convert_to_units('Msun').value
z = data['metallicity_fraction'][select].value
elements = util.species_from_fields(ds.field_list)
all_stars = generate_model_stars(bm,z, abund = elements,
include_popIII = ds.parameters['IndividualStarPopIIIFormation'])
lifetime = data['dynamical_time'][select].convert_to_units('Myr')
birth = data['creation_time'][select].convert_to_units('Myr')
age = ds.current_time.convert_to_units('Myr') - birth
# total wind ejecta over entire lifetime
total_wind_ejecta = np.array([x.wind_ejecta_masses()['m_tot'] for x in all_stars])
# correct for AGB stars that haven't died
AGB = (bm < 8.0)
model_wind_ejecta = total_wind_ejecta * 1.0
model_wind_ejecta[ AGB * (pt == 11)] = 0.0
# adjust wind to correct fraction given lifetime
select = (bm > 8.0) * (pt == 11)
factor = age / lifetime
factor[factor>1.0] = 1.0
model_wind_ejecta[select] = model_wind_ejecta[select] * factor[select]
# load actual injection from simulation
actual_wind_ejecta = data['wind_mass_ejected'][select].value
# compute percent error
model_wind_ejecta = model_wind_ejecta[age>1]
actual_wind_ejecta = actual_wind_ejecta[age>1]
error = (model_wind_ejecta - actual_wind_ejecta)
error[model_wind_ejecta>0] = error[model_wind_ejecta>0]/model_wind_ejecta[model_wind_ejecta>0]
error_mass = error[model_wind_ejecta>0]
all = 1.0 * np.size(error_mass)
print(np.size( error_mass[ (np.abs(error_mass) < 0.05) ])/all)
print(np.size( error_mass[ (np.abs(error_mass) < 0.10) ])/all)
print(np.size( error_mass[ (np.abs(error_mass) < 0.15) ])/all)
print(np.size( error_mass[ (np.abs(error_mass) < 0.20) ])/all)
print(np.size( error_mass[ (np.abs(error_mass) < 0.25) ])/all)
#error_mass = error_mass[birth[model_wind_ejecta>0] > 110]
#error_mass = error_mass[error_mass>0]
print(np.min(error_mass), np.max(error_mass), np.average(error_mass), np.median(error_mass))
print(error_mass)
select = (age>1)
bm = bm[select]
pm = pm[select]
age = age[select]
lifetime = lifetime[select]
total_wind_ejecta = total_wind_ejecta[select]
select = (model_wind_ejecta>0)
bm = bm[select]
pm = pm[select]
age = age[select]
lifetime = lifetime[select]
model_wind_ejecta = model_wind_ejecta[select]
actual_wind_ejecta = actual_wind_ejecta[select]
total_wind_ejecta = total_wind_ejecta[select]
#print("BM PM Percent_error Model_wind actual_wind lifetime_wind")
#for i in np.arange(np.size(error_mass)):
# print("%5.5f %3.3f %5.5f %5.5E %5.5E %5.5E"%(bm[i],pm[i],error_mass[i]*100,model_wind_ejecta[i], actual_wind_ejecta[i], total_wind_ejecta[i]))
#print(np.min(error_mass), np.max(error_mass), np.average(error_mass), np.median(error_mass))
# print bm[error > 0.9], pm[error>0.9], pt[error>0.9]
# print age[error>0.9]
# print actual_wind_ejecta[error>0.9]
# print model_wind_ejecta[error>0.9]
#print actual_wind_ejecta[birth > 110]
#print model_wind_ejecta[birth > 110]
return
def compute_SNII_error(ds, data, uselog = True):
pt = data['particle_type']
select = pt >= 11
pt = pt[select]
pm = data['particle_mass'][select].convert_to_units('Msun').value
bm = data['birth_mass'][select].value
z = data['metallicity_fraction'][select].value
# select all particles that could have gone supernova
select = ((pt == 13) * (bm > 8.0) * (bm < 25.0)) +\
((pt == 13) * (z < 2.2E-6) * ((bm > 11.0) * (bm<40.0) + (bm>140.0)*(bm<260.0)))
pm = pm[select]
bm = bm[select]
z = z[select]
elements = util.species_from_fields(ds.field_list)
all_stars = generate_model_stars(bm, z, abund = elements)
total_ejecta = np.zeros(np.size(bm))
error = np.zeros(np.size(bm))
wind_error = np.zeros(np.size(bm))
sn_error = np.zeros(np.size(bm))
ej_frac = np.zeros(np.size(bm))
for i,s in enumerate(all_stars):
s.set_SNII_properties(True)
wind = s.wind_ejecta_masses()
sn = s.sn_ejecta_masses
total_ejecta[i] = wind['m_tot'] + sn['m_tot']
error[i] = ( -1.0*(bm[i]-pm[i]) + total_ejecta[i]) / (total_ejecta[i])
ej_frac[i] = (bm[i]-pm[i]) / total_ejecta[i]
wind_error[i] = ( wind['m_tot'] / total_ejecta[i] )
sn_error[i] = ( sn['m_tot'] / total_ejecta[i] )
snavg , snstd = np.average(sn_error), np.std(sn_error)
windavg, windstd = np.average(wind_error), np.std(wind_error)
# now plot cumulative distribution of positive error (error > 0 = missing mass)
pos_error = error[error>0]
fig, ax = plt.subplots()
if uselog:
xdata = np.log10(pos_error)
bins = np.arange(-4, 1.0, 0.025)
else:
xdata = pos_error
bins = np.linspace(0.0, 1.0, 200)
hist,bins = np.histogram(np.log10(ej_frac), bins = bins)
cent = (bins[1:] + bins[:-1])*0.5
ax.plot(cent, np.cumsum(hist) / (1.0*np.sum(hist)), lw = 3, color = 'black')
ylim = [0.0, 1.05]
ax.set_ylim(ylim)
def _plot_line(x, color, ls, log, label):
if log:
if x <= 0:
return
x = np.log10(x)
ax.plot([x,x],ylim, color = color, ls = ls, label = label, lw = 2)
return
# _plot_line(snavg, 'blue', '-', uselog, 'SN fraction')
# _plot_line(snavg-snstd, 'blue', '-', uselog, None)
# _plot_line(snavg+snstd, 'blue', '-', uselog, None)
# _plot_line(windavg, 'purple', '-', uselog, 'Wind fraction')
# _plot_line(windavg - windstd, 'purple', '--', uselog, None)
# _plot_line(windavg + windstd, 'purple', '--', uselog, None)
ax.set_xlabel('Fraction of Mass Actually Injected')
ax.set_ylabel('Fraction of SN')
fig.set_size_inches(8,8)
plt.tight_layout()
plt.minorticks_on()
fig.savefig('sn_cum_mass_error.png')
plt.close()
#
#
# histogram
#
#
fig, ax = plt.subplots()
if uselog:
xdata = np.log10(pos_error)
bins = np.arange(-2, 0.05, 0.025)
else:
xdata = pos_error
bins = np.linspace(0.0, 1.0, 200)
hist,bins = np.histogram(xdata, bins = bins)
cent = (bins[1:] + bins[:-1])*0.5
ax.plot(cent, hist, lw = 3, color = 'black')
energy_error = ( np.sum(pos_error)) / (np.size(pos_error)*1.0)
ax.plot([np.average(pos_error),np.average(pos_error)], [0,np.max(hist)], color = 'black' ,ls = '--', lw = 3)
ax.annotate("Energy Error = %0.2f percent"%(100*energy_error), xy=(0.5,0.9*np.max(hist)),
xytext=(0.5,0.9*np.max(hist)))
print(energy_error)
ax.set_ylim([0,np.max(hist)])
ax.set_xlabel('Error in Ejected Mass')
ax.set_ylabel('Counts')
fig.set_size_inches(8,8)
plt.tight_layout()
plt.minorticks_on()
fig.savefig('sn_mass_error.png')
return error, fig, ax
if __name__=="__main__":
name_list = np.sort(glob.glob('DD????/DD????'))
if np.size(sys.argv) == 1:
try:
ds = yt.load(name_list[-1])
except:
print("Could not load ", name_list[-1], " trying the next one")
ds = yt.load(name_list[-2])
else:
# name = 'DD%0004i'%( int(sys.argv[1]))
name = str( sys.argv[1] )
ds = yt.load( name + '/' + name)
data = ds.all_data()
# if ('enzo','wind_mass_ejected') in ds.field_list or\
# ('io','wind_mass_ejected') in ds.field_list:
# try:
# check_wind_ejecta(ds,data)
# except:
# print("failing in wind ejecta")
# try:
# error, fig, ax = compute_SNII_error(ds,data, uselog=True)
# except:
# print("failing in SNII check")
# ds0 = yt.load('./../lowres/Dds0035/Dds0035')
# ds0 = ds0.all_data()
check_all_masses(ds,data) #, ds0 = ds0)
| mit |
mhdella/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
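# Hedged addition (not in the original example): the geometric margin of the fitted
# separator is 2 / ||w||, which can be reported alongside the plot.
print("Margin width: %.3f" % (2.0 / np.linalg.norm(w)))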
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
CivicKnowledge/ambry | ambry/orm/partition.py | 1 | 48989 | """Object-Relational Mapping classes, based on SQLAlchemy, for representing partitions.
Copyright (c) 2015 Civic Knowledge. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
__docformat__ = 'restructuredtext en'
from collections import OrderedDict
import six
from six import string_types
from geoid.util import isimplify
from geoid.civick import GVid
from geoid import parse_to_gvid
from dateutil import parser
from sqlalchemy import event
from sqlalchemy import Column as SAColumn, Integer, UniqueConstraint
from sqlalchemy import String, ForeignKey
from sqlalchemy.orm import relationship, object_session, backref
from ambry.identity import ObjectNumber, PartialPartitionName, PartitionIdentity
from ambry.orm.columnstat import ColumnStat
from ambry.orm.dataset import Dataset
from ambry.util import Constant
import logging
from ambry.util import get_logger
logger = get_logger(__name__)
# logger.setLevel(logging.DEBUG)
from . import Base, MutationDict, MutationList, JSONEncodedObj, BigIntegerType
class PartitionDisplay(object):
"""Helper object to select what to display for titles and descriptions"""
def __init__(self, p):
self._p = p
desc_used = False
self.title = self._p.title
self.description = ''
if not self.title:
self.title = self._p.table.description
desc_used = True
if not self.title:
self.title = self._p.vname
if not desc_used:
self.description = self._p.description.strip('.') + '.' if self._p.description else ''
self.notes = self._p.notes
@property
def geo_description(self):
"""Return a description of the geographic extents, using the largest scale
space and grain coverages"""
sc = self._p.space_coverage
gc = self._p.grain_coverage
if sc and gc:
if parse_to_gvid(gc[0]).level == 'state' and parse_to_gvid(sc[0]).level == 'state':
return parse_to_gvid(sc[0]).geo_name
else:
return ("{} in {}".format(
parse_to_gvid(gc[0]).level_plural.title(),
parse_to_gvid(sc[0]).geo_name))
elif sc:
return parse_to_gvid(sc[0]).geo_name.title()
        elif gc:
return parse_to_gvid(gc[0]).level_plural.title()
else:
return ''
@property
def time_description(self):
"""String description of the year or year range"""
tc = [t for t in self._p.time_coverage if t]
if not tc:
return ''
mn = min(tc)
mx = max(tc)
if not mn and not mx:
return ''
elif mn == mx:
return mn
else:
return "{} to {}".format(mn, mx)
@property
def sub_description(self):
"""Time and space dscription"""
gd = self.geo_description
td = self.time_description
if gd and td:
return '{}, {}. {} Rows.'.format(gd, td, self._p.count)
elif gd:
return '{}. {} Rows.'.format(gd, self._p.count)
elif td:
return '{}. {} Rows.'.format(td, self._p.count)
else:
return '{} Rows.'.format(self._p.count)
class Partition(Base):
__tablename__ = 'partitions'
STATES = Constant()
STATES.SYNCED = 'synced'
STATES.CLEANING = 'cleaning'
STATES.CLEANED = 'cleaned'
STATES.PREPARING = 'preparing'
STATES.PREPARED = 'prepared'
STATES.BUILDING = 'building'
STATES.BUILT = 'built'
STATES.COALESCING = 'coalescing'
STATES.COALESCED = 'coalesced'
STATES.ERROR = 'error'
STATES.FINALIZING = 'finalizing'
STATES.FINALIZED = 'finalized'
STATES.INSTALLING = 'installing'
STATES.INSTALLED = 'installed'
    TYPE = Constant()
TYPE.SEGMENT = 's'
TYPE.UNION = 'u'
sequence_id = SAColumn('p_sequence_id', Integer)
vid = SAColumn('p_vid', String(16), primary_key=True, nullable=False)
id = SAColumn('p_id', String(13), nullable=False)
d_vid = SAColumn('p_d_vid', String(13), ForeignKey('datasets.d_vid'), nullable=False, index=True)
t_vid = SAColumn('p_t_vid', String(15), ForeignKey('tables.t_vid'), nullable=False, index=True)
name = SAColumn('p_name', String(200), nullable=False, index=True)
vname = SAColumn('p_vname', String(200), unique=True, nullable=False, index=True)
fqname = SAColumn('p_fqname', String(200), unique=True, nullable=False, index=True)
title = SAColumn('p_title', String())
description = SAColumn('p_description', String())
notes = SAColumn('p_notes', String())
cache_key = SAColumn('p_cache_key', String(200), unique=True, nullable=False, index=True)
parent_vid = SAColumn('p_p_vid', String(16), ForeignKey('partitions.p_vid'), nullable=True, index=True)
ref = SAColumn('p_ref', String(16), index=True,
                   doc='VID reference to an earlier version to use instead of this one.')
type = SAColumn('p_type', String(20), default=TYPE.UNION,
doc='u - normal partition, s - segment')
table_name = SAColumn('p_table_name', String(50))
time = SAColumn('p_time', String(20)) # FIXME: add helptext
space = SAColumn('p_space', String(50))
grain = SAColumn('p_grain', String(50))
variant = SAColumn('p_variant', String(50))
format = SAColumn('p_format', String(50))
segment = SAColumn('p_segment', Integer,
doc='Part of a larger partition. segment_id is usually also a source ds_id')
epsg = SAColumn('p_epsg', Integer, doc='EPSG SRID for the reference system of a geographic dataset. ')
# The partition could hold data that is considered a dimension -- if multiple datasets
# were joined, that dimension would be a dimension column, but it only has a single
# value in each partition.
# That could be part of the name, or it could be declared in a table, with a single value for all of the
# rows in a partition.
min_id = SAColumn('p_min_id', BigIntegerType)
max_id = SAColumn('p_max_id', BigIntegerType)
count = SAColumn('p_count', Integer)
state = SAColumn('p_state', String(50))
data = SAColumn('p_data', MutationDict.as_mutable(JSONEncodedObj))
space_coverage = SAColumn('p_scov', MutationList.as_mutable(JSONEncodedObj))
time_coverage = SAColumn('p_tcov', MutationList.as_mutable(JSONEncodedObj))
grain_coverage = SAColumn('p_gcov', MutationList.as_mutable(JSONEncodedObj))
installed = SAColumn('p_installed', String(100))
_location = SAColumn('p_location', String(100)) # Location of the data file
__table_args__ = (
# ForeignKeyConstraint( [d_vid, d_location], ['datasets.d_vid','datasets.d_location']),
UniqueConstraint('p_sequence_id', 'p_d_vid', name='_uc_partitions_1'),
)
# For the primary table for the partition. There is one per partition, but a table
# can be primary in multiple partitions.
table = relationship('Table', backref='partitions', foreign_keys='Partition.t_vid')
stats = relationship(ColumnStat, backref='partition', cascade='all, delete, delete-orphan')
children = relationship('Partition', backref=backref('parent', remote_side=[vid]), cascade='all')
_bundle = None # Set when returned from a bundle.
_datafile = None # TODO: Unused variable.
_datafile_writer = None # TODO: Unused variable.
_stats_dict = None
@property
def identity(self):
"""Return this partition information as a PartitionId."""
if self.dataset is None:
# The relationship will be null until the object is committed
s = object_session(self)
ds = s.query(Dataset).filter(Dataset.id_ == self.d_id).one()
else:
ds = self.dataset
d = {
'id': self.id,
'vid': self.vid,
'name': self.name,
'vname': self.vname,
'ref': self.ref,
'space': self.space,
'time': self.time,
'table': self.table_name,
'grain': self.grain,
'variant': self.variant,
'segment': self.segment,
'format': self.format if self.format else 'db'
}
return PartitionIdentity.from_dict(dict(list(ds.dict.items()) + list(d.items())))
@property
def display(self):
"""Return an acessor object to get display titles and descriptions"""
return PartitionDisplay(self)
@property
def bundle(self):
return self._bundle # Set externally, such as Bundle.wrap_partition
@property
def is_segment(self):
return self.type == self.TYPE.SEGMENT
@property
def headers(self):
return [c.name for c in self.table.columns]
def __repr__(self):
return '<partition: {} {}>'.format(self.vid, self.vname)
def set_stats(self, stats):
self.stats[:] = [] # Delete existing stats
for c in self.table.columns:
if c.name not in stats:
continue
d = stats[c.name].dict
del d['name']
del d['flags']
cs = ColumnStat(p_vid=self.vid, d_vid=self.d_vid, c_vid=c.vid, **d)
self.stats.append(cs)
def parse_gvid_or_place(self, gvid_or_place):
try:
return parse_to_gvid(gvid_or_place)
except KeyError:
places = list(self._bundle._library.search.search_identifiers(gvid_or_place))
if not places:
err_msg = "Failed to find space identifier '{}' in full " \
"text identifier search for partition '{}'" \
.format(gvid_or_place, str(self.identity))
self._bundle.error(err_msg)
return None
return parse_to_gvid(places[0].vid)
def set_coverage(self, stats):
""""Extract time space and grain coverage from the stats and store them in the partition"""
from ambry.util.datestimes import expand_to_years
scov = set()
tcov = set()
grains = set()
def summarize_maybe(gvid):
try:
return parse_to_gvid(gvid).summarize()
except:
return None
def simplifiy_maybe(values, column):
parsed = []
for gvid in values:
            # Skip gvids that are missing or the literal string 'None'
if gvid is None or gvid == 'None':
continue
try:
parsed.append(parse_to_gvid(gvid))
except ValueError as e:
if self._bundle:
self._bundle.warn("While analyzing geo coverage in final partition stage, " +
"Failed to parse gvid '{}' in {}.{}: {}"
.format(str(gvid), column.table.name, column.name, e))
try:
return isimplify(parsed)
except:
return None
def int_maybe(year):
try:
return int(year)
except:
return None
for c in self.table.columns:
if c.name not in stats:
continue
try:
if stats[c.name].is_gvid or stats[c.name].is_geoid:
scov |= set(x for x in simplifiy_maybe(stats[c.name].uniques, c))
grains |= set(summarize_maybe(gvid) for gvid in stats[c.name].uniques)
elif stats[c.name].is_year:
tcov |= set(int_maybe(x) for x in stats[c.name].uniques)
elif stats[c.name].is_date:
# The fuzzy=True argument allows ignoring the '-' char in dates produced by .isoformat()
try:
tcov |= set(parser.parse(x, fuzzy=True).year if isinstance(x, string_types) else x.year for x in
stats[c.name].uniques)
except ValueError:
pass
except Exception as e:
self._bundle.error("Failed to set coverage for column '{}', partition '{}': {}"
.format(c.name, self.identity.vname, e))
raise
# Space Coverage
if 'source_data' in self.data:
for source_name, source in list(self.data['source_data'].items()):
scov.add(self.parse_gvid_or_place(source['space']))
if self.identity.space: # And from the partition name
try:
scov.add(self.parse_gvid_or_place(self.identity.space))
except ValueError:
# Couldn't parse the space as a GVid
pass
        # For geo_coverage, only include the higher-level summary levels: counties, states,
        # places and urban areas.
self.space_coverage = sorted([str(x) for x in scov if bool(x) and x.sl
in (10, 40, 50, 60, 160, 400)])
#
# Time Coverage
# From the source
# If there was a time value in the source that this partition was created from, then
# add it to the years.
if 'source_data' in self.data:
for source_name, source in list(self.data['source_data'].items()):
if 'time' in source:
for year in expand_to_years(source['time']):
if year:
tcov.add(year)
# From the partition name
if self.identity.name.time:
for year in expand_to_years(self.identity.name.time):
if year:
tcov.add(year)
self.time_coverage = [t for t in tcov if t]
#
# Grains
if 'source_data' in self.data:
for source_name, source in list(self.data['source_data'].items()):
if 'grain' in source:
grains.add(source['grain'])
self.grain_coverage = sorted(str(g) for g in grains if g)
@property
def dict(self):
"""A dict that holds key/values for all of the properties in the
object.
:return:
"""
d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs
if p.key not in ('table', 'dataset', '_codes', 'stats', 'data', 'process_records')}
if self.data:
            # Copy data fields into top level dict, but don't overwrite existing values.
for k, v in six.iteritems(self.data):
if k not in d and k not in ('table', 'stats', '_codes', 'data'):
d[k] = v
return d
@property
def detail_dict(self):
"""A more detailed dict that includes the descriptions, sub descriptions, table
and columns."""
d = self.dict
def aug_col(c):
d = c.dict
d['stats'] = [s.dict for s in c.stats]
return d
d['table'] = self.table.dict
d['table']['columns'] = [aug_col(c) for c in self.table.columns]
return d
@property
def stats_dict(self):
class Bunch(object):
"""Dict and object access to properties"""
def __init__(self, o):
self.__dict__.update(o)
def __str__(self):
return str(self.__dict__)
def __repr__(self):
return repr(self.__dict__)
def keys(self):
return list(self.__dict__.keys())
def items(self):
return list(self.__dict__.items())
def iteritems(self):
return iter(self.__dict__.items())
def __getitem__(self, k):
if k in self.__dict__:
return self.__dict__[k]
else:
from . import ColumnStat
return ColumnStat(hist=[])
if not self._stats_dict:
cols = {s.column.name: Bunch(s.dict) for s in self.stats}
self._stats_dict = Bunch(cols)
return self._stats_dict
def build_sample(self):
name = self.table.name
count = int(
self.database.connection.execute('SELECT count(*) FROM "{}"'.format(name)).fetchone()[0])
        skip = count // 20
if count > 100:
sql = 'SELECT * FROM "{}" WHERE id % {} = 0 LIMIT 20'.format(name, skip)
else:
sql = 'SELECT * FROM "{}" LIMIT 20'.format(name)
sample = []
for j, row in enumerate(self.database.connection.execute(sql)):
sample.append(list(row.values()))
self.record.data['sample'] = sample
s = self.bundle.database.session
s.merge(self.record)
s.commit()
@property
def row(self):
# Use an Ordered Dict to make it friendly to creating CSV files.
SKIP_KEYS = [
'sequence_id', 'vid', 'id', 'd_vid', 't_vid', 'min_key', 'max_key',
'installed', 'ref', 'count', 'state', 'data', 'space_coverage',
'time_coverage', 'grain_coverage', 'name', 'vname', 'fqname', 'cache_key'
]
d = OrderedDict([('table', self.table.name)] +
[(p.key, getattr(self, p.key)) for p in self.__mapper__.attrs
if p.key not in SKIP_KEYS])
return d
def update(self, **kwargs):
if 'table' in kwargs:
del kwargs['table'] # In source_schema.csv, this is the name of the table, not the object
for k, v in list(kwargs.items()):
if hasattr(self, k):
setattr(self, k, v)
def finalize(self, ps=None):
self.state = self.STATES.FINALIZING
# Write the stats for this partition back into the partition
with self.datafile.writer as w:
for i, c in enumerate(self.table.columns, 1):
wc = w.column(i)
assert wc.pos == c.sequence_id, (c.name, wc.pos, c.sequence_id)
wc.name = c.name
wc.description = c.description
wc.type = c.python_type.__name__
self.count = w.n_rows
w.finalize()
if self.type == self.TYPE.UNION:
ps.update('Running stats ', state='running')
stats = self.datafile.run_stats()
self.set_stats(stats)
self.set_coverage(stats)
self._location = 'build'
self.title = PartitionDisplay(self).title
self.description = PartitionDisplay(self).description
self.state = self.STATES.FINALIZED
# =============
# These methods are a bit non-cohesive, since they require the _bundle value to be set, which is
    # set externally, when the object is returned from a bundle.
def clean(self):
"""Remove all built files and return the partition to a newly-created state"""
if self.datafile:
self.datafile.remove()
@property
def location(self):
base_location = self._location
if not base_location:
return None
if self._bundle.build_fs.exists(base_location):
if self._bundle.build_fs.hashsyspath(base_location):
return self._bundle.build_fs.getsyspath(base_location)
return base_location
@location.setter
def location(self, v):
self._location = v
@property
def datafile(self):
from ambry.exc import NotFoundError
if self.is_local:
# Use the local version, if it exists
logger.debug('datafile: Using local datafile {}'.format(self.vname))
return self.local_datafile
else:
# If it doesn't try to get the remote.
try:
logger.debug('datafile: Using remote datafile {}'.format(self.vname))
return self.remote_datafile
except NotFoundError:
                # If the remote doesn't exist, return the local, so the caller can call exists() on it,
# get its path, etc.
return self.local_datafile
@property
def local_datafile(self):
"""Return the datafile for this partition, from the build directory, the remote, or the warehouse"""
from ambry_sources import MPRowsFile
from fs.errors import ResourceNotFoundError
from ambry.orm.exc import NotFoundError
try:
return MPRowsFile(self._bundle.build_fs, self.cache_key)
except ResourceNotFoundError:
raise NotFoundError(
'Could not locate data file for partition {} (local)'.format(self.identity.fqname))
@property
def remote(self):
"""
Return the remote for this partition
:return:
"""
from ambry.exc import NotFoundError
ds = self.dataset
if 'remote_name' not in ds.data:
raise NotFoundError('Could not determine remote for partition: {}'.format(self.identity.fqname))
return self._bundle.library.remote(ds.data['remote_name'])
@property
def remote_datafile(self):
from fs.errors import ResourceNotFoundError
from ambry.exc import AccessError, NotFoundError
from boto.exception import S3ResponseError
try:
from ambry_sources import MPRowsFile
remote = self.remote
datafile = MPRowsFile(remote.fs, self.cache_key)
if not datafile.exists:
raise NotFoundError(
'Could not locate data file for partition {} from remote {} : file does not exist'
.format(self.identity.fqname, remote))
except ResourceNotFoundError as e:
raise NotFoundError('Could not locate data file for partition {} (remote): {}'
.format(self.identity.fqname, e))
except S3ResponseError as e:
# HACK. It looks like we get the response error with an access problem when
# we have access to S3, but the file doesn't exist.
raise NotFoundError("Can't access MPR file for {} in remote {}".format(self.cache_key, remote.fs))
return datafile
@property
def is_local(self):
"""Return true is the partition file is local"""
from ambry.orm.exc import NotFoundError
try:
if self.local_datafile.exists:
return True
except NotFoundError:
pass
return False
def localize(self, ps=None):
"""Copy a non-local partition file to the local build directory"""
from filelock import FileLock
from ambry.util import ensure_dir_exists
from ambry_sources import MPRowsFile
from fs.errors import ResourceNotFoundError
if self.is_local:
return
local = self._bundle.build_fs
b = self._bundle.library.bundle(self.identity.as_dataset().vid)
remote = self._bundle.library.remote(b)
lock_path = local.getsyspath(self.cache_key + '.lock')
ensure_dir_exists(lock_path)
lock = FileLock(lock_path)
if ps:
ps.add_update(message='Localizing {}'.format(self.identity.name),
partition=self,
item_type='bytes',
state='downloading')
if ps:
def progress(bts):
if ps.rec.item_total is None:
ps.rec.item_count = 0
if not ps.rec.data:
ps.rec.data = {} # Should not need to do this.
return self
item_count = ps.rec.item_count + bts
ps.rec.data['updates'] = ps.rec.data.get('updates', 0) + 1
if ps.rec.data['updates'] % 32 == 1:
ps.update(message='Localizing {}'.format(self.identity.name),
item_count=item_count)
else:
from ambry.bundle.process import call_interval
@call_interval(5)
def progress(bts):
self._bundle.log("Localizing {}. {} bytes downloaded".format(self.vname, bts))
def exception_cb(e):
raise e
with lock:
            # FIXME! This won't work with remote (http) API, only FS (s3:, file:)
if self.is_local:
return self
try:
with remote.fs.open(self.cache_key + MPRowsFile.EXTENSION, 'rb') as f:
event = local.setcontents_async(self.cache_key + MPRowsFile.EXTENSION,
f,
progress_callback=progress,
error_callback=exception_cb)
event.wait()
if ps:
ps.update_done()
except ResourceNotFoundError as e:
from ambry.orm.exc import NotFoundError
raise NotFoundError("Failed to get MPRfile '{}' from {}: {} "
.format(self.cache_key, remote.fs, e))
return self
@property
def reader(self):
        """The reader for the datafile"""
        from ambry.orm.exc import NotFoundError
        from fs.errors import ResourceNotFoundError
try:
return self.datafile.reader
except ResourceNotFoundError:
raise NotFoundError("Failed to find partition file, '{}' "
.format(self.datafile.path))
def select(self, predicate=None, headers=None):
"""
        Select rows from the reader using a predicate to select rows and an itemgetter to return a
        subset of elements.
:param predicate: If defined, a callable that is called for each row, and if it returns true, the
row is included in the output.
:param headers: If defined, a list or tuple of header names to return from each row
:return: iterable of results
WARNING: This routine works from the reader iterator, which returns RowProxy objects. RowProxy objects
are reused, so if you construct a list directly from the output from this method, the list will have
multiple copies of a single RowProxy, which will have as an inner row the last result row. If you will
be directly constructing a list, use a getter that extracts the inner row, or which converts the RowProxy
to a dict:
list(s.datafile.select(lambda r: r.stusab == 'CA', lambda r: r.dict ))
"""
# FIXME; in Python 3, use yield from
with self.reader as r:
for row in r.select(predicate, headers):
yield row
def __iter__(self):
""" Iterator over the partition, returning RowProxy objects.
:return: a generator
"""
with self.reader as r:
for row in r:
yield row
@property
def analysis(self):
"""Return an AnalysisPartition proxy, which wraps this partition to provide acess to
dataframes, shapely shapes and other analysis services"""
if isinstance(self, PartitionProxy):
return AnalysisPartition(self._obj)
else:
return AnalysisPartition(self)
@property
def measuredim(self):
"""Return a MeasureDimension proxy, which wraps the partition to provide access to
columns in terms of measures and dimensions"""
if isinstance(self, PartitionProxy):
return MeasureDimensionPartition(self._obj)
else:
return MeasureDimensionPartition(self)
# ============================
def update_id(self, sequence_id=None):
"""Alter the sequence id, and all of the names and ids derived from it. This
often needs to be done after an IntegrityError in a multiprocessing run"""
if sequence_id:
self.sequence_id = sequence_id
self._set_ids(force=True)
if self.dataset:
self._update_names()
def _set_ids(self, force=False):
if not self.sequence_id:
from .exc import DatabaseError
raise DatabaseError('Sequence ID must be set before insertion')
if not self.vid or force:
assert bool(self.d_vid)
assert bool(self.sequence_id)
don = ObjectNumber.parse(self.d_vid)
assert don.revision
on = don.as_partition(self.sequence_id)
self.vid = str(on.rev(don.revision))
self.id = str(on.rev(None))
if not self.data:
self.data = {}
def _update_names(self):
"""Update the derived names"""
d = dict(
table=self.table_name,
time=self.time,
space=self.space,
grain=self.grain,
variant=self.variant,
segment=self.segment
)
assert self.dataset
name = PartialPartitionName(**d).promote(self.dataset.identity.name)
self.name = str(name.name)
self.vname = str(name.vname)
self.cache_key = name.cache_key
self.fqname = str(self.identity.fqname)
@staticmethod
def before_insert(mapper, conn, target):
"""event.listen method for Sqlalchemy to set the sequence for this
object and create an ObjectNumber value for the id_"""
target._set_ids()
if target.name and target.vname and target.cache_key and target.fqname and not target.dataset:
return
Partition.before_update(mapper, conn, target)
@staticmethod
def before_update(mapper, conn, target):
target._update_names()
@staticmethod
def before_delete(mapper, conn, target):
pass
event.listen(Partition, 'before_insert', Partition.before_insert)
event.listen(Partition, 'before_update', Partition.before_update)
event.listen(Partition, 'before_delete', Partition.before_delete)
class PartitionProxy(object):
__slots__ = ["_obj", "__weakref__"]
def __init__(self, obj):
object.__setattr__(self, "_obj", obj)
#
# proxying (special cases)
#
def __getattr__(self, name):
return getattr(object.__getattribute__(self, "_obj"), name)
def __delattr__(self, name):
delattr(object.__getattribute__(self, "_obj"), name)
def __setattr__(self, name, value):
setattr(object.__getattribute__(self, "_obj"), name, value)
def __nonzero__(self):
return bool(object.__getattribute__(self, "_obj"))
def __str__(self):
return "<{}: {}>".format(type(self), str(object.__getattribute__(self, "_obj")))
def __repr__(self):
return "<{}: {}>".format(type(self), repr(object.__getattribute__(self, "_obj")))
def __iter__(self):
return iter(object.__getattribute__(self, "_obj"))
class AnalysisPartition(PartitionProxy):
"""A subclass of Partition with methods designed for analysis with Pandas. It is produced from
the partitions analysis property"""
def dataframe(self, predicate=None, filtered_columns=None, columns=None, df_class=None):
"""Return the partition as a Pandas dataframe
:param predicate: If defined, a callable that is called for each row, and if it returns true, the
row is included in the output.
:param filtered_columns: If defined, the value is a dict of column names and
associated values. Only rows where all of the named columms have the given values will be returned.
Setting the argument will overwrite any value set for the predicate
:param columns: A list or tuple of column names to return
:return: Pandas dataframe
"""
from operator import itemgetter
from ambry.pands import AmbryDataFrame
df_class = df_class or AmbryDataFrame
if columns:
ig = itemgetter(*columns)
else:
ig = None
columns = self.table.header
if filtered_columns:
def maybe_quote(v):
from six import string_types
if isinstance(v, string_types):
return '"{}"'.format(v)
else:
return v
code = ' and '.join("row.{} == {}".format(k, maybe_quote(v))
for k, v in filtered_columns.items())
predicate = eval('lambda row: {}'.format(code))
if predicate:
def yielder():
for row in self.reader:
if predicate(row):
if ig:
yield ig(row)
else:
yield row.dict
df = df_class(yielder(), columns=columns, partition=self.measuredim)
return df
else:
def yielder():
for row in self.reader:
yield row.values()
# Put column names in header order
columns = [c for c in self.table.header if c in columns]
return df_class(yielder(), columns=columns, partition=self.measuredim)
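    # Hedged usage sketch (not part of the original class): filtered_columns builds an
    # equality predicate over the named columns. The partition object `p` and the column
    # names below are hypothetical.
    #
    #   df = p.analysis.dataframe(filtered_columns={'stusab': 'CA', 'year': 2010},
    #                             columns=['gvid', 'pop'])
    #
    # is equivalent to passing predicate=lambda row: row.stusab == "CA" and row.year == 2010.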
def geoframe(self, simplify=None, predicate=None, crs=None, epsg=None):
"""
Return geopandas dataframe
:param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
:param predicate: A single-argument function to select which records to include in the output.
:param crs: Coordinate reference system information
        :param epsg: Specify the CRS as an EPSG number.
:return: A Geopandas GeoDataFrame
"""
import geopandas
from shapely.wkt import loads
from fiona.crs import from_epsg
if crs is None and epsg is None and self.epsg is not None:
epsg = self.epsg
if crs is None:
try:
crs = from_epsg(epsg)
except TypeError:
raise TypeError('Must set either crs or epsg for output.')
df = self.dataframe(predicate=predicate)
geometry = df['geometry']
if simplify:
s = geometry.apply(lambda x: loads(x).simplify(simplify))
else:
s = geometry.apply(lambda x: loads(x))
df['geometry'] = geopandas.GeoSeries(s)
return geopandas.GeoDataFrame(df, crs=crs, geometry='geometry')
def shapes(self, simplify=None, predicate=None):
"""
Return geodata as a list of Shapely shapes
:param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
:param predicate: A single-argument function to select which records to include in the output.
:return: A list of Shapely objects
"""
from shapely.wkt import loads
if not predicate:
predicate = lambda row: True
if simplify:
return [loads(row.geometry).simplify(simplify) for row in self if predicate(row)]
else:
return [loads(row.geometry) for row in self if predicate(row)]
def patches(self, basemap, simplify=None, predicate=None, args_f=None, **kwargs):
"""
Return geodata as a list of Matplotlib patches
:param basemap: A mpl_toolkits.basemap.Basemap
:param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
:param predicate: A single-argument function to select which records to include in the output.
:param args_f: A function that takes a row and returns a dict of additional args for the Patch constructor
:param kwargs: Additional args to be passed to the descartes Path constructor
:return: A list of patch objects
"""
from descartes import PolygonPatch
from shapely.wkt import loads
from shapely.ops import transform
if not predicate:
predicate = lambda row: True
def map_xform(x, y, z=None):
return basemap(x, y)
def make_patch(shape, row):
args = dict(kwargs.items())
if args_f:
args.update(args_f(row))
return PolygonPatch(transform(map_xform, shape), **args)
def yield_patches(row):
if simplify:
shape = loads(row.geometry).simplify(simplify)
else:
shape = loads(row.geometry)
if shape.geom_type == 'MultiPolygon':
for subshape in shape.geoms:
yield make_patch(subshape, row)
else:
yield make_patch(shape, row)
return [patch for row in self if predicate(row)
for patch in yield_patches(row)]
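    # Hypothetical usage sketch (not part of the original source); assumes a
    # configured mpl_toolkits.basemap.Basemap instance `m` and a matplotlib
    # Axes `ax` already exist:
    #
    # >>> from matplotlib.collections import PatchCollection
    # >>> patches = p.patches(m, simplify=0.01, fc='gray', ec='black',
    # ...                     args_f=lambda row: {'alpha': 0.6})
    # >>> ax.add_collection(PatchCollection(patches, match_original=True))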
class MeasureDimensionPartition(PartitionProxy):
"""A partition proxy for accessing measure and dimensions. When returning a column, it returns
a PartitionColumn, which proxies the table column while adding partition specific functions. """
def __init__(self, obj):
super(MeasureDimensionPartition, self).__init__(obj)
self.filters = {}
def column(self, c_name):
return PartitionColumn(self.table.column(c_name), self)
@property
def columns(self):
"""Iterate over all columns"""
return [PartitionColumn(c, self) for c in self.table.columns]
@property
def primary_columns(self):
"""Iterate over the primary columns, columns which do not have a parent"""
return [c for c in self.columns if not c.parent]
@property
def dimensions(self):
"""Iterate over all dimensions"""
from ambry.valuetype.core import ROLE
return [c for c in self.columns if c.role == ROLE.DIMENSION]
@property
def primary_dimensions(self):
"""Iterate over the primary columns, columns which do not have a parent and have a
cardinality greater than 1"""
from ambry.valuetype.core import ROLE
return [c for c in self.columns
if not c.parent and c.role == ROLE.DIMENSION and c.pstats.nuniques > 1]
@property
def measures(self):
"""Iterate over all measures"""
from ambry.valuetype.core import ROLE
return [c for c in self.columns if c.role == ROLE.MEASURE]
def measure(self, vid):
"""Return a measure, given its vid or another reference"""
from ambry.orm import Column
if isinstance(vid, PartitionColumn):
return vid
elif isinstance(vid, Column):
return PartitionColumn(vid)
else:
return PartitionColumn(self.table.column(vid), self)
def dimension(self, vid):
"""Return a dimention, given its vid or another reference"""
from ambry.orm import Column
if isinstance(vid, PartitionColumn):
return vid
elif isinstance(vid, Column):
return PartitionColumn(vid)
else:
return PartitionColumn(self.table.column(vid), self)
@property
def primary_measures(self):
"""Iterate over the primary measures, columns which do not have a parent"""
return [c for c in self.measures if not c.parent]
@property
def dict(self):
d = self.detail_dict
d['dimension_sets'] = self.enumerate_dimension_sets()
return d
def dataframe(self, measure, p_dim, s_dim=None, filters={}, df_class=None):
"""
        Return a dataframe with a subset of the columns of the partition, including a measure and one
        or two dimensions. For dimensions that have labels, the labels are included.
The returned dataframe will have extra properties to describe the conversion:
* plot_axes: List of dimension names for the first and second axis
        * labels: The names of the label columns for the axes
* filtered: The `filters` dict
* floating: The names of primary dimensions that are not axes nor filtered
        There is also an iterator, `rows`, which returns the header and then all of the rows.
:param measure: The column names of one or more measures
:param p_dim: The primary dimension. This will be the index of the dataframe.
:param s_dim: a secondary dimension. The returned frame will be unstacked on this dimension
        :param filters: A dict of column names mapped to column values, indicating rows to select. A
        row that passes the filter must have the given value for every listed column; the entries are ANDed
:param df_class:
:return: a Dataframe, with extra properties
"""
import numpy as np
measure = self.measure(measure)
p_dim = self.dimension(p_dim)
assert p_dim
if s_dim:
s_dim = self.dimension(s_dim)
columns = set([measure.name, p_dim.name])
if p_dim.label:
# For geographic datasets, also need the gvid
if p_dim.geoid:
columns.add(p_dim.geoid.name)
columns.add(p_dim.label.name)
if s_dim:
columns.add(s_dim.name)
if s_dim.label:
columns.add(s_dim.label.name)
def maybe_quote(v):
from six import string_types
if isinstance(v, string_types):
return '"{}"'.format(v)
else:
return v
# Create the predicate to filter out the filtered dimensions
if filters:
selected_filters = []
for k, v in filters.items():
if isinstance(v, dict):
# The filter is actually the whole set of possible options, so
# just select the first one
v = v.keys()[0]
selected_filters.append("row.{} == {}".format(k, maybe_quote(v)))
code = ' and '.join(selected_filters)
predicate = eval('lambda row: {}'.format(code))
else:
code = None
def predicate(row):
return True
df = self.analysis.dataframe(predicate, columns=columns, df_class=df_class)
if df is None or df.empty or len(df) == 0:
return None
# So we can track how many records were aggregated into each output row
df['_count'] = 1
def aggregate_string(x):
return ', '.join(set(str(e) for e in x))
agg = {
'_count': 'count',
}
for col_name in columns:
c = self.column(col_name)
# The primary and secondary dimensions are put into the index by groupby
if c.name == p_dim.name or (s_dim and c.name == s_dim.name):
continue
            # FIXME! This will only work if the child is only one level from the parent. Should
            # have an accessor for the top level.
if c.parent and (c.parent == p_dim.name or (s_dim and c.parent == s_dim.name)):
continue
if c.is_measure:
agg[c.name] = np.mean
if c.is_dimension:
agg[c.name] = aggregate_string
plot_axes = [p_dim.name]
if s_dim:
plot_axes.append(s_dim.name)
df = df.groupby(list(columns - set([measure.name]))).agg(agg).reset_index()
df._metadata = ['plot_axes', 'filtered', 'floating', 'labels', 'dimension_set', 'measure']
df.plot_axes = [c for c in plot_axes]
df.filtered = filters
# Dimensions that are not specified as axes nor filtered
df.floating = list(set(c.name for c in self.primary_dimensions) -
set(df.filtered.keys()) -
set(df.plot_axes))
df.labels = [self.column(c).label.name if self.column(c).label else c for c in df.plot_axes]
df.dimension_set = self.dimension_set(p_dim, s_dim=s_dim)
df.measure = measure.name
def rows(self):
yield ['id'] + list(df.columns)
for t in df.itertuples():
yield list(t)
# Really should not do this, but I don't want to re-build the dataframe with another
# class
df.__class__.rows = property(rows)
return df
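    # Hypothetical usage sketch (not part of the original source); the measure
    # and dimension names are invented:
    #
    # >>> df = mdp.dataframe('median_income', 'year', s_dim='county',
    # ...                    filters={'race': 'all'})
    # >>> df.plot_axes        # ['year', 'county']
    # >>> df.measure          # 'median_income'
    # >>> df.floating         # primary dimensions that are neither axes nor filters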
def dimension_set(self, p_dim, s_dim=None, dimensions=None, extant=set()):
"""
Return a dict that describes the combination of one or two dimensions, for a plot
:param p_dim:
:param s_dim:
:param dimensions:
:param extant:
:return:
"""
if not dimensions:
dimensions = self.primary_dimensions
key = p_dim.name
if s_dim:
key += '/' + s_dim.name
# Ignore if the key already exists or the primary and secondary dims are the same
if key in extant or p_dim == s_dim:
return
# Don't allow geography to be a secondary dimension. It must either be a primary dimension
# ( to make a map ) or a filter, or a small-multiple
if s_dim and s_dim.valuetype_class.is_geo():
return
extant.add(key)
filtered = {}
for d in dimensions:
if d != p_dim and d != s_dim:
filtered[d.name] = d.pstats.uvalues.keys()
if p_dim.valuetype_class.is_time():
value_type = 'time'
chart_type = 'line'
elif p_dim.valuetype_class.is_geo():
value_type = 'geo'
chart_type = 'map'
else:
value_type = 'general'
chart_type = 'bar'
return dict(
key=key,
p_dim=p_dim.name,
p_dim_type=value_type,
p_label=p_dim.label_or_self.name,
s_dim=s_dim.name if s_dim else None,
s_label=s_dim.label_or_self.name if s_dim else None,
filters=filtered,
chart_type=chart_type
)
def enumerate_dimension_sets(self):
dimension_sets = {}
dimensions = self.primary_dimensions
extant = set()
for d1 in dimensions:
ds = self.dimension_set(d1, None, dimensions, extant)
if ds:
dimension_sets[ds['key']] = ds
for d1 in dimensions:
for d2 in dimensions:
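                # Make the higher-cardinality dimension the primary (d1) so it
                # carries the main plot axis.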
if d2.cardinality >= d1.cardinality:
d1, d2 = d2, d1
ds = self.dimension_set(d1, d2, dimensions, extant)
if ds:
dimension_sets[ds['key']] = ds
return dimension_sets
class ColumnProxy(PartitionProxy):
def __init__(self, obj, partition):
object.__setattr__(self, "_obj", obj)
object.__setattr__(self, "_partition", partition)
MAX_LABELS = 75 # Maximum number of unique records before it's assumed that the values aren't valid labels
class PartitionColumn(ColumnProxy):
"""A proxy on the Column that links a Column to a Partition, for direct access to the stats
and column labels"""
def __init__(self, obj, partition):
super(PartitionColumn, self).__init__(obj, partition)
object.__setattr__(self, "pstats", partition.stats_dict[obj.name])
@property
def children(self):
""""Return the table's other column that have this column as a parent, excluding labels"""
for child in self.children:
yield PartitionColumn(child, self._partition)
@property
def label(self):
""""Return first child that of the column that is marked as a label"""
for c in self.table.columns:
if c.parent == self.name and 'label' in c.valuetype:
return PartitionColumn(c, self._partition)
@property
def value_labels(self):
"""Return a map of column code values mapped to labels, for columns that have a label column
If the column is not assocaited with a label column, it returns an identity map.
WARNING! This reads the whole partition, so it is really slow
"""
from operator import itemgetter
card = self.pstats.nuniques
if self.label:
ig = itemgetter(self.name, self.label.name)
elif self.pstats.nuniques < MAX_LABELS:
ig = itemgetter(self.name, self.name)
else:
return {}
label_set = set()
for row in self._partition:
label_set.add(ig(row))
if len(label_set) >= card:
break
d = dict(label_set)
assert len(d) == len(label_set) # Else the label set has multiple values per key
return d
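    # Hypothetical result sketch (not part of the original source): for a coded
    # column with a label child this might return {1: 'Male', 2: 'Female'};
    # for an unlabeled low-cardinality column it degenerates to an identity
    # map such as {'CA': 'CA'}.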
@property
def cardinality(self):
"""Returns the bymber of unique elements"""
return self.pstats.nuniques
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.name)
| bsd-2-clause |
aswolf/xmeos | xmeos/test/test_models.py | 1 | 5903 | from __future__ import absolute_import, print_function, division, with_statement
from builtins import object
import numpy as np
import xmeos
from xmeos import models
from xmeos.models import core
import pytest
import matplotlib.pyplot as plt
import matplotlib as mpl
from abc import ABCMeta, abstractmethod
import copy
try:
import cPickle as pickle
except:
import pickle
#====================================================================
# Define "slow" tests
# - indicated by @slow decorator
# - slow tests are run only if using --runslow cmd line arg
#====================================================================
slow = pytest.mark.skipif(
not pytest.config.getoption("--runslow"),
reason="need --runslow option to run"
)
class BaseTestEos(object):
@abstractmethod
def load_eos(self):
pass
def test_param_getset(self):
eos_mod = self.load_eos()
param_names = eos_mod.param_names
param_values_orig = eos_mod.param_values
param_values = []
for name in param_names:
value, = eos_mod.get_param_values(param_names=name)
param_values.append(value)
param_values = np.array(param_values)
assert np.all(param_values==param_values_orig), \
'param values retrieved one at a time unequal.'
ind_all = np.arange(len(param_names))
ind_reorder = np.random.permutation(ind_all)
eos_mod.set_param_values(param_values[ind_reorder],
param_names=param_names[ind_reorder])
param_values_reorder = eos_mod.get_param_values()[ind_reorder]
assert np.all(param_values[ind_reorder]==param_values_reorder), \
'param value setter fails upon reordering param_names.'
# Test scaling/unscaling parameters
FAC = 2
eos_mod.set_param_values(FAC*eos_mod.param_values)
for name, value in zip(eos_mod.param_names, eos_mod.param_values):
eos_mod.set_param_values(value/FAC, param_names=name)
param_values_set = eos_mod.param_values
assert np.all(param_values_set==param_values_orig), (
'Parameter set method not working. '
'Doubling and Halving should match original param. values.' )
pass
def numerical_deriv(self, x, y, dydx, scale=1):
Nsamp = len(x)
assert len(y)==Nsamp, 'y array must be same length as x array.'
assert len(dydx)==Nsamp, 'dydx array must be same length as x array.'
try:
assert len(scale)==Nsamp, (
'If scale is an array, it must be same length as x.' )
except:
pass
dx = x[1]-x[0]
dydx_num = scale*np.gradient(y, dx)
dydx_range = np.max(dydx)-np.min(dydx)
dydx_diff = dydx_num-dydx
abs_err = np.max(np.abs(dydx_diff)[1:-1])
rel_err = np.max(np.abs(dydx_diff/dydx)[1:-1])
range_err = np.max(np.abs(dydx_diff/dydx_range)[1:-1])
return abs_err, rel_err, range_err
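    # Minimal sketch (not part of the original source) of the comparison made
    # above: np.gradient uses central differences on interior points, which
    # are exact for a quadratic, so checking y = x**2 against the analytic
    # derivative 2*x should give a negligible interior error.
    #
    # >>> x = np.linspace(0., 1., 1001)
    # >>> dydx_num = np.gradient(x**2, x[1] - x[0])
    # >>> bool(np.max(np.abs(dydx_num - 2*x)[1:-1]) < 1e-8)
    # True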
def test_pickle(self):
eos_mod = self.load_eos()
data_string = pickle.dumps(eos_mod)
eos_load_mod = pickle.loads(data_string)
# filenm = 'test/pkl/test_pickle.pkl'
# with open(filenm, 'w') as f:
# pickle.dump(eos_mod, f)
# with open(filenm, 'r') as f:
# eos_loaded = pickle.load(f)
assert repr(eos_mod)==repr(eos_load_mod), (
'Pickled and unpickled Eos Models are not equal.')
# # def do_test_energy_perturb_eval(self):
# # TOL = 1e-4
# # dxfrac = 1e-8
# # Nsamp = 10001
# # eos_mod = self.init_params()
# # param_d = eos_d['param_d']
# # Vmod_a = np.linspace(.7,1.3,Nsamp)*param_d['V0']
# # dV = Vmod_a[1] - Vmod_a[0]
# # if compress_path_mod.expand_adj:
# # scale_a, paramkey_a = \
# # compress_path_mod.get_param_scale( eos_d,apply_expand_adj=True )
# # else:
# # scale_a, paramkey_a = compress_path_mod.get_param_scale( eos_d)
# # Eperturb_num_a = np.zeros((paramkey_a.size,Nsamp))
# # for ind,paramkey in enumerate(paramkey_a):
# # Eperturb_num_a[ind,:] = compress_path_mod.param_deriv\
# # ( 'energy', paramkey, Vmod_a, eos_d, dxfrac=dxfrac)
# # # dEdV0_a = compress_path_mod.param_deriv( 'energy', 'V0', Vmod_a, eos_d, dxfrac=dxfrac)
# # # dEdK0_a = compress_path_mod.param_deriv( 'energy', 'K0', Vmod_a, eos_d, dxfrac=dxfrac)
# # # dEdKP0_a = compress_path_mod.param_deriv( 'energy', 'KP0', Vmod_a, eos_d, dxfrac=dxfrac)
# # # dEdKP20_a = compress_path_mod.param_deriv( 'energy', 'KP20', Vmod_a, eos_d, dxfrac=dxfrac)
# # # dEdE0_a = compress_path_mod.param_deriv( 'energy', 'E0', Vmod_a, eos_d, dxfrac=dxfrac)
# # Eperturb_a, scale_a, paramkey_a = compress_path_mod.energy_perturb(Vmod_a, eos_d)
# # # print paramkey_a
# # # Eperturb_num_a = np.vstack((dEdV0_a,dEdK0_a,dEdKP0_a,dEdKP20_a,dEdE0_a))
# # max_error_a = np.max(np.abs(Eperturb_a-Eperturb_num_a),axis=1)
# # # try:
# # # except:
# # # from IPython import embed; embed(); import ipdb; ipdb.set_trace()
# # # plt.plot(Vmod_a,Eperturb_a.T,'-',Vmod_a, Eperturb_num_a.T,'--')
# # # plt.ion()
# # # plt.figure()
# # # plt.clf()
# # # plt.plot(Vmod_a[::100], Eperturb_num_a[:,::100].T,'x',
# # # Vmod_a[::100], Eperturb_a[3,::100].T,'r-')
# # # plt.plot(Vmod_a[::100], Eperturb_num_a[:,::100].T,'x',
# # # Vmod_a, Eperturb_a.T,'-')
# # # plt.plot(Vmod_a[::100], Eperturb_a[3,::100].T,'r-')
# # # Eperturb_num_a-Eperturb_a
# # assert np.all(max_error_a < TOL),'Error in energy perturbation must be'\
# # 'less than TOL.'
| mit |
harisbal/pandas | pandas/tests/indexing/test_timedelta.py | 4 | 3710 | import numpy as np
import pytest
import pandas as pd
from pandas.util import testing as tm
class TestTimedeltaIndexing(object):
def test_boolean_indexing(self):
# GH 14946
df = pd.DataFrame({'x': range(10)})
df.index = pd.to_timedelta(range(10), unit='s')
conditions = [df['x'] > 3, df['x'] == 3, df['x'] < 3]
expected_data = [[0, 1, 2, 3, 10, 10, 10, 10, 10, 10],
[0, 1, 2, 10, 4, 5, 6, 7, 8, 9],
[10, 10, 10, 3, 4, 5, 6, 7, 8, 9]]
for cond, data in zip(conditions, expected_data):
result = df.assign(x=df.mask(cond, 10).astype('int64'))
expected = pd.DataFrame(data,
index=pd.to_timedelta(range(10), unit='s'),
columns=['x'],
dtype='int64')
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize(
"indexer, expected",
[(0, [20, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
(slice(4, 8), [0, 1, 2, 3, 20, 20, 20, 20, 8, 9]),
([3, 5], [0, 1, 2, 20, 4, 20, 6, 7, 8, 9])])
def test_list_like_indexing(self, indexer, expected):
# GH 16637
df = pd.DataFrame({'x': range(10)}, dtype="int64")
df.index = pd.to_timedelta(range(10), unit='s')
df.loc[df.index[indexer], 'x'] = 20
expected = pd.DataFrame(expected,
index=pd.to_timedelta(range(10), unit='s'),
columns=['x'],
dtype="int64")
tm.assert_frame_equal(expected, df)
def test_string_indexing(self):
# GH 16896
df = pd.DataFrame({'x': range(3)},
index=pd.to_timedelta(range(3), unit='days'))
expected = df.iloc[0]
sliced = df.loc['0 days']
tm.assert_series_equal(sliced, expected)
@pytest.mark.parametrize(
"value",
[None, pd.NaT, np.nan])
def test_masked_setitem(self, value):
# issue (#18586)
series = pd.Series([0, 1, 2], dtype='timedelta64[ns]')
series[series == series[0]] = value
expected = pd.Series([pd.NaT, 1, 2], dtype='timedelta64[ns]')
tm.assert_series_equal(series, expected)
@pytest.mark.parametrize(
"value",
[None, pd.NaT, np.nan])
def test_listlike_setitem(self, value):
# issue (#18586)
series = pd.Series([0, 1, 2], dtype='timedelta64[ns]')
series.iloc[0] = value
expected = pd.Series([pd.NaT, 1, 2], dtype='timedelta64[ns]')
tm.assert_series_equal(series, expected)
@pytest.mark.parametrize('start,stop, expected_slice', [
[np.timedelta64(0, 'ns'), None, slice(0, 11)],
[np.timedelta64(1, 'D'), np.timedelta64(6, 'D'), slice(1, 7)],
[None, np.timedelta64(4, 'D'), slice(0, 5)]])
def test_numpy_timedelta_scalar_indexing(self, start, stop,
expected_slice):
# GH 20393
s = pd.Series(range(11), pd.timedelta_range('0 days', '10 days'))
result = s.loc[slice(start, stop)]
expected = s.iloc[expected_slice]
tm.assert_series_equal(result, expected)
def test_roundtrip_thru_setitem(self):
# PR 23462
dt1 = pd.Timedelta(0)
dt2 = pd.Timedelta(28767471428571405)
df = pd.DataFrame({'dt': pd.Series([dt1, dt2])})
df_copy = df.copy()
s = pd.Series([dt1])
expected = df['dt'].iloc[1].value
df.loc[[True, False]] = s
result = df['dt'].iloc[1].value
assert expected == result
tm.assert_frame_equal(df, df_copy)
| bsd-3-clause |
hainm/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
bnaul/scikit-learn | examples/cluster/plot_mean_shift.py | 23 | 1775 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets import make_blobs
# #############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
# #############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
# #############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
kgjamieson/NEXT | apps/PoolBasedBinaryClassification/dashboard/Dashboard.py | 2 | 3962 | import json
import numpy
import numpy.random
from datetime import datetime
from datetime import timedelta
import next.utils as utils
from next.apps.AppDashboard import AppDashboard
# import next.database_client.DatabaseAPIHTTP as db
# import next.logging_client.LoggerHTTP as ell
class MyAppDashboard(AppDashboard):
def __init__(self,db,ell):
AppDashboard.__init__(self, db, ell)
def test_error_multiline_plot(self,app, butler):
"""
Description: Returns multiline plot where there is a one-to-one mapping lines to
algorithms and each line indicates the error on the validation set with respect to number of reported answers
Expected input:
None
Expected output (in dict):
(dict) MPLD3 plot dictionary
"""
args = butler.experiment.get(key='args')
alg_list = args['alg_list']
test_alg_label = alg_list[0]['test_alg_label']
test_queries = butler.db.get_docs_with_filter(app.app_id+':queries',{'exp_uid':app.exp_uid, 'alg_label':test_alg_label})
test_S = [(query['target_index'], query['target_label'])
for query in test_queries
if 'target_index' in query.keys()]
targets = butler.targets.get_targetset(app.exp_uid)
targets = sorted(targets,key=lambda x: x['target_id'])
target_features = []
for target_index in range(len(targets)):
target_vec = targets[target_index]['meta']['features']
target_vec.append(1.)
target_features.append(target_vec)
x_min = numpy.float('inf')
x_max = -numpy.float('inf')
y_min = numpy.float('inf')
y_max = -numpy.float('inf')
list_of_alg_dicts = []
for algorithm in alg_list:
alg_label = algorithm['alg_label']
list_of_log_dict = self.ell.get_logs_with_filter(app.app_id+':ALG-EVALUATION',{'exp_uid':app.exp_uid, 'alg_label':alg_label})
list_of_log_dict = sorted(list_of_log_dict, key=lambda item: utils.str2datetime(item['timestamp']) )
x = []
y = []
for item in list_of_log_dict:
num_reported_answers = item['num_reported_answers']
weights = item['weights']
err = 0.
for q in test_S:
estimated_label = numpy.sign(numpy.dot( numpy.array(target_features[q[0]]), numpy.array(weights) ))
err += estimated_label*q[1]<0. #do the labels agree or not
m = float(len(test_S))
err = err/m
x.append(num_reported_answers)
y.append(err)
            idx = numpy.argsort(x)
            x = [x[i] for i in idx]
            y = [y[i] for i in idx]
alg_dict = {}
alg_dict['legend_label'] = alg_label
alg_dict['x'] = x
alg_dict['y'] = y
try:
x_min = min(x_min,min(x))
x_max = max(x_max,max(x))
y_min = min(y_min,min(y))
y_max = max(y_max,max(y))
except:
pass
list_of_alg_dicts.append(alg_dict)
import matplotlib.pyplot as plt
import mpld3
fig, ax = plt.subplots(subplot_kw=dict(axisbg='#EEEEEE'))
for alg_dict in list_of_alg_dicts:
ax.plot(alg_dict['x'],alg_dict['y'],label=alg_dict['legend_label'])
ax.set_xlabel('Number of answered queries')
ax.set_ylabel('Error on hold-out set')
ax.set_xlim([x_min,x_max])
ax.set_ylim([y_min,y_max])
ax.grid(color='white', linestyle='solid')
ax.set_title('Test Error', size=14)
legend = ax.legend(loc=2,ncol=3,mode="expand")
for label in legend.get_texts():
label.set_fontsize('small')
plot_dict = mpld3.fig_to_dict(fig)
plt.close()
return plot_dict
| apache-2.0 |
GUBotDev/mavlink | pymavlink/tools/mavfft.py | 23 | 2505 | #!/usr/bin/env python
'''
plot FFTs of raw accelerometer and gyro data from a log file
'''
import sys, time, os, math, numpy
import matplotlib.pyplot as plt
import pylab
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--condition", default=None, help="select packets by condition")
parser.add_argument("--sample-length", type=int, default=0, help="number of samples to run FFT over")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
def fft(logfile):
'''display fft for raw ACC data in logfile'''
print("Processing log %s" % filename)
mlog = mavutil.mavlink_connection(filename)
data = {'ACC1.rate' : 1000,
'ACC2.rate' : 1600,
'ACC3.rate' : 1000,
'GYR1.rate' : 1000,
'GYR2.rate' : 800,
'GYR3.rate' : 1000}
for acc in ['ACC1','ACC2','ACC3']:
for ax in ['AccX', 'AccY', 'AccZ']:
data[acc+'.'+ax] = []
for gyr in ['GYR1','GYR2','GYR3']:
for ax in ['GyrX', 'GyrY', 'GyrZ']:
data[gyr+'.'+ax] = []
# now gather all the data
while True:
m = mlog.recv_match(condition=args.condition)
if m is None:
break
type = m.get_type()
if type.startswith("ACC"):
data[type+'.AccX'].append(m.AccX)
data[type+'.AccY'].append(m.AccY)
data[type+'.AccZ'].append(m.AccZ)
if type.startswith("GYR"):
data[type+'.GyrX'].append(m.GyrX)
data[type+'.GyrY'].append(m.GyrY)
data[type+'.GyrZ'].append(m.GyrZ)
print("Extracted %u data points" % len(data['ACC1.AccX']))
for msg in ['ACC1', 'ACC2', 'ACC3', 'GYR1', 'GYR2', 'GYR3']:
pylab.figure()
if msg.startswith('ACC'):
prefix = 'Acc'
else:
prefix = 'Gyr'
for axis in ['X', 'Y', 'Z']:
field = msg + '.' + prefix + axis
d = data[field]
if args.sample_length != 0:
d = d[0:args.sample_length]
d = numpy.array(d)
if len(d) == 0:
continue
avg = numpy.sum(d) / len(d)
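            # subtract the mean so the DC component does not dominate the spectrum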
d -= avg
d_fft = numpy.fft.rfft(d)
freq = numpy.fft.rfftfreq(len(d), 1.0 / data[msg+'.rate'])
pylab.plot( freq, numpy.abs(d_fft), label=field )
pylab.legend(loc='upper right')
for filename in args.logs:
fft(filename)
pylab.show()
| lgpl-3.0 |
courtarro/gnuradio-wg-grc | gr-filter/examples/fir_filter_fff.py | 47 | 4014 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_fff(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_f(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_float, self._nsamps)
self.filt0 = filter.fir_filter_fff(self._decim, taps)
self.vsnk_src = blocks.vector_sink_f()
self.vsnk_out = blocks.vector_sink_f()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_fff(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
Universal-Model-Converter/UMC3.0a | data/Python/x86/Lib/site-packages/scipy/interpolate/ndgriddata.py | 3 | 6245 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Parameters
----------
points : (Npoints, Ndims) ndarray of floats
Data point coordinates.
values : (Npoints,) ndarray of float or complex
Data values.
Notes
-----
Uses ``scipy.spatial.cKDTree``
"""
def __init__(self, x, y):
x = _ndim_coords_from_arrays(x)
self._check_init_shape(x, y)
self.tree = cKDTree(x)
self.points = x
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args)
xi = self._check_call_shape(xi)
dist, i = self.tree.query(xi)
return self.values[i]
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan):
"""
Interpolate unstructured N-dimensional data.
.. versionadded:: 0.9
Parameters
----------
points : ndarray of floats, shape (N, ndim)
Data point coordinates. Can either be an array of
size (N, ndim), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (N,)
Data values.
xi : ndarray of float, shape (M, ndim)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``: return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
        ``linear``: tessellate the input point set to n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D): return the value determined from a cubic
spline.
``cubic`` (2-D): return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
>>> return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
| mit |
meduz/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 127 | 1732 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants bicluster with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
BenaroyaResearch/bripipetools | tests/test_postprocess.py | 1 | 29917 | import logging
import os
import shutil
import pytest
import pandas as pd
from bripipetools import postprocessing
from bripipetools import io
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class TestOutputStitcher:
"""
Tests methods for the `OutputSticher` class in the
`bripipetools.postprocessing.stitching` module, which is used
to combine output data across all sources and samples into
a single table for a selected output type.
"""
@pytest.mark.parametrize(
'test_input, expected_result',
[
('counts', 'counts'),
('metrics', 'metrics'),
('QC', 'qc'),
('validation', 'validation'),
]
)
def test_sniff_output_type(self, tmpdir, test_input, expected_result):
# GIVEN a path to a folder with output data
mock_path = tmpdir.join(test_input)
# AND a sticher object is created for that path
stitcher = postprocessing.OutputStitcher(
path=str(mock_path)
)
# WHEN the path is checked to determine output type from a predefined
# set of options
# THEN the assigned output type should match the expected result
assert (stitcher._sniff_output_type() == expected_result)
@pytest.mark.parametrize(
'test_input, expected_result',
[
(('metrics', 'htseq'), getattr(io, 'HtseqMetricsFile')),
(('metrics', 'picard-rnaseq'), getattr(io, 'PicardMetricsFile')),
(('metrics', 'picard-markdups'), getattr(io, 'PicardMetricsFile')),
(('metrics', 'picard-align'), getattr(io, 'PicardMetricsFile')),
(('metrics', 'tophat-stats'), getattr(io, 'TophatStatsFile')),
(('qc', 'fastqc'), getattr(io, 'FastQCFile')),
(('counts', 'htseq'), getattr(io, 'HtseqCountsFile')),
(('validation', 'sexcheck'), getattr(io, 'SexcheckFile'))
]
)
def test_get_parser(self, tmpdir, test_input, expected_result):
# GIVEN an arbitrary path
mock_path = tmpdir.join('')
# AND a stitcher object is created for that path
stitcher = postprocessing.OutputStitcher(
path=str(mock_path)
)
# WHEN the io parser class is retrieved for a particular output
# type and source
mock_type, mock_source = test_input
testparser = stitcher._get_parser(mock_type, mock_source)
# THEN the io class should match the expected result
assert (testparser == expected_result)
def test_read_data(self, tmpdir):
# GIVEN a path to a folder with output data of type 'metrics'
# (output type should not matter here, assuming that individual
# io classes/modules have been tested)
mock_path = tmpdir.join('metrics')
# AND the folder contains outputs from multiple sources and
# for multiple samples
mock_filedata = [
{'mock_filename': 'lib1111_C00000XX_htseq_metrics.txt',
'mock_contents': ['__field_1\t123\n',
'__field_2\t321\n']},
{'mock_filename': 'lib1111_C00000XX_tophat_stats_metrics.txt',
'mock_contents': ['12345\ttotal reads in fastq file\n'
'54321\treads aligned in sam file\n']},
{'mock_filename': 'lib2222_C00000XX_htseq_metrics.txt',
'mock_contents': ['__field_1\t456\n',
'__field_2\t654\n']},
{'mock_filename': 'lib2222_C00000XX_tophat_stats_metrics.txt',
'mock_contents': ['56789\ttotal reads in fastq file\n'
'98765\treads aligned in sam file\n']},
]
for m in mock_filedata:
mock_file = mock_path.ensure(m['mock_filename'])
mock_file.write(''.join(m['mock_contents']))
# AND a stitcher object is created for the folder path
stitcher = postprocessing.OutputStitcher(
path=str(mock_path)
)
# WHEN all file contents in the folder are read and stored as a dict
# in the object's 'data' attribute (in the field corresponding to
# output type)
stitcher._read_data()
# THEN the data stored in the dict should be properly parsed into
# key-value pairs and grouped by output source for each sample
assert (stitcher.data['metrics'] ==
{
'lib1111_C00000XX': [
{'htseq': {'field_1': 123,
'field_2': 321}},
{'tophat-stats': {'fastq_total_reads':
12345.0,
'reads_aligned_sam':
54321.0}},
],
'lib2222_C00000XX': [
{'htseq': {'field_1': 456,
'field_2': 654}},
{'tophat-stats': {'fastq_total_reads':
56789.0,
'reads_aligned_sam':
98765.0}},
],
})
def test_build_table_for_noncount_data(self, tmpdir):
# GIVEN a path to a folder with output data of type 'metrics'
mock_path = tmpdir.join('metrics')
# AND a stitcher object is created for the folder path
stitcher = postprocessing.OutputStitcher(
path=str(mock_path)
)
# AND parsed data from output files are stored as a nested dict
# in the object's 'data' attribute (in the field corresponding to
# output type)
mock_data = {
'lib1111_C00000XX': [
{'htseq': {'field_1': 123,
'field_2': 321}},
{'tophat_stats': {'fastq_total_reads':
12345,
'reads_aligned_sam':
54321}},
],
'lib2222_C00000XX': [
{'htseq': {'field_1': 456,
'field_2': 654}},
{'tophat_stats': {'fastq_total_reads':
56789,
'reads_aligned_sam':
98765}},
],
}
stitcher.data = {'metrics': mock_data}
# WHEN all key-value pairs for output data are combined into a
# single list corresponding to rows of a table for all samples
testdata = stitcher._build_table()
# THEN the list of lists (where each sublist is a table row)
# should match the expected result, with sample IDs in the first
# column and output keys in remaining columns
assert (testdata
== [
['libId', 'fastq_total_reads',
'field_1', 'field_2', 'reads_aligned_sam'],
['lib1111_C00000XX', 12345,
123, 321, 54321],
['lib2222_C00000XX', 56789,
456, 654, 98765]
])
def test_build_table_for_count_data(self, tmpdir):
# GIVEN a path to a folder with output data of type 'counts'
# (methods for combining count data require Pandas dataframe
# operations, and thus need to be treated differently than
# other output types)
mock_path = tmpdir.join('counts')
# AND a stitcher object is created for the folder path
stitcher = postprocessing.OutputStitcher(
path=str(mock_path)
)
# AND parsed data from output files are stored as a nested dict
# in the object's 'data' attribute (in the field corresponding to
# output type)
mock_data = {
'lib1111_C00000XX': [
{'htseq': pd.DataFrame([['field1', 0], ['field2', 1]],
columns=['geneName', 'count'])}
],
'lib2222_C00000XX': [
{'htseq': pd.DataFrame([['field1', 1], ['field2', 0]],
columns=['geneName', 'count'])}
]
}
stitcher.data = {'counts': mock_data}
# WHEN all count data frames merged into a single data frame
# for all samples, with gene IDs stored in the first column and
# counts for individual samples stored in remaining columns
testdata = stitcher._build_table()
# THEN the combined data frame should match the expected result
mock_df = pd.DataFrame(
[['field1', 0, 1], ['field2', 1, 0]],
columns=['geneName', 'lib1111_C00000XX', 'lib2222_C00000XX']
)
assert all((testdata[k] == mock_df[k]).all() for k in list(mock_df.keys()))
def test_build_combined_filename(self, tmpdir):
# GIVEN a path to a folder with output data of type 'metrics',
# which exists in a processed project folder at the path
# '<root>/genomics/Illumina/<run-id>/<project-folder>'
mock_run = '161231_INSTID_0001_AC00000XX'
mock_project = 'Project_P00-00Processed_161231'
mock_path = (tmpdir.mkdir('genomics').mkdir('Illumina')
.mkdir(mock_run)
.mkdir(mock_project)
.mkdir('metrics'))
# AND a stitcher object is created for the folder path
stitcher = postprocessing.OutputStitcher(
path=str(mock_path)
)
# WHEN the combined filename is constructed for output data
# of the current type
testfilename = stitcher._build_combined_filename()
# THEN the filename should be in the form
# '<project-id>_<flowcell-id>_<process-date>_combined_<out-type>.csv'
assert (testfilename == 'P00-00_C00000XX_161231_combined_metrics.csv')
def test_write_table_for_noncount_data(self, tmpdir):
# GIVEN a path to a folder with output data of type 'metrics',
# which exists in a processed project folder at the path
# '<root>/genomics/Illumina/<run-id>/<project-folder>'
mock_run = '161231_INSTID_0001_AC00000XX'
mock_project = 'Project_P00-00Processed_161231'
mock_path = (tmpdir.mkdir('genomics').mkdir('Illumina')
.mkdir(mock_run)
.mkdir(mock_project)
.mkdir('metrics'))
# AND the folder contains outputs from multiple sources and
# for multiple samples
mock_filedata = [
{'mock_filename': 'lib1111_C00000XX_htseq_metrics.txt',
'mock_contents': ['__field_1\t123\n',
'__field_2\t321\n']},
{'mock_filename': 'lib1111_C00000XX_tophat_stats_metrics.txt',
'mock_contents': ['12345\ttotal reads in fastq file\n'
'54321\treads aligned in sam file\n']},
{'mock_filename': 'lib2222_C00000XX_htseq_metrics.txt',
'mock_contents': ['__field_1\t456\n',
'__field_2\t654\n']},
{'mock_filename': 'lib2222_C00000XX_tophat_stats_metrics.txt',
'mock_contents': ['56789\ttotal reads in fastq file\n'
'98765\treads aligned in sam file\n']},
]
for m in mock_filedata:
mock_file = mock_path.ensure(m['mock_filename'])
mock_file.write(''.join(m['mock_contents']))
# AND a stitcher object is created for the folder path
stitcher = postprocessing.OutputStitcher(
path=str(mock_path)
)
# WHEN combined data across all samples is written as a table
# in a new file
testtablefile = stitcher.write_table()
# THEN the combined table should exist at the expected path and
# contain the expected contents
mock_tablefile = mock_path.join(
'P00-00_C00000XX_161231_combined_metrics.csv'
)
mock_contents = [
','.join(['libId', 'fastq_total_reads',
'field_1', 'field_2', 'reads_aligned_sam\n']),
','.join(['lib1111_C00000XX', '12345.0',
'123', '321', '54321.0\n']),
','.join(['lib2222_C00000XX', '56789.0',
'456', '654', '98765.0\n']),
]
assert (testtablefile == mock_tablefile)
with open(testtablefile) as f:
assert (f.readlines() == mock_contents)
def test_write_table_for_count_data(self, tmpdir):
# GIVEN a path to a folder with output data of type 'counts',
# which exists in a processed project folder at the path
# '<root>/genomics/Illumina/<run-id>/<project-folder>'
mock_run = '161231_INSTID_0001_AC00000XX'
mock_project = 'Project_P00-00Processed_161231'
mock_path = (tmpdir.mkdir('genomics').mkdir('Illumina')
.mkdir(mock_run)
.mkdir(mock_project)
.mkdir('counts'))
# AND the folder contains outputs from multiple sources and
# for multiple samples
mock_filedata = [
{'mock_filename': 'lib1111_C00000XX_htseq_counts.txt',
'mock_contents': ['field1\t0\n',
'field2\t1\n']},
{'mock_filename': 'lib2222_C00000XX_htseq_counts.txt',
'mock_contents': ['field1\t1\n',
'field2\t0\n']},
]
for m in mock_filedata:
mock_file = mock_path.ensure(m['mock_filename'])
mock_file.write(''.join(m['mock_contents']))
# AND a stitcher object is created for the folder path
stitcher = postprocessing.OutputStitcher(
path=str(mock_path)
)
# WHEN combined data across all samples is written as a table
# in a new file
testtablefile = stitcher.write_table()
# THEN the combined table should exist at the expected path and
# contain the expected contents
mock_tablefile = mock_path.join(
'P00-00_C00000XX_161231_combined_counts.csv'
)
mock_contents = [
','.join(['geneName', 'lib1111_C00000XX', 'lib2222_C00000XX\n']),
','.join(['field1', '0', '1\n']),
','.join(['field2', '1', '0\n']),
]
assert (testtablefile == mock_tablefile)
with open(testtablefile) as f:
assert (f.readlines() == mock_contents)
class TestOutputCompiler:
"""
Tests methods for the `OutputCompiler` class in the
`bripipetools.postprocessing.compiling` module, which is used
to merge combined output data from multiple summary output
types (i.e., summary indicates one value per sample).
"""
def test_read_data(self, tmpdir):
# GIVEN a path to a processed project folder at the path
# '<root>/genomics/Illumina/<run-id>/<project-folder>'
mock_run = '161231_INSTID_0001_AC00000XX'
mock_project = 'Project_P00-00Processed_161231'
# AND one or more folders with output data of 'summary' types
# (e.g., metrics, QC, validation), each of which includes a
# 'combined' table file for its respective type
mock_paths = []
mock_projectpath = (tmpdir.mkdir('genomics').mkdir('Illumina')
.mkdir(mock_run)
.mkdir(mock_project))
for i in range(2):
mock_path = mock_projectpath.mkdir('type{}'.format(i))
mock_tablefile = mock_path.join(
'P00-00_C00000XX_161231_combined_type{}.csv'.format(i)
)
mock_paths.append(str(mock_tablefile))
mock_contents = [
'libId,type{}_field1,type{}_field2\n',
'sample1,type{}_sample1_value1,type{}_sample1_value2\n',
'sample2,type{}_sample2_value1,type{}_sample2_value2\n',
]
mock_tablefile.write(
''.join([line.format(i+1, i+1) for line in mock_contents])
)
# AND a compiler object is created for the project folder path
compiler = postprocessing.OutputCompiler(
paths=mock_paths
)
# WHEN data from the combined table for each type is read and
# stored in the object's 'data' attribute
compiler._read_data()
# THEN the resulting list stored in the object's 'data' attribute
# should include a list for each combined table, with each item
# representing a row in a table as a list of column values
mock_data = [
[
['libId', 'type1_field1', 'type1_field2'],
['sample1','type1_sample1_value1', 'type1_sample1_value2'],
['sample2', 'type1_sample2_value1', 'type1_sample2_value2'],
],
[
['libId', 'type2_field1', 'type2_field2'],
['sample1', 'type2_sample1_value1', 'type2_sample1_value2'],
['sample2', 'type2_sample2_value1', 'type2_sample2_value2'],
],
]
assert (compiler.data == mock_data)
def test_build_table(self):
# GIVEN a compiler object, created for an arbitrary list of paths
compiler = postprocessing.OutputCompiler(
paths=[]
)
        # AND parsed data from combined output table files are stored in the
# object's 'data' attribute
mock_data = [
[
['libId', 'type1_field1', 'type1_field2'],
['sample1','type1_sample1_value1', 'type1_sample1_value2'],
['sample2', 'type1_sample2_value1', 'type1_sample2_value2'],
],
[
['libId', 'type2_field1', 'type2_field2'],
['sample1', 'type2_sample1_value1', 'type2_sample1_value2'],
['sample2', 'type2_sample2_value1', 'type2_sample2_value2'],
],
]
compiler.data = mock_data
# WHEN combined data from each type are merged into a list
        # representing rows for an overall project summary table
testdata = compiler._build_table()
# THEN the merged rows should contain sample (library) IDs in the
# first column, and all other columns from different output types
mock_tabledata = [
['libId', 'type1_field1', 'type1_field2',
'type2_field1', 'type2_field2'],
['sample1', 'type1_sample1_value1', 'type1_sample1_value2',
'type2_sample1_value1', 'type2_sample1_value2'],
['sample2', 'type1_sample2_value1', 'type1_sample2_value2',
'type2_sample2_value1', 'type2_sample2_value2'],
]
assert (testdata == mock_tabledata)
def test_build_combined_filename(self):
        # GIVEN a list of one or more paths to 'combined' table files for
# arbitrary output types
mock_paths = [
'P00-00_C00000XX_161231_combined_type{}.csv'.format(i)
for i in range(2)
]
# AND a compiler object is created for the paths
compiler = postprocessing.OutputCompiler(
paths=mock_paths
)
# WHEN the filename is constructed for the merged table with
# all summary output types
testfilename = compiler._build_combined_filename()
# THEN the filename should be in the form
        # '<project-id>_<flowcell-id>_<process-date>_combined_summary-data.csv'
assert (testfilename
== 'P00-00_C00000XX_161231_combined_summary-data.csv')
def test_write_table(self, tmpdir):
# GIVEN a path to a processed project folder at the path
# '<root>/genomics/Illumina/<run-id>/<project-folder>'
mock_run = '161231_INSTID_0001_AC00000XX'
mock_project = 'Project_P00-00Processed_161231'
# AND one or more folders with output data of 'summary' types
# (e.g., metrics, QC, validation), each of which includes a
# 'combined' table file for its respective type
mock_paths = []
mock_projectpath = (tmpdir.mkdir('genomics').mkdir('Illumina')
.mkdir(mock_run)
.mkdir(mock_project))
for i in range(2):
mock_path = mock_projectpath.mkdir('type{}'.format(i))
mock_tablefile = mock_path.join(
'P00-00_C00000XX_161231_combined_type{}.csv'.format(i)
)
mock_paths.append(str(mock_tablefile))
mock_contents = [
'libId,type{}_field1,type{}_field2\n',
'sample1,type{}_sample1_value1,type{}_sample1_value2\n',
'sample2,type{}_sample2_value1,type{}_sample2_value2\n',
]
mock_tablefile.write(
''.join([line.format(i+1, i+1) for line in mock_contents])
)
# AND a compiler object is created for the project folder path
compiler = postprocessing.OutputCompiler(
paths=mock_paths
)
# WHEN compiled data across all summary output types is written
# as a table in a new file, stored directly under the project folder
testtablefile = compiler.write_table()
# THEN the combined table should exist at the expected path and
# contain the expected contents
mock_tablefile = mock_projectpath.join(
'P00-00_C00000XX_161231_combined_summary-data.csv'
)
mock_contents = [
','.join(['libId', 'type1_field1', 'type1_field2',
'type2_field1', 'type2_field2\n']),
','.join(['sample1',
'type1_sample1_value1', 'type1_sample1_value2',
'type2_sample1_value1', 'type2_sample1_value2\n']),
','.join(['sample2',
'type1_sample2_value1', 'type1_sample2_value2',
'type2_sample2_value1', 'type2_sample2_value2\n']),
]
assert (testtablefile == mock_tablefile)
with open(testtablefile) as f:
assert (f.readlines() == mock_contents)
class TestOutputCleaner:
"""
Tests methods for the `OutputCleaner` class in the
`bripipetools.postprocessing.cleanup` module, which is used to
reorganize and rename output files from deprecated layouts.
"""
def test_get_output_types(self, tmpdir):
# GIVEN a path to a folder with output data
mock_folders = ['counts', 'metrics', 'QC', 'alignments', 'logs']
for outfolder in mock_folders:
tmpdir.mkdir(outfolder)
# AND a cleaner object is created for that path
cleaner = postprocessing.OutputCleaner(
path=str(tmpdir)
)
# WHEN the path is checked to determine output type from a predefined
# set of options
test_types = cleaner._get_output_types()
        # THEN the identified output types should match the expected folders
assert (set(test_types) == set(mock_folders))
@pytest.mark.parametrize(
'test_input', ['counts', 'metrics', 'QC', 'alignments', 'logs']
)
def test_get_output_paths(self, tmpdir, test_input):
# GIVEN a path to a folder with output data, and a subfolder
# corresponding to a particular output type contains one or
# more files
mock_path = tmpdir.mkdir(test_input)
mock_paths = []
for i in range(2):
mock_file = mock_path.ensure('outfile{}'.format(i))
mock_paths.append(str(mock_file))
# AND a cleaner object is created for that path
cleaner = postprocessing.OutputCleaner(
path=str(tmpdir)
)
# WHEN full paths are collected for all output files
test_paths = cleaner._get_output_paths(test_input)
# THEN list of paths should match expected results
assert (set(test_paths) == set(mock_paths))
def test_unzip_output(self, tmpdir):
# GIVEN a path to a folder with output data, and a subfolder
# corresponding to a particular output type
mock_path = tmpdir.mkdir('metrics')
# AND the folder contains a zipped archive with one or more
# output files
mock_zipdir = mock_path.mkdir('zipfolder')
mock_zipdir.ensure('outfile1')
mock_zippath = shutil.make_archive(str(mock_zipdir), 'zip',
str(mock_zipdir))
shutil.rmtree(str(mock_zipdir))
# AND a cleaner object is created for the path
outputcleaner = postprocessing.OutputCleaner(
path=str(tmpdir)
)
# WHEN the zipped archive is uncompressed
test_paths = outputcleaner._unzip_output(mock_zippath)
# THEN the individual output files previously stored in the
# zipped archive should now exist in the output type subfolder
assert ('outfile1' in
[os.path.basename(str(f)) for f in mock_path.listdir()])
assert (str(mock_path.join('outfile1')) in test_paths)
def test_unnest_output_file(self, tmpdir):
# GIVEN a path to a folder with output data, and a subfolder
# corresponding to a particular output type
mock_path = tmpdir.mkdir('metrics')
# AND the folder contains another subfolder with one or more
# output files
mock_subdir = mock_path.mkdir('subfolder')
mock_nestpath = mock_subdir.ensure('outfile1')
# AND a cleaner object is created for the path
outputcleaner = postprocessing.OutputCleaner(
path=str(tmpdir)
)
# WHEN output files in the subfolder are unnested
outputcleaner._unnest_output(str(mock_nestpath))
# THEN the files should exist directly under the output type
# folder and be labeled in the form '<subfolder>_<filename>'
assert ('subfolder_outfile1' in
[os.path.basename(str(f)) for f in mock_path.listdir()])
def test_unnest_output_zip(self, tmpdir):
# GIVEN a path to a folder with output data, and a subfolder
# corresponding to a particular output type
mock_path = tmpdir.mkdir('metrics')
# AND the folder contains another subfolder with one or more
        # zipped archives that in turn contain one or more output files
mock_subdir = mock_path.mkdir('subfolder')
mock_zipdir = mock_subdir.mkdir('zipfolder')
mock_zipdir.ensure('outfile1')
mock_zippath = shutil.make_archive(str(mock_zipdir), 'zip',
str(mock_zipdir))
shutil.rmtree(str(mock_zipdir))
# AND a cleaner object is created for the path
outputcleaner = postprocessing.OutputCleaner(
path=str(tmpdir)
)
# WHEN zipped output files in the subfolder are unnested
outputcleaner._unnest_output(mock_zippath)
# THEN the zipped archives should first be flattened such that
# individual output files exist directly under the subfolder,
# and these files should then be unnested and exist directly
        # under the output type folder (labeled as '<subfolder>_<filename>')
        logger.debug('{}'.format(mock_path.listdir()))
assert ('subfolder_outfile1' in
[os.path.basename(str(f)) for f in mock_path.listdir()])
def test_recode_output(self, tmpdir):
# GIVEN a path to a folder with output data, and a subfolder
# corresponding to a particular output type, which contains
# an output file
mock_path = tmpdir.mkdir('QC')
mock_qcpath = mock_path.ensure('libID_fcID_fastqc_data.txt')
# AND a cleaner object is created for the path
outputcleaner = postprocessing.OutputCleaner(
path=str(tmpdir))
# WHEN the output file is renamed according to some predefined rule
test_path = outputcleaner._recode_output(str(mock_qcpath), 'QC')
# THEN the new filename should match the expected result
assert (os.path.basename(test_path) == 'libID_fcID_fastqc_qc.txt')
assert ('libID_fcID_fastqc_qc.txt' in
[os.path.basename(str(f)) for f in mock_path.listdir()])
def test_clean_outputs(self, tmpdir):
# GIVEN a path to a folder with output data, and a subfolder
# corresponding to a particular output type
mock_path = tmpdir.mkdir('QC')
# AND the output type folder contains output files for one or more
# samples with various levels of compression and nesting
mock_outputdata = {
1: 'lib1111_C00000XX',
2: 'lib2222_C00000XX'
}
for i in range(2):
mock_sampledir = mock_path.mkdir(mock_outputdata[i+1])
mock_zipdir = mock_sampledir.mkdir('qc{}'.format(i))
mock_zipdir.ensure('fastqc_data.txt')
shutil.make_archive(str(mock_zipdir), 'zip',
str(mock_zipdir))
shutil.rmtree(str(mock_zipdir))
# AND a cleaner object is created for the path
outputcleaner = postprocessing.OutputCleaner(
path=str(tmpdir)
)
# WHEN output files for the folder are "cleaned" to resolve unwanted
# compression, nesting, or deprecated filenames
outputcleaner.clean_outputs()
# THEN the updated output organization should match expected results
        logger.debug('{}'.format(mock_path.listdir()))
assert (len(mock_path.listdir()) == 4)
assert ('lib1111_C00000XX_fastqc_qc.txt' in
[os.path.basename(str(f)) for f in mock_path.listdir()])
| mit |
thomasgibson/tabula-rasa | HDG_CG_comp/table_cg_hdg_breakdown.py | 1 | 5273 | import os
import pandas as pd
hdg_params = [(64, 1), (64, 2), (64, 3)]
hdg_data = ["results/HDG_data_N%d_deg%d.csv" % param
for param in hdg_params]
cg_params = [(64, 2), (64, 3), (64, 4)]
cg_data = ["results/CG_data_N%d_deg%d.csv" % param
for param in cg_params]
for d in hdg_data + cg_data:
if not os.path.exists(d):
import sys
print("Cannot find data file '%s'" % d)
sys.exit(1)
table = r"""\begin{tabular}{lcccccc}
\hline
\multirow{2}{*}{Stage}
& \multicolumn{2}{c}{$HDG_1$}
& \multicolumn{2}{c}{$HDG_2$}
& \multicolumn{2}{c}{$HDG_3$}
\\
& $t_{\text{stage}}$ (s) & \% $t_{\text{total}}$
& $t_{\text{stage}}$ (s) & \% $t_{\text{total}}$
& $t_{\text{stage}}$ (s) & \% $t_{\text{total}}$ \\ \hline
"""
hdg_dfs = [pd.read_csv(d) for d in hdg_data]
lformat = r"""{stage} & {t1: .2f} & {p1: .2f} \% & {t2: .2f} & {p2: .2f} \% & {t3: .2f} & {p3: .2f} \% \\
"""
df1, df2, df3 = hdg_dfs
recovery1 = df1.HDGRecover.values[0]
pp1 = df1.HDGPPTime.values[0]
rhs1 = df1.HDGRhs.values[0]
trace1 = df1.HDGTraceSolve.values[0]
assembly1 = df1.HDGUpdate.values[0]
# residual1 = df1.SNESFunctionEval.values[0]
total1 = recovery1 + pp1 + rhs1 + trace1 + assembly1  # + residual1
snes1 = df1.SNESSolve.values[0]
recovery2 = df2.HDGRecover.values[0]
pp2 = df2.HDGPPTime.values[0]
rhs2 = df2.HDGRhs.values[0]
trace2 = df2.HDGTraceSolve.values[0]
assembly2 = df2.HDGUpdate.values[0]
# residual2 = df2.SNESFunctionEval.values[0]
total2 = recovery2 + pp2 + rhs2 + trace2 + assembly2  # + residual2
snes2 = df2.SNESSolve.values[0]
recovery3 = df3.HDGRecover.values[0]
pp3 = df3.HDGPPTime.values[0]
rhs3 = df3.HDGRhs.values[0]
trace3 = df3.HDGTraceSolve.values[0]
assembly3 = df3.HDGUpdate.values[0]
# residual3 = df3.SNESFunctionEval.values[0]
total3 = recovery3 + pp3 + rhs3 + trace3 + assembly3 # + residual3
snes3 = df3.SNESSolve.values[0]
table += lformat.format(stage="Matrix assembly (static cond.)",
t1=assembly1,
p1=assembly1/total1 * 100.,
t2=assembly2,
p2=assembly2/total2 * 100.,
t3=assembly3,
p3=assembly3/total3 * 100.)
table += lformat.format(stage="Forward elimination",
t1=rhs1,
p1=rhs1/total1 * 100.,
t2=rhs2,
p2=rhs2/total2 * 100.,
t3=rhs3,
p3=rhs3/total3 * 100.)
table += lformat.format(stage="Trace solve",
t1=trace1,
p1=trace1/total1 * 100.,
t2=trace2,
p2=trace2/total2 * 100.,
t3=trace3,
p3=trace3/total3 * 100.)
table += lformat.format(stage="Back substitution",
t1=recovery1,
p1=recovery1/total1 * 100.,
t2=recovery2,
p2=recovery2/total2 * 100.,
t3=recovery3,
p3=recovery3/total3 * 100.)
table += lformat.format(stage="Post processing",
t1=pp1,
p1=pp1/total1 * 100.,
t2=pp2,
p2=pp2/total2 * 100.,
t3=pp3,
p3=pp3/total3 * 100.)
table += r"""\hline
"""
table += r"""HDG Total & %.2f & & %.2f & & %.2f & \\ \hline""" % (total1,
total2,
total3)
table += r"""
& \multicolumn{2}{c}{$CG_2$}
& \multicolumn{2}{c}{$CG_3$}
& \multicolumn{2}{c}{$CG_4$}
\\
& $t_{\text{stage}}$ (s) & \% $t_{\text{total}}$
& $t_{\text{stage}}$ (s) & \% $t_{\text{total}}$
& $t_{\text{stage}}$ (s) & \% $t_{\text{total}}$ \\ \hline
"""
cg_dfs = [pd.read_csv(d) for d in cg_data]
df1, df2, df3 = cg_dfs
ksp_solve1 = df1.KSPSolve.values[0]
assembly1 = df1.SNESJacobianEval.values[0]
total1 = assembly1 + ksp_solve1
ksp_solve2 = df2.KSPSolve.values[0]
assembly2 = df2.SNESJacobianEval.values[0]
total2 = assembly2 + ksp_solve2
ksp_solve3 = df3.KSPSolve.values[0]
assembly3 = df3.SNESJacobianEval.values[0]
total3 = assembly3 + ksp_solve3
table += lformat.format(stage="Matrix assembly (monolithic)",
t1=assembly1,
p1=assembly1/total1 * 100.,
t2=assembly2,
p2=assembly2/total2 * 100.,
t3=assembly3,
p3=assembly3/total3 * 100.)
table += lformat.format(stage="Solve",
t1=ksp_solve1,
p1=ksp_solve1/total1 * 100.,
t2=ksp_solve2,
p2=ksp_solve2/total2 * 100.,
t3=ksp_solve3,
p3=ksp_solve3/total3 * 100.)
table += r"""\hline
"""
table += r"""CG Total & %.2f & & %.2f & & %.2f & \\ \hline""" % (total1,
total2,
total3)
table += r"""
\end{tabular}"""
print(table)
| mit |
automl/SpySMAC | cave/plot/configurator_footprint.py | 1 | 46564 | #!/bin/python
__author__ = "Marius Lindauer & Joshua Marben"
__copyright__ = "Copyright 2016, ML4AAD"
__license__ = "BSD"
__maintainer__ = "Joshua Marben"
__email__ = "marbenj@cs.uni-freiburg.de"
import copy
import logging
import os
import time
import numpy as np
from ConfigSpace import CategoricalHyperparameter
from ConfigSpace.configuration_space import Configuration, ConfigurationSpace
from ConfigSpace.util import impute_inactive_values
from bokeh.layouts import column, row, widgetbox
from bokeh.models import HoverTool, ColorBar, LinearColorMapper, BasicTicker, CustomJS, Slider
from bokeh.models.filters import GroupFilter, BooleanFilter
from bokeh.models.sources import CDSView
from bokeh.models.widgets import CheckboxButtonGroup, RadioButtonGroup, Button, Div
from bokeh.plotting import figure, ColumnDataSource
from sklearn.decomposition import PCA
from sklearn.manifold.mds import MDS
from sklearn.preprocessing import StandardScaler
from smac.epm.rf_with_instances import RandomForestWithInstances
from smac.runhistory.runhistory import RunHistory
from smac.scenario.scenario import Scenario
from smac.utils.constants import MAXINT
from cave.utils.convert_for_epm import convert_data_for_epm
from cave.utils.helpers import escape_parameter_name, get_config_origin, combine_runhistories
from cave.utils.io import export_bokeh
from cave.utils.timing import timing
class ConfiguratorFootprintPlotter(object):
def __init__(self,
scenario: Scenario,
rhs: RunHistory,
incs: list=None,
final_incumbent=None,
rh_labels=None,
max_plot: int=-1,
contour_step_size=0.2,
use_timeslider: bool=False,
num_quantiles: int=10,
timeslider_log: bool=True,
rng=None,
output_dir: str=None,
):
"""
        Create an interactive plot visualizing the configuration search space.
        Each configurator run is represented by its own runhistory and a list of incumbents.
        If the dict "additional_info" in the RunValues of the runhistory contains a nested dict with
        additional_info["timestamps"]["finished"], those timestamps are used to sort the data.
Parameters
----------
scenario: Scenario
scenario
rhs: List[RunHistory]
runhistories from configurator runs, only data collected during optimization (no validation!)
incs: List[List[Configuration]]
incumbents per run, last entry is final incumbent
final_incumbent: Configuration
final configuration (best of all runs)
max_plot: int
maximum number of configs to plot, if -1 plot all
contour_step_size: float
step size of meshgrid to compute contour of fitness landscape
use_timeslider: bool
whether or not to have a time_slider-widget on cfp-plot
INCREASES FILE-SIZE DRAMATICALLY
num_quantiles: int
number of quantiles for the slider/ number of static pictures
timeslider_log: bool
whether to use a logarithmic scale for the timeslider/quantiles
rng: np.random.RandomState
random number generator
output_dir: str
output directory
"""
self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)
self.rng = rng
if rng is None:
self.rng = np.random.RandomState(42)
self.scenario = scenario
self.rhs = rhs
self.combined_rh = combine_runhistories(self.rhs)
self.incs = incs
self.rh_labels = rh_labels if rh_labels else [str(idx) for idx in range(len(self.rhs))]
self.max_plot = max_plot
self.use_timeslider = use_timeslider
self.num_quantiles = num_quantiles
self.contour_step_size = contour_step_size
self.output_dir = output_dir
self.timeslider_log = timeslider_log
# Preprocess input
self.default = scenario.cs.get_default_configuration()
self.final_incumbent = final_incumbent
self.configs_in_run = {label : rh.get_all_configs() for label, rh in zip(self.rh_labels, self.rhs)}
def run(self):
"""
        Uses available configurator data to perform an MDS, estimate performance
data and plot the configurator footprint.
"""
default = self.scenario.cs.get_default_configuration()
self.combined_rh = self.reduce_runhistory(self.combined_rh, self.max_plot, keep=[a for b in self.incs for a in b]+[default])
conf_matrix, conf_list, runs_per_quantile, timeslider_labels = self.get_conf_matrix(self.combined_rh, self.incs)
self.logger.debug("Number of Configurations: %d", conf_matrix.shape[0])
dists = self.get_distance(conf_matrix, self.scenario.cs)
red_dists = self.get_mds(dists)
contour_data = {}
if not any([label.startswith('budget') for label in self.rh_labels]):
contour_data['combined'] = self.get_pred_surface(self.combined_rh, X_scaled=red_dists,
conf_list=copy.deepcopy(conf_list),
contour_step_size=self.contour_step_size)
for label, rh in zip(self.rh_labels, self.rhs):
            contour_data[label] = self.get_pred_surface(rh, X_scaled=red_dists,
                                                        conf_list=copy.deepcopy(conf_list),
                                                        contour_step_size=self.contour_step_size)
return self.plot(red_dists,
conf_list,
runs_per_quantile,
inc_list=self.incs,
contour_data=contour_data,
use_timeslider=self.use_timeslider,
timeslider_labels=timeslider_labels)
@timing
def get_pred_surface(self, rh, X_scaled, conf_list: list, contour_step_size):
"""fit epm on the scaled input dimension and
return data to plot a contour plot of the empirical performance
Parameters
----------
rh: RunHistory
runhistory
X_scaled: np.array
configurations in scaled 2dim
conf_list: list
list of Configuration objects
contour_step_size: float
step-size for contour
Returns
-------
contour_data: (np.array, np.array, np.array)
x, y, Z for contour plots
"""
# use PCA to reduce features to also at most 2 dims
scen = copy.deepcopy(self.scenario) # pca changes feats
if scen.feature_array.shape[1] > 2:
self.logger.debug("Use PCA to reduce features to from %d dim to 2 dim", scen.feature_array.shape[1])
# perform PCA
insts = scen.feature_dict.keys()
feature_array = np.array([scen.feature_dict[i] for i in insts])
feature_array = StandardScaler().fit_transform(feature_array)
feature_array = PCA(n_components=2).fit_transform(feature_array)
# inject in scenario-object
scen.feature_array = feature_array
scen.feature_dict = dict([(inst, feature_array[idx, :]) for idx, inst in enumerate(insts)])
scen.n_features = 2
# convert the data to train EPM on 2-dim featurespace (for contour-data)
self.logger.debug("Convert data for epm.")
X, y, types = convert_data_for_epm(scenario=scen, runhistory=rh, impute_inactive_parameters=True, logger=self.logger)
types = np.array(np.zeros((2 + scen.feature_array.shape[1])), dtype=np.uint)
num_params = len(scen.cs.get_hyperparameters())
# impute missing values in configs and insert MDS'ed (2dim) configs to the right positions
conf_dict = {}
# Remove forbidden clauses (this is necessary to enable the impute_inactive_values-method, see #226)
cs_no_forbidden = copy.deepcopy(conf_list[0].configuration_space)
cs_no_forbidden.forbidden_clauses = []
for idx, c in enumerate(conf_list):
c.configuration_space = cs_no_forbidden
conf_list[idx] = impute_inactive_values(c)
conf_dict[str(conf_list[idx].get_array())] = X_scaled[idx, :]
# Debug compare elements:
c1, c2 = {str(z) for z in X}, {str(z) for z in conf_dict.keys()}
self.logger.debug("{} elements not in both sets, {} elements in both sets, X (len {}) and conf_dict (len {}) "
"(might be a problem related to forbidden clauses?)".format(len(c1 ^ c2), len(c1 & c2), len(c1 ^ c2), len(c1), len(c2)))
# self.logger.debug("Elements: {}".format(str(c1 ^ c2)))
X_trans = [] # X_trans is the same as X but with reduced 2-dim features (so shape is (N, 2) instead of (N, M))
for x in X:
x_scaled_conf = conf_dict[str(x[:num_params])]
# append scaled config + pca'ed features (total of 4 values) per config/feature-sample
X_trans.append(np.concatenate((x_scaled_conf, x[num_params:]), axis=0))
X_trans = np.array(X_trans)
self.logger.debug("Train random forest for contour-plot. Shape of X: {}, shape of X_trans: {}".format(X.shape, X_trans.shape))
self.logger.debug("Faking configspace to be able to train rf...")
        # We need to fake the config-space to bypass imputation of inactive values in the random forest implementation
fake_cs = ConfigurationSpace(name="fake-cs-for-configurator-footprint")
bounds = np.array([(0, np.nan), (0, np.nan)], dtype=object)
model = RandomForestWithInstances(fake_cs,
types, bounds,
seed = self.rng.randint(MAXINT),
instance_features=np.array(scen.feature_array),
ratio_features=1.0)
start = time.time()
model.train(X_trans, y)
self.logger.debug("Fitting random forest took %f time", time.time() - start)
x_min, x_max = X_scaled[:, 0].min() - 1, X_scaled[:, 0].max() + 1
y_min, y_max = X_scaled[:, 1].min() - 1, X_scaled[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, contour_step_size),
np.arange(y_min, y_max, contour_step_size))
self.logger.debug("x_min: %f, x_max: %f, y_min: %f, y_max: %f", x_min, x_max, y_min, y_max)
self.logger.debug("Predict on %d samples in grid to get surface (step-size: %f)",
np.c_[xx.ravel(), yy.ravel()].shape[0], contour_step_size)
start = time.time()
Z, _ = model.predict_marginalized_over_instances(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
self.logger.debug("Predicting random forest took %f time", time.time() - start)
return xx, yy, Z
@timing
def get_distance(self, conf_matrix, cs: ConfigurationSpace):
"""
Computes the distance between all pairs of configurations.
Parameters
----------
conf_matrx: np.array
numpy array with cols as parameter values
cs: ConfigurationSpace
ConfigurationSpace to get conditionalities
Returns
-------
dists: np.array
np.array with distances between configurations i,j in dists[i,j] or dists[j,i]
"""
self.logger.debug("Calculate distance between configurations.")
n_confs = conf_matrix.shape[0]
dists = np.zeros((n_confs, n_confs))
is_cat = []
depth = []
for _, param in cs._hyperparameters.items():
if type(param) == CategoricalHyperparameter:
is_cat.append(True)
else:
is_cat.append(False)
depth.append(self.get_depth(cs, param))
is_cat = np.array(is_cat)
depth = np.array(depth)
# TODO tqdm
start = time.time()
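        # Per-parameter distance on the vector representation (Configuration.get_array()):
        #   - numerical parameters contribute their absolute difference,
        #   - categorical parameters contribute 1 on mismatch and 0 otherwise,
        #   - NaN entries (inactive parameters) count as maximally different (1).
        # Each contribution is divided by the parameter's depth in the conditional
        # tree, so deeply nested parameters influence the total distance less.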
for i in range(n_confs):
for j in range(i + 1, n_confs):
dist = np.abs(conf_matrix[i, :] - conf_matrix[j, :])
dist[np.isnan(dist)] = 1
dist[np.logical_and(is_cat, dist != 0)] = 1
dist = np.sum(dist / depth)
dists[i, j] = dist
dists[j, i] = dist
if 5 < n_confs and i % (n_confs // 5) == 0:
self.logger.debug("%.2f%% of all distances calculated in %.2f seconds...", 100 * i / n_confs,
time.time() - start)
return dists
def get_depth(self, cs: ConfigurationSpace, param: str):
"""
Get depth in configuration space of a given parameter name
        breadth-first search upwards through the parents until a top-level (unconditioned) parameter is reached
Parameters
----------
cs: ConfigurationSpace
ConfigurationSpace to get parents of a parameter
param: str
name of parameter to inspect
"""
parents = cs.get_parents_of(param)
if not parents:
return 1
new_parents = parents
d = 1
while new_parents:
d += 1
old_parents = new_parents
new_parents = []
for p in old_parents:
pp = cs.get_parents_of(p)
if pp:
new_parents.extend(pp)
else:
return d
@timing
def get_mds(self, dists):
"""
Compute multi-dimensional scaling (using sklearn MDS) -- nonlinear scaling
Parameters
----------
dists: np.array
full matrix of distances between all configurations
Returns
-------
np.array
scaled coordinates in 2-dim room
"""
# TODO there are ways to extend MDS to provide a transform-method. if
# available, train on randomly sampled configs and plot all
# TODO MDS provides 'n_jobs'-argument for parallel computing...
mds = MDS(n_components=2, dissimilarity="precomputed", random_state=12345)
dists = mds.fit_transform(dists)
self.logger.debug("MDS-stress: %f", mds.stress_)
return dists
def reduce_runhistory(self,
rh: RunHistory,
max_configs: int,
keep=None):
"""
Reduce configs to desired number, by default just drop the configs with the fewest runs.
Parameters
----------
rh: RunHistory
runhistory that is to be reduced
max_configs: int
if > -1 reduce runhistory to at most max_configs
keep: List[Configuration]
list of configs that should be kept for sure (e.g. default, incumbents)
Returns
-------
rh: RunHistory
reduced runhistory
"""
configs = rh.get_all_configs()
if max_configs <= 0 or max_configs > len(configs): # keep all
return rh
runs = [(c, len(rh.get_runs_for_config(c, only_max_observed_budget=False))) for c in configs]
if not keep:
keep = []
        runs = sorted(runs, key=lambda x: x[1])[-max_configs:]
keep = [r[0] for r in runs] + keep
self.logger.info("Reducing number of configs from %d to %d, dropping from the fewest evaluations",
len(configs), len(keep))
new_rh = RunHistory()
for k, v in list(rh.data.items()):
c = rh.ids_config[k.config_id]
if c in keep:
new_rh.add(config=rh.ids_config[k.config_id],
cost=v.cost, time=v.time, status=v.status,
instance_id=k.instance_id, seed=k.seed)
return new_rh
@timing
def get_conf_matrix(self, rh, incs):
"""
Iterates through runhistory to get a matrix of configurations (in
vector representation), a list of configurations and the number of
runs per configuration in a quantiled manner.
Parameters
----------
rh: RunHistory
smac.runhistory
incs: List[List[Configuration]]
incumbents of configurator runs, last entry is final incumbent
Returns
-------
conf_matrix: np.array
matrix of configurations in vector representation
conf_list: np.array
list of all Configuration objects that appeared in runhistory
the order of this list is used to determine all kinds of properties
in the plotting (but is arbitrarily determined)
runs_per_quantile: np.array
numpy array of runs per configuration per quantile
labels: List[str]
labels for timeslider (i.e. wallclock-times)
"""
conf_list = []
conf_matrix = []
# Get all configurations. Index of c in conf_list serves as identifier
for c in rh.get_all_configs():
if c not in conf_list:
conf_matrix.append(c.get_array())
conf_list.append(c)
for inc in [a for b in incs for a in b]:
if inc not in conf_list:
conf_matrix.append(inc.get_array())
conf_list.append(inc)
# Sanity check, number quantiles must be smaller than the number of configs
if self.num_quantiles >= len(conf_list):
self.logger.info("Number of quantiles %d bigger than number of configs %d, reducing to %d quantiles",
self.num_quantiles, len(conf_list), len(conf_list) - 1)
self.num_quantiles = len(conf_list) - 1
# We want to visualize the development over time, so we take
# screenshots of the number of runs per config at different points
# in (i.e. different quantiles of) the runhistory, LAST quantile
# is full history!!
labels, runs_per_quantile = self._get_runs_per_config_quantiled(rh, conf_list, quantiles=self.num_quantiles)
assert(len(runs_per_quantile) == self.num_quantiles)
# Get minimum and maximum for sizes of dots
self.min_runs_per_conf = min([i for i in runs_per_quantile[-1] if i > 0])
self.max_runs_per_conf = max(runs_per_quantile[-1])
self.logger.debug("Min runs per conf: %d, Max runs per conf: %d", self.min_runs_per_conf, self.max_runs_per_conf)
self.logger.debug("Gathered %d configurations from 1 runhistories." % len(conf_list))
runs_per_quantile = np.array([np.array(run) for run in runs_per_quantile])
return np.array(conf_matrix), np.array(conf_list), runs_per_quantile, labels
@timing
def _get_runs_per_config_quantiled(self, rh, conf_list, quantiles):
"""Returns a list of lists, each sublist representing the current state
at that timestep (quantile). The current state means a list of times
each config was evaluated at that timestep.
Parameters
----------
rh: RunHistory
rh to be split up
conf_list: list
list of all Configuration objects that appear in runhistory
quantiles: int
number of fractions to split rh into
Returns:
--------
labels: List[str]
labels for timeslider (i.e. wallclock-times)
runs_per_quantile: np.array
numpy array of runs per configuration per quantile
"""
runs_total = len(rh.data)
# Iterate over the runhistory's entries in ranges and creating each
# sublist from a "snapshot"-runhistory
        labels = []  # wallclock-time labels at the splitting points
r_p_q_p_c = [] # runs per quantile per config
as_list = list(rh.data.items())
scale = np.geomspace if self.timeslider_log else np.linspace
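        # With timeslider_log the split points are geometrically spaced (np.geomspace),
        # which typically resolves the early part of the run more finely;
        # otherwise they are spaced linearly.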
# Trying to work with timestamps if they are available
timestamps = None
try:
as_list = sorted(as_list, key=lambda x: x[1].additional_info['timestamps']['finished'])
timestamps = [x[1].additional_info['timestamps']['finished'] for x in as_list]
time_ranges = scale(timestamps[0], timestamps[-1], num=quantiles+1, endpoint=True)
ranges = []
idx = 0
            for time_idx, t in enumerate(time_ranges):
                while len(timestamps) - 1 > idx and (timestamps[idx] < t or idx <= time_idx):
idx += 1
ranges.append(idx)
except (KeyError, TypeError) as err:
self.logger.debug(err)
self.logger.debug("Failed to sort by timestamps... only a reason to worry if this is BOHB-analysis")
ranges = [int(x) for x in scale(1, runs_total, num=quantiles+1)]
# Fix possible wrong values
ranges[0] = 0
ranges[-1] = len(as_list)
self.logger.debug("Creating %d quantiles with a total number of runs of %d", quantiles, runs_total)
self.logger.debug("Ranges: %s", str(ranges))
for r in range(len(ranges))[1:]:
if ranges[r] <= ranges[r-1]:
if ranges[r-1] + 1 >= len(as_list):
raise RuntimeError("There was a problem with the quantiles of the configuration footprint. "
"Please report this Error on \"https://github.com/automl/CAVE/issues\" and provide the debug.txt-file.")
ranges[r] = ranges[r-1] + 1
self.logger.debug("Fixed ranges to: %s", str(ranges))
# Sanity check
if not ranges[0] == 0 or not ranges[-1] == len(as_list) or not len(ranges) == quantiles + 1:
raise RuntimeError("Sanity check on range-creation in configurator footprint went wrong. "
"Please report this Error on \"https://github.com/automl/CAVE/issues\" and provide the debug.txt-file.")
tmp_rh = RunHistory()
for i, j in zip(ranges[:-1], ranges[1:]):
for idx in range(i, j):
k, v = as_list[idx]
tmp_rh.add(config=rh.ids_config[k.config_id],
cost=v.cost, time=v.time, status=v.status,
instance_id=k.instance_id, seed=k.seed,
additional_info=v.additional_info)
if timestamps:
labels.append("{0:.2f}".format(timestamps[j - 1]))
r_p_q_p_c.append([len(tmp_rh.get_runs_for_config(c, only_max_observed_budget=False)) for c in conf_list])
self.logger.debug("Labels: " + str(labels))
return labels, r_p_q_p_c
##################################################################################
##################################################################################
### PLOTTING # PLOTTING # PLOTTING # PLOTTING # PLOTTING # PLOTTING # PLOTTING ###
##################################################################################
##################################################################################
def _get_size(self, r_p_c):
"""Returns size of scattered points in dependency of runs per config
Parameters
----------
r_p_c: list[int]
list with runs per config in order of self.conf_list
Returns
-------
sizes: list[int]
list with appropriate sizes for dots
"""
normalization_factor = self.max_runs_per_conf - self.min_runs_per_conf
min_size, enlargement_factor = 5, 20
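        # Dot sizes scale linearly with the number of runs: the least-evaluated config
        # gets min_size, the most-evaluated one min_size + enlargement_factor,
        # and configs with zero runs are hidden (size 0).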
if normalization_factor == 0: # All configurations same size
normalization_factor = 1
min_size = 12
sizes = min_size + ((r_p_c - self.min_runs_per_conf) / normalization_factor) * enlargement_factor
sizes *= np.array([0 if r == 0 else 1 for r in r_p_c]) # 0 size if 0 runs
return sizes
def _get_color(self, types):
"""Determine appropriate color for all configurations
Parameters:
-----------
types: List[str]
type of configuration
Returns:
--------
colors: list
list of color per config
"""
colors = []
for t in types:
if t == "Default":
colors.append('orange')
elif "Incumbent" in t:
colors.append('red')
else:
colors.append('white')
return colors
@timing
def _plot_contour(self, p, contour_data, x_range, y_range):
"""Plot contour data.
Parameters
----------
p: bokeh.plotting.figure
figure to be drawn upon
contour_data: Dict[str -> np.array]
dict from labels to array with contour data
x_range: List[float, float]
min and max of x-axis
y_range: List[float, float]
min and max of y-axis
Returns
-------
handles: dict[str -> tuple(ImageGlyph, tuple(float, float))]
mapping from label to image glyph and min/max-tuple
"""
unique = np.unique(np.concatenate([contour_data[label][2] for label in contour_data.keys()]))
color_mapper = LinearColorMapper(palette="Viridis256", low=np.min(unique), high=np.max(unique))
handles = {}
default_label = 'combined' if 'combined' in contour_data.keys() else list(contour_data.keys())[0]
for label, data in contour_data.items():
unique = np.unique(contour_data[label][2])
handles[label] = (p.image(image=contour_data[label], x=x_range[0], y=y_range[0],
dw=x_range[1] - x_range[0], dh=y_range[1] - y_range[0],
color_mapper=color_mapper),
(np.min(unique), np.max(unique)))
if not label == default_label and len(contour_data) > 1:
handles[label][0].visible = False
color_bar = ColorBar(color_mapper=color_mapper,
ticker=BasicTicker(desired_num_ticks=15),
label_standoff=12,
border_line_color=None, location=(0, 0))
color_bar.major_label_text_font_size = '12pt'
p.add_layout(color_bar, 'right')
return handles, color_mapper
def _create_views(self, source, used_configs):
"""Create views in order of plotting, so more interesting views are
plotted on top. Order of interest:
default > final-incumbent > incumbent > candidate
local > random
num_runs (ascending, more evaluated -> more interesting)
Individual views are necessary, since bokeh can only plot one
marker-type (circle, triangle, ...) per 'scatter'-call
Parameters
----------:
source: ColumnDataSource
containing relevant information for plotting
used_configs: List[Configuration]
configs that are contained in this source. necessary to plot glyphs for the independent runs so they can be
toggled. not all configs are in every source because of efficiency: no need to have 0-runs configs
Returns
-------
views: List[CDSView]
views in order of plotting
views_by_run: Dict[ConfiguratorRun -> List[int]]
maps each run to a list of indices of the related glyphs in the returned 'views'-list
markers: List[string]
markers (to the view with the same index)
"""
def _get_marker(t, o):
""" returns marker according to type t and origin o """
if t == "Default":
shape = 'triangle'
elif t == 'Final Incumbent':
shape = 'inverted_triangle'
else:
shape = 'square' if t == "Incumbent" else 'circle'
shape += '_x' if o.startswith("Acquisition Function") else ''
return shape
views, markers = [], []
views_by_run = {run : [] for run in self.configs_in_run}
idx = 0
for t in ['Candidate', 'Incumbent', 'Final Incumbent', 'Default']:
for o in ['Unknown', 'Random', 'Acquisition Function']:
for z in sorted(list(set(source.data['zorder'])), key=lambda x: int(x)):
for run, configs in self.configs_in_run.items():
booleans = [True if c in configs else False for c in used_configs]
view = CDSView(source=source, filters=[
GroupFilter(column_name='type', group=t),
GroupFilter(column_name='origin', group=o),
GroupFilter(column_name='zorder', group=z),
BooleanFilter(booleans)])
views.append(view) # all views
views_by_run[run].append(idx) # views sorted by runs
idx += 1
markers.append(_get_marker(t, o))
self.logger.debug("%d different glyph renderers, %d different zorder-values",
len(views), len(set(source.data['zorder'])))
return (views, views_by_run, markers)
@timing
def _scatter(self, p, source, views, markers):
"""
Parameters
----------
p: bokeh.plotting.figure
figure
source: ColumnDataSource
data container
views: List[CDSView]
list with views to be plotted (in order!)
markers: List[str]
corresponding markers to the views
Returns
-------
scatter_handles: List[GlyphRenderer]
glyph renderer per view
"""
scatter_handles = []
for view, marker in zip(views, markers):
scatter_handles.append(p.scatter(x='x', y='y',
source=source,
view=view,
color='color', line_color='black',
size='size',
marker=marker,
))
return scatter_handles
def _plot_get_source(self,
conf_list,
runs,
X,
inc_list,
hp_names):
"""
Create ColumnDataSource with all the necessary data
Contains for each configuration evaluated on any run:
- all parameters and values
- origin (if conflicting, origin from best run counts)
- type (default, incumbent or candidate)
- # of runs
- size
- color
Parameters
----------
conf_list: list[Configuration]
configurations
runs: list[int]
runs per configuration (same order as conf_list)
X: np.array
configuration-parameters as 2-dimensional array
inc_list: list[Configuration]
incumbents for this conf-run
hp_names: list[str]
names of hyperparameters
Returns
-------
source: ColumnDataSource
source with attributes as requested
conf_list: List[Configuration]
filtered conf_list with only configs we actually plot (i.e. > 0 runs)
"""
# Remove all configurations without any runs
keep = [i for i in range(len(runs)) if runs[i] > 0]
runs = np.array(runs)[keep]
conf_list = np.array(conf_list)[keep]
X = X[keep]
inc_list = [a for b in inc_list for a in b]
source = ColumnDataSource(data=dict(x=X[:, 0], y=X[:, 1]))
for k in hp_names: # Add parameters for each config
            source.add([c[k] if c[k] is not None else "None" for c in conf_list], escape_parameter_name(k))
conf_types = ["Default" if c == self.default else "Final Incumbent" if c == self.final_incumbent
else "Incumbent" if c in inc_list else "Candidate" for c in conf_list]
# We group "Local Search" and "Random Search (sorted)" both into local
origins = [get_config_origin(c) for c in conf_list]
source.add(conf_types, 'type')
source.add(origins, 'origin')
sizes = self._get_size(runs)
sizes = [s * 3 if conf_types[idx] == "Default" else s for idx, s in enumerate(sizes)]
source.add(sizes, 'size')
source.add(self._get_color(source.data['type']), 'color')
source.add(runs, 'runs')
# To enforce zorder, we categorize all entries according to their size
# Since we plot all different zorder-levels sequentially, we use a
# manually defined level of influence
num_bins = 20 # How fine-grained the size-ordering should be
min_size, max_size = min(source.data['size']), max(source.data['size'])
step_size = (max_size - min_size) / num_bins
if step_size == 0:
step_size = 1
zorder = [str(int((s - min_size) / step_size)) for s in source.data['size']]
source.add(zorder, 'zorder') # string, so we can apply group filter
return source, conf_list
def plot(self,
X,
conf_list: list,
runs_per_quantile,
inc_list: list=None,
contour_data=None,
use_timeslider=False,
use_checkbox=True,
timeslider_labels=None):
"""
plots sampled configuration in 2d-space;
uses bokeh for interactive plot
saves results in self.output, if set
Parameters
----------
X: np.array
np.array with 2-d coordinates for each configuration
conf_list: list
list of ALL configurations in the same order as X
runs_per_quantile: list[np.array]
configurator-run to be analyzed, as a np.array with
the number of target-algorithm-runs per config per quantile.
inc_list: list
list of incumbents (Configuration)
contour_data: list
contour data (xx,yy,Z)
use_timeslider: bool
whether or not to have a time_slider-widget on cfp-plot
INCREASES FILE-SIZE DRAMATICALLY
use_checkbox: bool
have checkboxes to toggle individual runs
Returns
-------
(script, div): str
script and div of the bokeh-figure
over_time_paths: List[str]
list with paths to the different quantiled timesteps of the
configurator run (for static evaluation)
"""
if not inc_list:
inc_list = []
over_time_paths = [] # development of the search space over time
hp_names = [k.name for k in # Hyperparameter names
conf_list[0].configuration_space.get_hyperparameters()]
# bokeh-figure
x_range = [min(X[:, 0]) - 1, max(X[:, 0]) + 1]
y_range = [min(X[:, 1]) - 1, max(X[:, 1]) + 1]
# Get individual sources for quantiles
sources, used_configs = zip(*[self._plot_get_source(conf_list, quantiled_run, X, inc_list, hp_names)
for quantiled_run in runs_per_quantile])
# We collect all glyphs in one list
        # Then we have two dicts to identify groups of glyphs (for interactivity)
# They map the name of the group to a list of indices (of the respective glyphs that are in the group)
# Those indices refer to the main list of all glyphs
# This is necessary to enable interactivity for two inputs at the same time
all_glyphs = []
overtime_groups = {}
run_groups = {run : [] for run in self.configs_in_run.keys()}
# Iterate over quantiles (this updates overtime_groups)
for idx, source, u_cfgs in zip(range(len(sources)), sources, used_configs):
# Create new plot if necessary (only plot all quantiles in one single plot if timeslider is on)
if not use_timeslider or idx == 0:
p = self._create_figure(x_range, y_range)
if contour_data is not None: # TODO
contour_handles, color_mapper = self._plot_contour(p, contour_data, x_range, y_range)
# Create views and scatter
views, views_by_run, markers = self._create_views(source, u_cfgs)
scatter_handles = self._scatter(p, source, views, markers)
self.logger.debug("Quantile %d: %d scatter-handles", idx, len(scatter_handles))
if len(scatter_handles) == 0:
self.logger.debug("No configs in quantile %d (?!)", idx)
continue
# Add to groups
start = len(all_glyphs)
all_glyphs.extend(scatter_handles)
overtime_groups[str(idx)] = [str(i) for i in range(start, len(all_glyphs))]
for run, indices in views_by_run.items():
run_groups[run].extend([str(start + i) for i in indices])
# Write to file
if self.output_dir:
file_path = "cfp_over_time/configurator_footprint" + str(idx) + ".png"
over_time_paths.append(os.path.join(self.output_dir, file_path))
self.logger.debug("Saving plot to %s", over_time_paths[-1])
export_bokeh(p, over_time_paths[-1], self.logger)
# Add hovertool (define what appears in tooltips)
# TODO add only important parameters (needs to change order of exec pimp before conf-footprints)
hover = HoverTool(tooltips=[('type', '@type'), ('origin', '@origin'), ('runs', '@runs')] +
[(k, '@' + escape_parameter_name(k)) for k in hp_names],
renderers=all_glyphs)
p.add_tools(hover)
# Build dashboard
timeslider, checkbox, select_all, select_none, checkbox_title = self._get_widgets(all_glyphs, overtime_groups, run_groups,
slider_labels=timeslider_labels)
contour_checkbox, contour_title = self._contour_radiobuttongroup(contour_handles, color_mapper)
layout = p
if use_timeslider:
self.logger.debug("Adding timeslider")
layout = column(layout, widgetbox(timeslider))
if use_checkbox:
self.logger.debug("Adding checkboxes")
layout = row(layout,
column(widgetbox(checkbox_title),
widgetbox(checkbox),
row(widgetbox(select_all, width=100),
widgetbox(select_none, width=100)),
widgetbox(contour_title),
widgetbox(contour_checkbox)))
if self.output_dir:
path = os.path.join(self.output_dir, "content/images/configurator_footprint.png")
export_bokeh(p, path, self.logger)
return layout, over_time_paths
def _get_widgets(self, all_glyphs, overtime_groups, run_groups, slider_labels=None):
"""Combine timeslider for quantiles and checkboxes for individual runs in a single javascript-snippet
Parameters
----------
all_glyphs: List[Glyph]
togglable bokeh-glyphs
        overtime_groups, run_groups: Dict[str -> List[int]]
mapping labels to indices of the all_glyphs-list
slider_labels: Union[None, List[str]]
if provided, used as labels for timeslider-widget
Returns
-------
time_slider, checkbox, select_all, select_none: Widget
desired interlayed bokeh-widgets
checkbox_title: Div
text-element to "show title" of checkbox
"""
aliases = ['glyph' + str(idx) for idx, _ in enumerate(all_glyphs)]
labels_overtime = list(overtime_groups.keys())
labels_runs = list(run_groups.keys())
code = ""
# Define javascript variable with important arrays
code += "var glyphs = [" + ", ".join(aliases) + "];"
code += "var overtime = [" + ','.join(['[' + ','.join(overtime_groups[l]) + ']' for l in labels_overtime]) + '];'
code += "var runs = [" + ','.join(['[' + ','.join(run_groups[l]) + ']' for l in labels_runs]) + '];'
# Deactivate all glyphs
code += """
glyphs.forEach(function(g) {
g.visible = false;
})"""
# Add function for array-union (to combine all relevant glyphs for the different runs)
code += """
// union function
function union_arrays(x, y) {
var obj = {};
for (var i = x.length-1; i >= 0; -- i)
obj[x[i]] = x[i];
for (var i = y.length-1; i >= 0; -- i)
obj[y[i]] = y[i];
var res = []
for (var k in obj) {
if (obj.hasOwnProperty(k)) // <-- optional
res.push(obj[k]);
}
return res;
}"""
# Add logging
code += """
console.log("Timeslider: " + time_slider.value);
console.log("Checkbox: " + checkbox.active);"""
# Set timeslider title (to enable log-scale and print wallclocktime-labels)
if slider_labels:
code += "var slider_labels = " + str(slider_labels) + ";"
code += "console.log(\"Detected slider_labels: \" + slider_labels);"
code += "time_slider.title = \"Until wallclocktime \" + slider_labels[time_slider.value - 1] + \". Step no.\"; "
title = "Until wallclocktime " + slider_labels[-1] + ". Step no. "
else:
title = "Quantile on {} scale".format("logarithmic" if self.timeslider_log else "linear")
code += "time_slider.title = \"{}\";".format(title);
# Combine checkbox-arrays, intersect with time_slider and set all selected glyphs to true
code += """
var activate = [];
// if we want multiple checkboxes at the same time, we need to combine the arrays
checkbox.active.forEach(function(c) {
activate = union_arrays(activate, runs[c]);
})
// now the intersection of timeslider-activated and checkbox-activated
activate = activate.filter(value => -1 !== overtime[time_slider.value - 1].indexOf(value));
activate.forEach(function(idx) {
glyphs[idx].visible = true;
})
"""
num_quantiles = len(overtime_groups)
if num_quantiles > 1:
timeslider = Slider(start=1, end=num_quantiles, value=num_quantiles, step=1, title=title)
else:
timeslider = Slider(start=1, end=2, value=1)
labels_runs = [label.replace('_', ' ') if label.startswith('budget') else label for label in labels_runs]
checkbox = CheckboxButtonGroup(labels=labels_runs, active=list(range(len(labels_runs))))
args = {name: glyph for name, glyph in zip(aliases, all_glyphs)}
args['time_slider'] = timeslider
args['checkbox'] = checkbox
callback = CustomJS(args=args, code=code)
timeslider.js_on_change('value', callback)
checkbox.callback = callback
checkbox_title = Div(text="Showing only configurations evaluated in:")
# Add all/none button to checkbox
code_all = "checkbox.active = " + str(list(range(len(labels_runs)))) + ";" + code
code_none = "checkbox.active = [];" + code
select_all = Button(label="All", callback=CustomJS(args=args, code=code_all))
select_none = Button(label="None", callback=CustomJS(args=args, code=code_none))
return timeslider, checkbox, select_all, select_none, checkbox_title
def _contour_radiobuttongroup(self, contour_data, color_mapper):
"""
Returns
-------
radiobuttongroup: RadioButtonGroup
radiobuttongroup widget to select one of the elements
title: Div
text-element to "show title" of widget
"""
labels = [l.replace('_', ' ') if l.startswith('budget') else l for l in contour_data.keys()]
aliases = ['glyph' + str(i) for i in range(len(labels))]
values = list(contour_data.values())
glyphs = [v[0] for v in values]
mins = [v[1][0] for v in values]
maxs = [v[1][1] for v in values]
args = {name: glyph for name, glyph in zip(aliases, glyphs)}
args['colormapper'] = color_mapper
# Create javascript-code
code = "var len_labels = " + str(len(aliases)) + ","
code += "glyphs = [ " + ','.join(aliases) + '],'
code += "mins = " + str(mins) + ','
code += "maxs = " + str(maxs) + ';'
code += """
for (i = 0; i < len_labels; i++) {
if (cb_obj.active === i) {
// console.log('Setting to true: ' + i);
glyphs[i].visible = true;
colormapper.low = mins[i];
colormapper.high = maxs[i];
} else {
// console.log('Setting to false: ' + i);
glyphs[i].visible = false;
}
}
"""
# Create the actual checkbox-widget
callback = CustomJS(args=args, code=code)
radio = RadioButtonGroup(labels=labels, active=0, callback=callback)
title = Div(text="Data used to estimate contour-plot")
return radio, title
def _create_figure(self, x_range, y_range):
p = figure(plot_height=500, plot_width=600,
tools=['save', 'box_zoom', 'wheel_zoom', 'reset'],
x_range=x_range, y_range=y_range)
p.xaxis.axis_label = "MDS-X"
p.yaxis.axis_label = "MDS-Y"
p.xaxis.axis_label_text_font_size = "15pt"
p.yaxis.axis_label_text_font_size = "15pt"
p.xaxis.major_label_text_font_size = "12pt"
p.yaxis.major_label_text_font_size = "12pt"
p.title.text_font_size = "15pt"
return p
| bsd-3-clause |
sofiane87/lasagne-GAN | context_encoder/context_encoder.py | 1 | 9629 | from __future__ import print_function
from keras.datasets import cifar10
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise
from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
from keras.layers import MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras import losses
from keras.utils import to_categorical
import keras.backend as K
import matplotlib.pyplot as plt
import numpy as np
class ContextEncoder():
def __init__(self):
self.img_rows = 32
self.img_cols = 32
self.mask_height = 8
self.mask_width = 8
self.channels = 3
self.num_classes = 2
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.missing_shape = (self.mask_height, self.mask_width, self.channels)
optimizer = Adam(0.0002, 0.5)
# Build and compile the discriminator
self.discriminator = self.build_discriminator()
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
# Build and compile the generator
self.generator = self.build_generator()
self.generator.compile(loss=['binary_crossentropy'],
optimizer=optimizer)
# The generator takes noise as input and generates the missing
# part of the image
masked_img = Input(shape=self.img_shape)
gen_missing = self.generator(masked_img)
# For the combined model we will only train the generator
self.discriminator.trainable = False
# The discriminator takes generated images as input and determines
# if it is generated or if it is a real image
valid = self.discriminator(gen_missing)
# The combined model (stacked generator and discriminator) takes
# masked_img as input => generates missing image => determines validity
self.combined = Model(masked_img , [gen_missing, valid])
self.combined.compile(loss=['mse', 'binary_crossentropy'],
loss_weights=[0.999, 0.001],
optimizer=optimizer)
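        # The joint loss is dominated by the reconstruction (MSE) term; the small
        # adversarial weight only nudges generated patches towards realistic
        # textures (mirroring the weighting used in the context-encoder paper).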
def build_generator(self):
model = Sequential()
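        # The encoder downsamples the 32x32 masked image to a 2x2x512 bottleneck;
        # the decoder only upsamples back to 8x8, i.e. the size of the missing patch.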
# Encoder
model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(512, kernel_size=1, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.5))
# Decoder
model.add(UpSampling2D())
model.add(Conv2D(128, kernel_size=3, padding="same"))
model.add(Activation('relu'))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(64, kernel_size=3, padding="same"))
model.add(Activation('relu'))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(self.channels, kernel_size=3, padding="same"))
model.add(Activation('tanh'))
model.summary()
masked_img = Input(shape=self.img_shape)
gen_missing = model(masked_img)
return Model(masked_img, gen_missing)
def build_discriminator(self):
model = Sequential()
model.add(Conv2D(64, kernel_size=3, strides=2, input_shape=self.missing_shape, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(256, kernel_size=3, padding="same"))
model.add(LeakyReLU(alpha=0.2))
model.add(BatchNormalization(momentum=0.8))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.summary()
img = Input(shape=self.missing_shape)
validity = model(img)
return Model(img, validity)
def mask_randomly(self, imgs):
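        # Cut a randomly positioned mask_height x mask_width patch out of each image:
        # returns the masked images (patch zeroed out), the extracted patches, and
        # the patch coordinates (y1, y2, x1, x2).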
y1 = np.random.randint(0, self.img_rows - self.mask_height, imgs.shape[0])
y2 = y1 + self.mask_height
        x1 = np.random.randint(0, self.img_cols - self.mask_width, imgs.shape[0])
x2 = x1 + self.mask_width
masked_imgs = np.empty_like(imgs)
missing_parts = np.empty((imgs.shape[0], self.mask_height, self.mask_width, self.channels))
for i, img in enumerate(imgs):
masked_img = img.copy()
_y1, _y2, _x1, _x2 = y1[i], y2[i], x1[i], x2[i]
missing_parts[i] = masked_img[_y1:_y2, _x1:_x2, :].copy()
masked_img[_y1:_y2, _x1:_x2, :] = 0
masked_imgs[i] = masked_img
return masked_imgs, missing_parts, (y1, y2, x1, x2)
def train(self, epochs, batch_size=128, save_interval=50):
# Load the dataset
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = np.vstack((X_train, X_test))
y_train = np.vstack((y_train, y_test))
# Extract dogs and cats
X_cats = X_train[(y_train == 3).flatten()]
X_dogs = X_train[(y_train == 5).flatten()]
X_train = np.vstack((X_cats, X_dogs))
# Rescale -1 to 1
        X_train = X_train.astype(np.float32) / 255
X_train = 2 * X_train - 1
y_train = y_train.reshape(-1, 1)
half_batch = int(batch_size / 2)
for epoch in range(epochs):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random half batch of images
idx = np.random.randint(0, X_train.shape[0], half_batch)
imgs = X_train[idx]
masked_imgs, missing, _ = self.mask_randomly(imgs)
# Generate a half batch of new images
gen_missing = self.generator.predict(masked_imgs)
valid = np.ones((half_batch, 1))
fake = np.zeros((half_batch, 1))
# Train the discriminator
d_loss_real = self.discriminator.train_on_batch(missing, valid)
d_loss_fake = self.discriminator.train_on_batch(gen_missing, fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ---------------------
# Train Generator
# ---------------------
# Select a random half batch of images
idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[idx]
masked_imgs, missing_parts, _ = self.mask_randomly(imgs)
# Generator wants the discriminator to label the generated images as valid
valid = np.ones((batch_size, 1))
# Train the generator
g_loss = self.combined.train_on_batch(masked_imgs, [missing_parts, valid])
# Plot the progress
print ("%d [D loss: %f, acc: %.2f%%] [G loss: %f, mse: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss[0], g_loss[1]))
# If at save interval => save generated image samples
if epoch % save_interval == 0:
# Select a random half batch of images
idx = np.random.randint(0, X_train.shape[0], 6)
imgs = X_train[idx]
self.save_imgs(epoch, imgs)
def save_imgs(self, epoch, imgs):
r, c = 3, 6
masked_imgs, missing_parts, (y1, y2, x1, x2) = self.mask_randomly(imgs)
gen_missing = self.generator.predict(masked_imgs)
imgs = 0.5 * imgs + 0.5
masked_imgs = 0.5 * masked_imgs + 0.5
gen_missing = 0.5 * gen_missing + 0.5
fig, axs = plt.subplots(r, c)
for i in range(c):
axs[0,i].imshow(imgs[i, :,:])
axs[0,i].axis('off')
axs[1,i].imshow(masked_imgs[i, :,:])
axs[1,i].axis('off')
filled_in = imgs[i].copy()
filled_in[y1[i]:y2[i], x1[i]:x2[i], :] = gen_missing[i]
axs[2,i].imshow(filled_in)
axs[2,i].axis('off')
fig.savefig("context_encoder/images/cifar_%d.png" % epoch)
plt.close()
def save_model(self):
def save(model, model_name):
model_path = "context_encoder/saved_model/%s.json" % model_name
weights_path = "context_encoder/saved_model/%s_weights.hdf5" % model_name
options = {"file_arch": model_path,
"file_weight": weights_path}
json_string = model.to_json()
open(options['file_arch'], 'w').write(json_string)
model.save_weights(options['file_weight'])
save(self.generator, "context_encoder_generator")
save(self.discriminator, "context_encoder_discriminator")
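# Hedged note added for illustration (not part of the original training script):
# models written by save() above can be restored later with Keras' standard loaders, e.g.
#   from keras.models import model_from_json
#   gen = model_from_json(open("context_encoder/saved_model/context_encoder_generator.json").read())
#   gen.load_weights("context_encoder/saved_model/context_encoder_generator_weights.hdf5")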
if __name__ == '__main__':
context_encoder = ContextEncoder()
context_encoder.train(epochs=30000, batch_size=64, save_interval=50)
| mit |
Mega-DatA-Lab/mxnet | example/ssd/dataset/pycocotools/coco.py | 29 | 19564 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
__author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each function can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
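# Illustrative usage sketch (added as a comment; the annotation file path below is a
# placeholder and is not shipped with this module):
#   coco = COCO('annotations/instances_val2014.json')
#   catIds = coco.getCatIds(catNms=['person'])
#   imgIds = coco.getImgIds(catIds=catIds)
#   anns = coco.loadAnns(coco.getAnnIds(imgIds=imgIds[:1]))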
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
# from . import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if not annotation_file == None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception('datasetType not supported')
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
# rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
raise NotImplementedError("maskUtils disabled!")
else:
rle = [ann['segmentation']]
# m = maskUtils.decode(rle)
raise NotImplementedError("maskUtils disabled!")
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print('Loading and preparing results...')
tic = time.time()
if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
# ann['area'] = maskUtils.area(ann['segmentation'])
raise NotImplementedError("maskUtils disabled!")
if not 'bbox' in ann:
# ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
raise NotImplementedError("maskUtils disabled!")
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urlretrieve(img['coco_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
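# Hedged example (illustration only): each row of the input array is
# [imageID, x1, y1, w, h, score, class], so
#   dets = np.array([[42., 10., 20., 30., 40., 0.9, 3.]])
#   coco.loadNumpyAnnotations(dets)
# returns [{'image_id': 42, 'bbox': [10.0, 20.0, 30.0, 40.0], 'score': 0.9, 'category_id': 3}]
# (42 and the box values are made-up numbers).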
def annToRLE(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: rle (run-length encoded segmentation of the annotation)
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
# rles = maskUtils.frPyObjects(segm, h, w)
# rle = maskUtils.merge(rles)
raise NotImplementedError("maskUtils disabled!")
elif type(segm['counts']) == list:
# uncompressed RLE
# rle = maskUtils.frPyObjects(segm, h, w)
raise NotImplementedError("maskUtils disabled!")
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
# m = maskUtils.decode(rle)
raise NotImplementedError("maskUtils disabled!")
return m
| apache-2.0 |
boland1992/SeisSuite | build/lib/ambient/spectrum/station_month_spectrum.py | 8 | 14156 | # -*- coding: utf-8 -*-
"""
Created on Fri July 6 11:04:03 2015
@author: boland
"""
import os
import glob
import scipy
import datetime
import numpy as np
import datetime as dt
import multiprocessing as mp
import matplotlib.pyplot as plt
from numpy.lib.stride_tricks import as_strided
from numpy.fft import rfft, irfft
from obspy import read_inventory
from scipy import signal
from obspy import read
from scipy.interpolate import interp1d
from pysismo import pscrosscorr
from pysismo.psconfig import (CROSSCORR_TMAX)
#PICKLE_PATH = '/home/boland/Desktop/XCORR-STACK_01.08.1999-10.06.2000\
#_datalesspaz.part.pickle'
#xc = pscrosscorr.load_pickled_xcorr(PICKLE_PATH)
# optimizing time-scale: max time = max distance / vmin (vmin = 2.5 km/s)
#maxdist = max([xc[s1][s2].dist() for s1, s2 in xc.pairs()])
#maxt = min(CROSSCORR_TMAX, maxdist / 2.5)
#plot distance plot of cross-correlations
#xc.plot(plot_type='distance', xlim=(-maxt, maxt),
# outfile="/home/boland/Desktop/something1342.png", showplot=False)
#------------------------------------------------------------------------------
# IMPORT PATHS TO MSEED FILES
#------------------------------------------------------------------------------
def spectrum(tr):
wave = tr.data #this is how to extract a data array from a mseed file
fs = tr.stats.sampling_rate
#hour = str(hour).zfill(2) #create correct format for eqstring
f, Pxx_spec = signal.welch(wave, fs, 'flattop', nperseg=1024, scaling='spectrum')
#plt.semilogy(f, np.sqrt(Pxx_spec))
if len(f) >= 256:
column = np.column_stack((f[:255], np.abs(np.sqrt(Pxx_spec)[:255])))
return column
else:
return 0.
# x = np.linspace(0, 10, 1000)
# f_interp = interp1d(np.sqrt(Pxx_spec),f, kind='cubic')
#x.reverse()
#y.reverse()
# print f_interp(x)
#f,np.sqrt(Pxx_spec),'o',
# plt.figure()
# plt.plot(x,f_interp(x),'-' )
# plt.show()
def paths_sort(path):
"""
Function defined for customised sorting of the abs_paths list
and will be used in conjunction with the sorted() built in python
function in order to produce file paths in chronological order.
"""
base_name = os.path.basename(path)
stat_name = base_name.split('.')[0]
date = base_name.split('.')[1]
try:
date = datetime.datetime.strptime(date, '%Y-%m-%d')
return date, stat_name
except Exception as e:
pass  # skip files whose names do not match the expected <station>.<date> pattern
def paths(folder_path, extension):
"""
Function that returns a list of desired absolute paths called abs_paths
of files that contains a given extension e.g. .txt should be entered as
folder_path, txt. This function will run recursively through and find
any and all files within this folder with that extension!
"""
abs_paths = []
for root, dirs, files in os.walk(folder_path):
for f in files:
fullpath = os.path.join(root, f)
if os.path.splitext(fullpath)[1] == '.{}'.format(extension):
abs_paths.append(fullpath)
abs_paths = sorted(abs_paths, key=paths_sort)
return abs_paths
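# Hedged usage sketch (illustration only, reusing the data directory configured below):
#   mseed_files = paths('/storage/ANT/INPUT/DATA/AU-2014', 'mseed')
#   # -> chronologically sorted list of absolute paths to all *.mseed files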
folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
extension = 'mseed'
paths_list = paths(folder_path, extension)
t0_total = datetime.datetime.now()
for s in paths_list:
try:
split_path = s.split('/')
stat_info = split_path[-1][:-6]
month_year = split_path[-2]
subtitle = '{} | {}'.format(stat_info, month_year); print subtitle
outname = 'stations_gary/PDS_{}_{}.svg'.format(stat_info, month_year)
t0 = datetime.datetime.now()
st = read(s)
t1 = datetime.datetime.now()
print "time taken to import one month mseed was: ", t1-t0
# set up loop for all traces within each imported stream.
t0 = datetime.datetime.now()
pool = mp.Pool()
spectra = pool.map(spectrum, st[:])
pool.close()
pool.join()
t1 = datetime.datetime.now()
print "time taken to calculate monthly spectra: ", t1-t0
fig = plt.figure(figsize=(15,10))
ax = fig.add_subplot(111)
ax.set_title("Seismic Waveform Power Density Spectrum\n{}".format(subtitle))
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Power Density Spectrum (V RMS)')
ax.set_xlim([0,10])
ax.grid(True, axis='both', color='gray')
ax.set_autoscaley_on(True)
ax.set_yscale('log')
#ax.set_ylim([-10, 1e4])
# Caclulate weighted average spectrum for this station for this month
spectra = np.asarray(spectra)
search = np.where(spectra==0.)
spectra = np.delete(spectra, search)
spectra = np.average(spectra, axis=0)
plt.plot(spectra[:,0], spectra[:,1], c='k')
fig.savefig(outname, format='svg', dpi=300)
plt.clf()
except:
pass  # skip this month/station if reading or plotting fails
#for spectrum in spectra:
quit()
#plt.plot(f, np.sqrt(Pxx_spec), alpha=alpha, c='k')
#plt.xlim([0,2])
#plt.ylim([0,25000])
fig.savefig('Power Density Spectrum_all.svg', format='svg', dpi=1000)
plt.xlim([0,1])
#plt.ylim([0,25000])
fig.savefig('Power Density Spectrum_0-1Hz.svg', format='svg', dpi=1000)
plt.xlim([0,2])
#plt.ylim([0,25000])
fig.savefig('Power Density Spectrum_0-2Hz.svg', format='svg', dpi=1000)
plt.xlim([0,3])
#plt.ylim([0,25000])
fig.savefig('Power Density Spectrum_0-3Hz.svg', format='svg', dpi=1000)
plt.xlim([0,4])
#plt.ylim([0,25000])
fig.savefig('Power Density Spectrum_0-4Hz.svg', format='svg', dpi=1000)
plt.xlim([0,5])
#plt.ylim([0,25000])
fig.savefig('Power Density Spectrum_0-5Hz.svg', format='svg', dpi=1000)
plt.xlim([0,6])
#plt.ylim([0,25000])
fig.savefig('Power Density Spectrum_0-6Hz.svg', format='svg', dpi=1000)
plt.xlim([0,7])
#plt.ylim([0,25000])
fig.savefig('Power Density Spectrum_0-7Hz.svg', format='svg', dpi=1000)
plt.xlim([0,8])
#plt.ylim([0,25000])
fig.savefig('Power Density Spectrum_0-8Hz.svg', format='svg', dpi=1000)
plt.xlim([0,9])
#plt.ylim([0,25000])
fig.savefig('Power Density Spectrum_0-9Hz.svg', format='svg', dpi=1000)
t1_total = datetime.datetime.now()
print "total time taken to process and plot all PDS: ", t1_total-t0_total
quit()
def get_stationxml_inventories(stationxml_dir, verbose=False):
"""
Reads inventories in all StationXML (*.xml) files
of specified dir
@type stationxml_dir: unicode or str
@type verbose: bool
@rtype: list of L{obspy.station.inventory.Inventory}
"""
inventories = []
# list of *.xml files
flist = glob.glob(pathname=os.path.join(stationxml_dir, "*.xml"))
if verbose:
if flist:
print "Reading inventory in StationXML file:",
else:
s = u"Could not find any StationXML file (*.xml) in dir: {}!"
print s.format(stationxml_dir)
for f in flist:
if verbose:
print os.path.basename(f),
inv = read_inventory(f, format='stationxml')
inventories.append(inv)
if flist and verbose:
print
return inventories
def spectrum(tr):
wave = tr.data #this is how to extract a data array from a mseed file
fs = tr.stats.sampling_rate
#hour = str(hour).zfill(2) #create correct format for eqstring
f, Pxx_spec = signal.welch(wave, fs, 'flattop', 1024, scaling='spectrum')
#plt.semilogy(f, np.sqrt(Pxx_spec))
plt.title("Frequency Density Plot of PNG Earthquake from station PMG.IU")
plt.plot(f, np.sqrt(Pxx_spec))
plt.xlim([0, 5])
plt.xlabel('frequency [Hz]')
plt.ylabel('Linear spectrum [V RMS]')
def resample(trace, dt_resample):
"""
Subroutine to resample trace
@type trace: L{obspy.core.trace.Trace}
@type dt_resample: float
@rtype: L{obspy.core.trace.Trace}
"""
dt = 1.0 / trace.stats.sampling_rate
factor = dt_resample / dt
if int(factor) == factor:
# simple decimation (no filt because it shifts the data)
trace.decimate(int(factor), no_filter=True)
else:
# linear interpolation
tp = np.arange(0, trace.stats.npts) * trace.stats.delta
zp = trace.data
ninterp = int(max(tp) / dt_resample) + 1
tinterp = np.arange(0, ninterp) * dt_resample
trace.data = np.interp(tinterp, tp, zp)
trace.stats.npts = ninterp
trace.stats.delta = dt_resample
trace.stats.sampling_rate = 1.0 / dt_resample
#trace.stats.endtime = trace.stats.endtime + max(tinterp)-max(tp)
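# Hedged example (illustration only; 'tr' stands for any obspy Trace): a trace sampled
# at 100 Hz (dt = 0.01 s) resampled with dt_resample = 0.05 s takes the integer-factor
# branch (decimation by 5, new rate 20 Hz), while dt_resample = 0.045 s is not an
# integer multiple of dt and therefore falls back to the linear-interpolation branch.
#   resample(tr, 0.05)   # tr.stats.sampling_rate becomes 20.0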
def moving_avg(a, halfwindow, mask=None):
"""
Performs a fast n-point moving average of (the last
dimension of) array *a*, by using stride tricks to roll
a window on *a*.
Note that *halfwindow* gives the nb of points on each side,
so that n = 2*halfwindow + 1.
If *mask* is provided, values of *a* where mask = False are
skipped.
Returns an array of same size as *a* (which means that near
the edges, the averaging window is actually < *npt*).
"""
# padding array with zeros on the left and on the right:
# e.g., if halfwindow = 2:
# a_padded = [0 0 a0 a1 ... aN 0 0]
# mask_padded = [F F ? ? ? F F]
if mask is None:
mask = np.ones_like(a, dtype='bool')
zeros = np.zeros(a.shape[:-1] + (halfwindow,))
falses = zeros.astype('bool')
a_padded = np.concatenate((zeros, np.where(mask, a, 0), zeros), axis=-1)
mask_padded = np.concatenate((falses, mask, falses), axis=-1)
# rolling window on padded array using stride trick
#
# E.g., if halfwindow=2:
# rolling_a[:, 0] = [0 0 a0 a1 ... aN]
# rolling_a[:, 1] = [0 a0 a1 a2 ... aN 0 ]
# ...
# rolling_a[:, 4] = [a2 a3 ... aN 0 0]
npt = 2 * halfwindow + 1 # total size of the averaging window
rolling_a = as_strided(a_padded,
shape=a.shape + (npt,),
strides=a_padded.strides + (a.strides[-1],))
rolling_mask = as_strided(mask_padded,
shape=mask.shape + (npt,),
strides=mask_padded.strides + (mask.strides[-1],))
# moving average
n = rolling_mask.sum(axis=-1)
return np.where(n > 0, rolling_a.sum(axis=-1).astype('float') / n, np.nan)
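# Hedged example (illustration only): with halfwindow=1 the window is 3 points wide and
# the edges average only the available neighbours, because the zero padding is masked out:
#   moving_avg(np.array([1., 2., 3., 4.]), halfwindow=1)
#   # -> array([1.5, 2. , 3. , 3.5])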
def butterworth(trace):
#filter
#print("first filter")
trace.filter(type="bandpass",
freqmin =freqmin,
freqmax = freqmax,
corners=corners,
zerophase=zerophase)
return trace
def normal(trace,
freqmin_earthquake,
freqmax_earthquake):
# normalization of the signal by the running mean
# in the earthquake frequency band
trcopy = trace
#print("normalising filter")
trcopy.filter(type="bandpass",
freqmin=freqmin_earthquake,
freqmax=freqmax_earthquake,
corners=corners,
zerophase=zerophase)
# decimating trace
resample(trcopy, period_resample)
# Time-normalization weights from smoothed abs(data)
# Note that trace's data can be a masked array
halfwindow = int(round(window_time * trcopy.stats.sampling_rate / 2))
mask = ~trcopy.data.mask if np.ma.isMA(trcopy.data) else None
tnorm_w = moving_avg(np.abs(trcopy.data),
halfwindow=halfwindow,
mask=mask)
if np.ma.isMA(trcopy.data):
# turning time-normalization weights into a masked array
s = "[warning: {}.{} trace's data is a masked array]"
print s.format(trace.stats.network, trace.stats.station),
tnorm_w = np.ma.masked_array(tnorm_w, trcopy.data.mask)
# time-normalization
trace.data /= tnorm_w
return trace
def whiten(trace, window_freq, freqmin, freqmax, corners, zerophase):
"""
function that produces a whitened spectrum
"""
fft = rfft(trace.data) # real FFT
deltaf = trace.stats.sampling_rate / trace.stats.npts # frequency step
# smoothing amplitude spectrum
halfwindow = int(round(window_freq / deltaf / 2.0))
weight = moving_avg(abs(fft), halfwindow=halfwindow)
# normalizing spectrum and back to time domain
trace.data = irfft(fft / weight, n=len(trace.data))
# re bandpass to avoid low/high freq noise
#print("Whiten filter")
trace.filter(type="bandpass",
freqmin =freqmin,
freqmax = freqmax,
corners=corners,
zerophase=zerophase)
return trace
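# Hedged note (illustration only, assumed numbers): whitening divides the FFT by its own
# smoothed amplitude, so the smoothing width depends on the trace length. For a 500 s
# trace sampled at 250 Hz, npts = 125000 and deltaf = 0.002 Hz, so window_freq = 0.02 Hz
# gives halfwindow = round(0.02 / 0.002 / 2) = 5, i.e. an 11-point (2*5 + 1) smoothing
# window in the frequency domain.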
def preprocess(trace):
#trace.attach_response(inventories=xml_inventories)
trace = butterworth(trace)
#trace.remove_response(output="VEL", zero_mean=True)
#plt.figure()
#spectrum(trace)
#trace = normal(trace, freqmin_earthquake, freqmax_earthquake)
#plt.figure()
#spectrum(trace)
#print(trace.stats.sampling_rate)
trace = whiten(trace, window_freq, freqmin, freqmax, corners, zerophase)
#plt.figure()
#spectrum(trace)
return trace
xcorr = 0
freqmin = 1.0/25.0
freqmax = 1.0/1
corners = 1
zerophase = True
freqmin_earthquake = 1/50.0
freqmax_earthquake = 1/25.0
window_time = 0.5 * freqmax_earthquake
window_freq = 0.02
period_resample = 0.45
STATIONXML_DIR = '/storage/ANT/PROGRAMS/ANT_OUTPUT/INPUT/XML'
xml_inventories = []
sample_rate = 250
counts = 0
for time in times:
st0 = read(dir1, starttime=time, endtime=time + dt.timedelta(minutes=XCORR_INTERVAL))
st1 = read(dir2, starttime=time, endtime=time + dt.timedelta(minutes=XCORR_INTERVAL))
tr0 = st0[0]
tr1 = st1[0]
tr0.stats.sampling_rate = sample_rate
tr1.stats.sampling_rate = sample_rate
tr0 = preprocess(tr0)
tr1 = preprocess(tr1)
xcorr_window = scipy.signal.correlate(tr0, tr1, mode='same')
xcorr = xcorr + xcorr_window  # stack this window's cross-correlation onto the running total
plt.figure(1)
plt.plot(xcorr)
plt.show()
print(counts)
counts +=1
import matplotlib.pyplot as plt
| gpl-3.0 |
scikit-garden/scikit-garden | skgarden/mondrian/ensemble/tests/test_forest_partial_fit.py | 1 | 3561 | import numpy as np
from sklearn.datasets import load_digits
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from skgarden import MondrianForestRegressor
from skgarden import MondrianForestClassifier
def check_partial_fit_equivalence(size_batch, f, random_state, X, y, is_clf=False):
start_ptr = list(range(0, 100, size_batch))
end_ptr = start_ptr[1:] + [100]
if not is_clf:
p_f = MondrianForestRegressor(random_state=random_state)
else:
p_f = MondrianForestClassifier(random_state=random_state)
for start, end in zip(start_ptr, end_ptr):
p_f.partial_fit(X[start:end], y[start:end])
for est, p_est in zip(f.estimators_, p_f.estimators_):
assert_array_equal(p_est.tree_.n_node_samples, est.tree_.n_node_samples)
assert_array_equal(p_est.tree_.threshold, est.tree_.threshold)
assert_array_equal(p_est.tree_.feature, est.tree_.feature)
assert_equal(p_est.tree_.root, est.tree_.root)
assert_array_equal(p_est.tree_.value, est.tree_.value)
assert_equal(est.tree_.n_node_samples[est.tree_.root], 100)
assert_equal(p_est.tree_.n_node_samples[est.tree_.root], 100)
def test_partial_fit_equivalence():
X, y = make_regression(random_state=0, n_samples=100)
mfr = MondrianForestRegressor(random_state=0)
mfr.partial_fit(X, y)
for batch_size in [10, 20, 25, 50, 90]:
check_partial_fit_equivalence(batch_size, mfr, 0, X, y)
X, y = make_classification(random_state=0, n_samples=100)
mtc = MondrianForestClassifier(random_state=0)
mtc.partial_fit(X, y)
for batch_size in [10, 20, 25, 50, 90]:
check_partial_fit_equivalence(batch_size, mtc, 0, X, y, is_clf=True)
def check_fit_after_partial_fit(ensemble, X, y):
ensemble.fit(X, y)
for est in ensemble.estimators_:
assert_equal(est.tree_.n_node_samples[0], 10)
ensemble.partial_fit(X, y)
for est in ensemble.estimators_:
assert_equal(est.tree_.n_node_samples[est.tree_.root], 10)
ensemble.partial_fit(X, y)
for est in ensemble.estimators_:
assert_equal(est.tree_.n_node_samples[est.tree_.root], 20)
def test_fit_after_partial_fit():
rng = np.random.RandomState(0)
X = rng.randn(10, 5)
y = np.floor(rng.randn(10))
mfr = MondrianForestRegressor(random_state=0)
check_fit_after_partial_fit(mfr, X, y)
mfc = MondrianForestClassifier(random_state=0)
check_fit_after_partial_fit(mfc, X, y)
def test_min_samples_split():
X_c, y_c = load_digits(return_X_y=True)
X_r, y_r = make_regression(n_samples=10000, random_state=0)
for mss in [2, 4, 10, 20]:
mfr = MondrianForestRegressor(random_state=0, min_samples_split=mss)
mfr.partial_fit(X_r[: X_r.shape[0] // 2], y_r[: X_r.shape[0] // 2])
mfr.partial_fit(X_r[X_r.shape[0] // 2:], y_r[X_r.shape[0] // 2:])
for est in mfr.estimators_:
n_node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert np.min(n_node_samples) + 1 > mss
mfc = MondrianForestClassifier(random_state=0, min_samples_split=mss)
mfc.partial_fit(X_c[: X_c.shape[0] // 2], y_c[: X_c.shape[0] // 2])
mfc.partial_fit(X_c[X_c.shape[0] // 2:], y_c[X_c.shape[0] // 2:])
for est in mfc.estimators_:
n_node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert np.min(n_node_samples) + 1 > mss
| bsd-3-clause |
hpi-xnor/BMXNet | example/speech_recognition/stt_utils.py | 44 | 5892 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import os.path
import numpy as np
import soundfile
from numpy.lib.stride_tricks import as_strided
logger = logging.getLogger(__name__)
def calc_feat_dim(window, max_freq):
return int(0.001 * window * max_freq) + 1
def conv_output_length(input_length, filter_size, border_mode, stride,
dilation=1):
""" Compute the length of the output sequence after 1D convolution along
time. Note that this function is in line with the function used in
Convolution1D class from Keras.
Params:
input_length (int): Length of the input sequence.
filter_size (int): Width of the convolution kernel.
border_mode (str): Only support `same` or `valid`.
stride (int): Stride size used in 1D convolution.
dilation (int)
"""
if input_length is None:
return None
assert border_mode in {'same', 'valid'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if border_mode == 'same':
output_length = input_length
elif border_mode == 'valid':
output_length = input_length - dilated_filter_size + 1
return (output_length + stride - 1) // stride
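# Worked example of the formula above (illustration only): with input_length=100,
# filter_size=11, border_mode='valid', stride=2 and dilation=1, the dilated filter
# size is 11, the 'valid' length is 100 - 11 + 1 = 90, and the strided output is
# (90 + 2 - 1) // 2 = 45, i.e. conv_output_length(100, 11, 'valid', 2) == 45.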
def spectrogram(samples, fft_length=256, sample_rate=2, hop_length=128):
"""
Compute the spectrogram for a real signal.
The parameters follow the naming convention of
matplotlib.mlab.specgram
Args:
samples (1D array): input audio signal
fft_length (int): number of elements in fft window
sample_rate (scalar): sample rate
hop_length (int): hop length (relative offset between neighboring
fft windows).
Returns:
x (2D array): spectrogram [frequency x time]
freq (1D array): frequency of each row in x
Note:
This is a truncating computation e.g. if fft_length=10,
hop_length=5 and the signal has 23 elements, then the
last 3 elements will be truncated.
"""
assert not np.iscomplexobj(samples), "Must not pass in complex numbers"
window = np.hanning(fft_length)[:, None]
window_norm = np.sum(window ** 2)
# The scaling below follows the convention of
# matplotlib.mlab.specgram which is the same as
# matlabs specgram.
scale = window_norm * sample_rate
trunc = (len(samples) - fft_length) % hop_length
x = samples[:len(samples) - trunc]
# "stride trick" reshape to include overlap
nshape = (fft_length, (len(x) - fft_length) // hop_length + 1)
nstrides = (x.strides[0], x.strides[0] * hop_length)
x = as_strided(x, shape=nshape, strides=nstrides)
# window stride sanity check
assert np.all(x[:, 1] == samples[hop_length:(hop_length + fft_length)])
# broadcast window, compute fft over columns and square mod
# This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT).
x = np.fft.rfft(x * window, axis=0)
x = np.absolute(x) ** 2
# scale, 2.0 for everything except dc and fft_length/2
x[1:-1, :] *= (2.0 / scale)
x[(0, -1), :] /= scale
freqs = float(sample_rate) / fft_length * np.arange(x.shape[0])
return x, freqs
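# Hedged usage sketch (the signal below is an assumed example, not project data):
# a 1 s, 440 Hz tone sampled at 16 kHz, analysed with a 256-point window and
# 128-sample hop.
#   t = np.arange(16000) / 16000.0
#   tone = np.sin(2 * np.pi * 440.0 * t).astype('float32')
#   pxx, freqs = spectrogram(tone, fft_length=256, sample_rate=16000, hop_length=128)
#   # pxx.shape == (129, 124); the bin nearest 440 Hz (freqs[7] = 437.5) dominates.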
def spectrogram_from_file(filename, step=10, window=20, max_freq=None,
eps=1e-14, overwrite=False, save_feature_as_csvfile=False):
""" Calculate the log of linear spectrogram from FFT energy
Params:
filename (str): Path to the audio file
step (int): Step size in milliseconds between windows
window (int): FFT window size in milliseconds
max_freq (int): Only FFT bins corresponding to frequencies between
[0, max_freq] are returned
eps (float): Small value to ensure numerical stability (for ln(x))
"""
csvfilename = filename.replace(".wav", ".csv")
if (os.path.isfile(csvfilename) is False) or overwrite:
with soundfile.SoundFile(filename) as sound_file:
audio = sound_file.read(dtype='float32')
sample_rate = sound_file.samplerate
if audio.ndim >= 2:
audio = np.mean(audio, 1)
if max_freq is None:
max_freq = sample_rate / 2
if max_freq > sample_rate / 2:
raise ValueError("max_freq must not be greater than half of "
" sample rate")
if step > window:
raise ValueError("step size must not be greater than window size")
hop_length = int(0.001 * step * sample_rate)
fft_length = int(0.001 * window * sample_rate)
pxx, freqs = spectrogram(
audio, fft_length=fft_length, sample_rate=sample_rate,
hop_length=hop_length)
ind = np.where(freqs <= max_freq)[0][-1] + 1
res = np.transpose(np.log(pxx[:ind, :] + eps))
if save_feature_as_csvfile:
np.savetxt(csvfilename, res)
return res
else:
return np.loadtxt(csvfilename)
| apache-2.0 |
nomadcube/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
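# A possible (non-authoritative) sketch of the tasks above, left as a comment so the
# skeleton remains an exercise; it only uses the estimators imported at the top:
#   pipeline = Pipeline([
#       ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
#       ('clf', LinearSVC(C=1000)),
#   ])
#   parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
#   grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
#   grid_search.fit(docs_train, y_train)
#   y_predicted = grid_search.predict(docs_test)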
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
ferchault/iago | docs/conf.py | 1 | 8700 | # -*- coding: utf-8 -*-
#
# iago documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 28 18:57:03 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../src'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.intersphinx']
autodoc_mock_imports = ['pint',]
intersphinx_mapping = {
'python': ('https://docs.python.org/2', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'pandas': ('http://pandas-docs.github.io/pandas-docs-travis/', None),
'mdanalysis': ('http://pythonhosted.org/MDAnalysis', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'iago'
copyright = u'2016, Guido Falk von Rudorff'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'iagodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'iago.tex', u'iago Documentation',
u'Guido Falk von Rudorff', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'iago', u'iago Documentation',
[u'Guido Falk von Rudorff'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'iago', u'iago Documentation',
u'Guido Falk von Rudorff', 'iago', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
zsomko/visualcortex | python/spikes/article.py | 1 | 15133 | from numpy import *
from numpy.random import *
from plotting.plot_images import *
from plotting.myplot import *
from plotting.covariance_ellipse import *
from spikes.spikes import *
import matplotlib.image as img
import matplotlib.cm as cm
import matplotlib.gridspec as gridspec
def fig1b(make):
imgNr = 1500 # 3000
l = 50 # 500
# input ranges
u1 = 1.0
u2 = 10.0
ud = 1.0
c = 1.0
us = arange(u1, u2, ud)
ul = len(us)
pmeans = zeros(ul)
imeans = zeros(ul)
if(make == 1):
# nonlinearity parameters
k = 10.0
n = 1.1
ut = 1.9
for ui in range(ul):
print(ui)
pmeans[ui], _, imeans[ui] = new_gpairs_match2(us[ui], us[ui], c, c, 0.2, 0.7, l, imgNr, k, n, ut, k, n, ut)[3:6]
save("txt/fig1b.npy", [pmeans, imeans])
else:
pmeans, imeans = load("txt/fig1b.npy")
tl = r"$Mean_{SC}$"
fn = "images/fig1b.png"
plt.clf()
plt.plot(us, pmeans, "b.", label = "Poisson", ms = 10)
# [a,b] = polyfit(pmean_arr, pfano_arr, 1)
# plt.plot(plt.xlim(), a*array(plt.xlim())+b, lw=5, color="blue")
plt.plot(us, imeans, "r.", label = "Integration", ms = 10)
# [a,b] = polyfit(imean_arr, ifano_arr, 1)
# plt.plot(plt.xlim(), a*array(plt.xlim())+b, lw=5, color="red")
extra = plt.legend(bbox_to_anchor=(1, 0.5), loc='center left', ncol=1, fontsize = 30)
myplot(r"$Mean_{u} (mV)$", r"$Mean_{SC} (sp/s)$", tl, fn, xlog=False, legend=False, extra=(extra,), fsax=50, xloc=False)
def fig3b_o(make=1):
# orientation changes: variances stay, means increase then decrease (in membrane potentials)
# the case of 30, 31.5
# for poisson
#3.3 0.8
#31.9687644444 30.136
# for integration
#1.0 19.6
#31.1467831111 30.3
# saved with name_fit
# 2%
times = 3
# m_p = 3.3
# v_p = 0.8
# m_i = 1.0
# v_i = 19.6
m_p = 1.2
v_p = 0.2
m_i = 0.9
v_i = 2.3
if(make==1):
l = 50
imgNr = 30000 #30000
k = 10
# ne = 1.1
# ut = 1.9
# corr_i = 0.15
# corr_p = 0.95
ne = 1.4
ut = 0
corr_i = 0.15
corr_p = 0.85
i_fano = v_i/m_i;
p_fano = v_p/m_p;
ui_arr = arange(m_i/2, 1.05*m_i, m_i/10)  # compute only half of the tuning curve; symmetry is applied when plotting
ui_cov = repeat([v_i],6)
up_arr = arange(m_p/2, 1.05*m_p, m_p/10)
up_cov = repeat([v_p],6)
n = len(ui_arr)
pcoeff_arr = zeros((n, times))
icoeff_arr = zeros((n, times))
pvar_arr = zeros((n, times))
pmean_arr = zeros((n, times))
ivar_arr = zeros((n, times))
imean_arr = zeros((n, times))
# o1 is for Gergo's params
# o2 for mine
for i in range(n):
print (i)
for j in range(times):
pcoeff_arr[i][j], icoeff_arr[i][j], pvar_arr[i][j], pmean_arr[i][j], ivar_arr[i][j], imean_arr[i][j] = new_gpairs_match2(ui_arr[i], up_arr[i], ui_cov[i], up_cov[i], corr_i, corr_p, l, imgNr, k, ne, ut, k, ne, ut)[0:6]
save("txt/fig3b_o1.npy", [pcoeff_arr, icoeff_arr, pvar_arr, pmean_arr, ivar_arr, imean_arr])
else:
pcoeff_arr, icoeff_arr, pvar_arr, pmean_arr, ivar_arr, imean_arr = load("txt/fig3b_o1.npy")
print(icoeff_arr)
print(pcoeff_arr)
orient = [-150, -130, -105, -75, -40 ,0, 40, 75, 105, 130, 150]
pmean_arr = concatenate([pmean_arr[:-1], pmean_arr[::-1]])
imean_arr = concatenate([imean_arr[:-1], imean_arr[::-1]])
pvar_arr = concatenate([pvar_arr[:-1], pvar_arr[::-1]])
ivar_arr = concatenate([ivar_arr[:-1], ivar_arr[::-1]])
pcoeff_arr = concatenate([pcoeff_arr[:-1], pcoeff_arr[::-1]])
icoeff_arr = concatenate([icoeff_arr[:-1], icoeff_arr[::-1]])
pfano_arr = divide(pvar_arr, pmean_arr)
ifano_arr = divide(ivar_arr, imean_arr)
tl = ""#r"$Mean_{SC}$"
fn = "images/fig3b_o1_m.png"
plt.clf()
plt.plot(orient, pmean_arr[:,0], "r.", label = "Poisson", ms = 10)
plt.plot(orient, imean_arr[:,0], "b.", label = "Integration", ms = 10)
plt.plot(orient, pmean_arr[:,1:], "r.", ms = 10)
plt.plot(orient, imean_arr[:,1:], "b.", ms = 10)
plt.xticks([-160, -80, 0, 80, 160])
extra = plt.legend(bbox_to_anchor=(1, 0.5), loc='center left', ncol=1, fontsize = 30)
myplot(r"$orientation (deg)$", r"$Mean_{SC}$", tl, fn, xlog=False, legend=False, extra=(extra,), fsax=50, xloc=False)
tl = ""#r"$Fano_{SC}$"
fn = "images/fig3b_o1_f.png"
plt.clf()
plt.plot(orient, pfano_arr[:,0], "r.", label = "Poisson", ms = 10)
plt.plot(orient, ifano_arr[:,0], "b.", label = "Integration", ms = 10)
plt.plot(orient, pfano_arr[:,1:], "r.", ms = 10)
plt.plot(orient, ifano_arr[:,1:], "b.", ms = 10)
plt.xticks([-160, -80, 0, 80, 160])
extra = plt.legend(bbox_to_anchor=(1, 0.5), loc='center left', ncol=1, fontsize = 30)
myplot(r"$orientation (deg)$", r"$Fano_{SC}$", tl, fn, xlog=False, legend=False, extra=(extra,), fsax=50, xloc=False)
tl = ""#r"$\rho_{SC}$"
fn = "images/fig3b_o1_c.png"
plt.clf()
plt.plot(orient, pcoeff_arr[:,0], "r.", label = "Poisson", ms = 10)
plt.plot(orient, icoeff_arr[:,0], "b.", label = "Integration", ms = 10)
plt.plot(orient, pcoeff_arr[:,1:], "r.", ms = 10)
plt.plot(orient, icoeff_arr[:,1:], "b.", ms = 10)
plt.xticks([-160, -80, 0, 80, 160])
extra = plt.legend(bbox_to_anchor=(1, 0.5), loc='center left', ncol=1, fontsize = 30)
myplot(r"$orientation (deg)$", r"$\rho_{SC}$", tl, fn, xlog=False, legend=False, extra=(extra,), fsax=50, xloc=False)
def fig3b_c(make=1):
# contrast changes: variances decrease, means increase. (in membrane potentials)
# the case of 30, 31.5
# for poisson
#3.3 0.8
#31.9687644444 30.136
# for integration
#1.0 19.6
#31.1467831111 30.3
# saved with name_fit
# 2%
# check out with these parameters; what rates are needed to make the fano-s parallel?
# maybe make a search around these parameters for a better match!
# try to balance the things out
#muRG=0.9;
#muP=1.2;
#cRG=.15;
#cP=.85;
#vRG=2.3;
#vP=.2;
#k=20; % ezt elosztja 50-nel
#m=1.4;
#threshold = 0;
# m_p = 3.3
# v_p = 0.8
# m_i = 1.0
# v_i = 19.6
m_p = 1.2
v_p = 0.2
m_i = 0.9
v_i = 2.3
if(make==1):
l = 50
imgNr = 30000 #30000
# k = 10
# ne = 1.1
# ut = 1.9
# corr_i = 0.15
# corr_p = 0.95
k = 10
ne = 1.4
ut = 0
corr_i = 0.15
corr_p = 0.85
# 1 is the other version, with fig3b_c
# maybe make it free to choose other variance-changing stuff
# 3 is the version with Gergo's params
ui_arr = arange(m_i/2, 1.05*m_i, m_i/10)  # compute only half of the curve; symmetry is applied when plotting
ui_cov = arange(v_i, 1.42*v_i, v_i*0.08)[::-1]
up_arr = arange(m_p/2, 1.05*m_p, m_p/10)
up_cov = arange(v_p, 3.42*v_p, v_p*0.48)[::-1]
#3.4 for Poisson
#1.4 for Integration
times = 3
n = len(ui_arr)
pcoeff_arr = zeros((n,times))
icoeff_arr = zeros((n,times))
pvar_arr = zeros((n,times))
pmean_arr = zeros((n,times))
ivar_arr = zeros((n,times))
imean_arr = zeros((n,times))
for i in range(0,n):
print (i)
for j in range(times):
pcoeff_arr[i][j], icoeff_arr[i][j], pvar_arr[i][j], pmean_arr[i][j], ivar_arr[i][j], imean_arr[i][j] = new_gpairs_match2(ui_arr[i], up_arr[i], ui_cov[i], up_cov[i], corr_i, corr_p, l, imgNr, k, ne, ut, k, ne, ut)[0:6]
save("txt/fig3b_c3.npy", [pcoeff_arr, icoeff_arr, pvar_arr, pmean_arr, ivar_arr, imean_arr])
else:
pcoeff_arr, icoeff_arr, pvar_arr, pmean_arr, ivar_arr, imean_arr = load("txt/fig3b_c3.npy")
print(icoeff_arr)
print(pcoeff_arr)
contr = arange(0,1.05,0.2)
pfano_arr = divide(pvar_arr, pmean_arr)
ifano_arr = divide(ivar_arr, imean_arr)
tl = ""#r"$Mean_{SC}$"
fn = "images/fig3b_c2_m.png"
plt.clf()
plt.plot(contr, pmean_arr[:,0], "r.", label = "Poisson", ms = 10)
plt.plot(contr, imean_arr[:,0], "b.", label = "Integration", ms = 10)
plt.plot(contr, pmean_arr[:,1:], "r.", ms = 10)
plt.plot(contr, imean_arr[:,1:], "b.", ms = 10)
plt.xticks([0.2, 0.5, 0.8])
extra = plt.legend(bbox_to_anchor=(1, 0.5), loc='center left', ncol=1, fontsize = 30)
myplot(r"$contrast$", r"$Mean_{SC}$", tl, fn, xlog=False, legend=False, extra=(extra,), fsax=50, xloc=False)
tl = ""#r"$Fano_{SC}$"
fn = "images/fig3b_c2_f.png"
plt.clf()
plt.plot(contr, pfano_arr[:,0], "r.", label = "Poisson", ms = 10)
plt.plot(contr, ifano_arr[:,0], "b.", label = "Integration", ms = 10)
plt.plot(contr, pfano_arr[:,1:], "r.", ms = 10)
plt.plot(contr, ifano_arr[:,1:], "b.", ms = 10)
plt.xticks([0.2, 0.5, 0.8])
extra = plt.legend(bbox_to_anchor=(1, 0.5), loc='center left', ncol=1, fontsize = 30)
myplot(r"$contrast$", r"$Fano_{SC}$", tl, fn, xlog=False, legend=False, extra=(extra,), fsax=50, xloc=False)
tl = ""#r"$\rho_{SC}$"
fn = "images/fig3b_c2_c.png"
plt.clf()
plt.plot(contr, pcoeff_arr[:,0], "r.", label = "Poisson", ms = 10)
plt.plot(contr, icoeff_arr[:,0], "b.", label = "Integration", ms = 10)
plt.plot(contr, pcoeff_arr[:,1:], "r.", ms = 10)
plt.plot(contr, icoeff_arr[:,1:], "b.", ms = 10)
plt.xticks([0.2, 0.5, 0.8])
extra = plt.legend(bbox_to_anchor=(1, 0.5), loc='center left', ncol=1, fontsize = 30)
myplot(r"$contrast$", r"$\rho_{SC}$", tl, fn, xlog=False, legend=False, extra=(extra,), fsax=50, xloc=False)
def correlation_derivative(make = 1):
# surface for spike count mean and variance -- spkc corr. derivative
l = 50
imgNr = 30000 #30000
k = 10
ne = 1.4
ut = 0
corr_i = 0.15
corr_p = 0.85
# not exactly these params, but almost
ui_arr = arange(-1.5, 8, 0.8)
ui_cov = arange(1, 5, 0.7)
up_arr = arange(-1.5, 8, 0.8)
up_cov = arange(1, 5, 0.7)
if(make==1):
n = len(ui_arr)
m = len(ui_cov)
pcoeff_arr = zeros((n,m))
icoeff_arr = zeros((n,m))
pvar_arr = zeros((n,m))
pmean_arr = zeros((n,m))
ivar_arr = zeros((n,m))
imean_arr = zeros((n,m))
for i in range(0,n):
for j in range (0,m):
print (i, j)
pcoeff_arr[i,j], icoeff_arr[i,j], pvar_arr[i,j], pmean_arr[i,j], ivar_arr[i,j], imean_arr[i,j] = new_gpairs_match2(ui_arr[i], up_arr[i], ui_cov[j], up_cov[j], corr_i, corr_p, l, imgNr, k, ne, ut, k, ne, ut)[0:6]
print(pcoeff_arr[i,j], icoeff_arr[i,j])
save("txt/corrderiv2.npy", [pcoeff_arr, icoeff_arr, pvar_arr, pmean_arr, ivar_arr, imean_arr])
else:
pcoeff_arr, icoeff_arr, pvar_arr, pmean_arr, ivar_arr, imean_arr = load("txt/corrderiv2.npy")
pcoeff_arr = pcoeff_arr.flatten()
pmean_arr = pmean_arr.flatten()
pvar_arr = pvar_arr.flatten()
m1 = 10
m2 = 25
v1 = 10
v2 = 50
from scipy.interpolate import griddata
from scipy.signal import convolve2d
meani = arange(m1,m2,1)
vari = arange(v1,v2,1)
coeffi = griddata((pmean_arr, pvar_arr), pcoeff_arr, (meani[None,:], vari[:,None]), method='linear')
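    # griddata interpolates the scattered (mean, variance) -> correlation samples onto
    # the regular meani x vari grid; with the linear method, grid points outside the
    # convex hull of the samples are filled with NaN.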
#coeffi = convolve2d(coeffi, ones((3,3)), "same");
print(coeffi)
# make the derivative calculation here
coeffim = diff(coeffi,1,1)
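    # diff along axis 1 is a finite-difference approximation of d(rho_SC)/d(Mean_SC)
    # with a step of 1 on the mean grid; the midpoints (meani + 0.5) defined below are
    # used as the new x-coordinates of the derivative.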
meani = arange(m1+0.5, m2-0.5,1)
#vari = arange(v1+0.5, v2-0.5,1)
# print (coeffi.shape)
tl = ""
fn = "images/corrderiv_pmf.png"
plt.clf()
#plt.contour(meani,vari,coeffim,15,linewidths=0.5,colors='k')
M, V = meshgrid(meani,vari)
M = M.flatten()
V = V.flatten()
coeffim=coeffim.flatten()
F = V/M
fanos = arange(0.9,2.7, 0.1)
coeffis = griddata((M, F), coeffim, (meani[None,:], fanos[:,None]), method='linear')
#plt.contourf(meani,vari,coeffim,150,cmap=plt.cm.jet)
plt.contourf(meani/10,fanos,coeffis,150,cmap=plt.cm.jet)
#plt.imshow(coeffim,cmap=plt.cm.jet, interpolation = "bilinear")
plt.clim(-0.1, 0.1)
plt.colorbar()
extra = plt.text(2.6, 2.25, r"$\frac{d\rho_{SC}}{dMean_{SC}}$ (1/k)", fontsize=20)
#plt.scatter(pmean_arr,pvar_arr,marker='o',c='b',s=5)
plt.xlim([m1/10,m2/10])
plt.ylim([1.2,2.2])
myplot(r"Mean$_{SC}$ (k)", r"Fano$_{SC}$ (k/Hz)", tl, fn, xlog=False, extra = (extra,), legend=False, fsax=20)
#
# coeffiv = diff(coeffi,1,0)
# meani = arange(m1,m2,1)
# vari = arange(v1+0.5, v2-0.5,1)
## print (coeffi.shape)
# tl = r"Poisson: $\frac{d\rho_{SC}}{dVar_{SC}}$"
# fn = "images/corrderiv_pv.png"
# plt.clf()
# #plt.contour(meani,vari,coeffiv,15,linewidths=0.5,colors='k')
# plt.contourf(meani,vari,coeffiv,150,cmap=plt.cm.jet)
# plt.clim(-0.1, 0.1)
# plt.colorbar()
# #plt.scatter(pmean_arr,pvar_arr,marker='o',c='b',s=5)
# plt.xlim([m1,m2])
# plt.ylim([v1,v2])
# myplot(r"$Mean_{SC} (spk/s)$", r"$Var_{SC} (spk/s)^2$", tl, fn, xlog=False, legend=False, fsax=50)
def correlation_maximum_line(make = 1):
# surface for spike count mean and variance -- spkc corr. derivative
l = 50
imgNr = 30000 #30000
k = 10
ne = 1.4
ut = 0
corr_i = 0.15
corr_p = 0.85
ui_arr = arange(-1.5, 8, 2)
ui_cov = arange(1, 5, 1)
up_arr = arange(-1.5, 8, 2)
up_cov = arange(1, 5, 1)
n = len(ui_arr)
m = len(ui_cov)
if(make==1):
pcoeff_arr = zeros((n,m))
icoeff_arr = zeros((n,m))
pvar_arr = zeros((n,m))
pmean_arr = zeros((n,m))
ivar_arr = zeros((n,m))
imean_arr = zeros((n,m))
for i in range(0,n):
for j in range (0,m):
print (i, j)
pcoeff_arr[i,j], icoeff_arr[i,j], pvar_arr[i,j], pmean_arr[i,j], ivar_arr[i,j], imean_arr[i,j] = new_gpairs_match2(ui_arr[i], up_arr[i], ui_cov[j], up_cov[j], corr_i, corr_p, l, imgNr, k, ne, ut, k, ne, ut)[0:6]
print(pcoeff_arr[i,j], icoeff_arr[i,j])
save("txt/corrderiv4.npy", [pcoeff_arr, icoeff_arr, pvar_arr, pmean_arr, ivar_arr, imean_arr])
else:
pcoeff_arr, icoeff_arr, pvar_arr, pmean_arr, ivar_arr, imean_arr = load("txt/corrderiv4.npy")
# other one was made with 3 --
# return
mn = up_arr[argmax((100*pcoeff_arr).astype(int), 0)]
mn2 = up_arr[n-1-argmax((100*pcoeff_arr[::-1]).astype(int), 0)]
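    # argmax over the mean axis picks, for each membrane-potential variance, the mean
    # value at which the Poisson spike-count correlation peaks; mn and mn2 scan from
    # opposite ends, so flat maxima yield both edges of the ridge that is plotted below.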
cv = up_cov
fn = "images/corrderiv_max.png"
tl = ""
plt.clf()
plt.plot(mn, cv, "w--", lw=5)
plt.plot(mn2, cv, "w--", lw=5)
plt.xlim([-1.5, 6])
extra = plt.text(5.8, 4.1, r"Mean$_{SC}$ (k)", fontsize=20)
plt.contourf(up_arr,up_cov,pmean_arr.T/10,250,cmap=plt.cm.jet)
plt.clim(0, 10)
plt.colorbar()
myplot(r"Mean$_{mb}$ (mV)", r"Var$_{mb}$ (mV$^2$)", tl, fn, fs=15, fsl=15, xlog=False, extra=(extra,), legend=False, fsax=20, xloc=False)
def put_together():
fig = plt.figure(figsize=(15,10), dpi=1000)
fig.clf()
gs = gridspec.GridSpec(1,2)
padd = 0
hpadd = 0
gs.update(left=padd, right=1-padd, top=1-hpadd, bottom=hpadd, wspace=padd, hspace=-0.3)
im = img.imread("images/corrderiv_pmf.png")
ax = fig.add_subplot(gs[0,0])
ax.imshow(im)
plt.axis('off')
im = img.imread("images/corrderiv_max.png")
ax = fig.add_subplot(gs[0,1])
ax.imshow(im)
plt.axis('off')
fig.savefig("images/corrderiv_together.png", transparent=True, frameon=False)
| gpl-2.0 |
brews/snakebacon | docs/conf.py | 1 | 5837 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# snakebacon documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 8 08:56:05 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
'sphinx.ext.mathjax',
'numpydoc',
]
extlinks = {'issue': ('https://github.com/brews/snakebacon/issues/%s', 'GH'),
'pull': ('https://github.com/brews/snakebacon/pull/%s', 'PR'),
}
autosummary_generate = True
numpydoc_class_members_toctree = True
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'snakebacon'
copyright = '2017, S. Brewster Malevich'
author = 'S. Brewster Malevich'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'snakebacondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'snakebacon.tex', 'snakebacon Documentation',
'S. Brewster Malevich', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'snakebacon', 'snakebacon Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'snakebacon', 'snakebacon Documentation',
author, 'snakebacon', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3.5/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
}
| gpl-3.0 |
cbrafter/TRB18_GPSVA | codes/mainCode/optimisationTest.py | 1 | 4308 | # -*- coding: utf-8 -*-
import sys
import os
import shutil
import psutil
import subprocess
import time
import numpy as np
# from matplotlib import pyplot
from routeGen import routeGen
from sumoConfigGen import sumoConfigGen
from stripXML import stripXML
import multiprocessing as mp
from glob import glob
#os.chdir(os.path.dirname(sys.argv[0]))
sys.path.insert(0, '../sumoAPI')
import GPSControl
import fixedTimeControl
import actuatedControl
import HybridVAControl
import sumoConnect
import readJunctionData
print(sys.path)
import traci
import scipy.optimize as sopt
import itertools
print('Running the script! {} {}'.format(sys.argv[1], sys.argv[2]))
#os.mkdir('/hardmem/results/stuff/')
# with open('/hardmem/results/stuff/sampleT.txt', 'w') as f:
# f.write('Hello World! {} {}'.format(sys.argv[1], sys.argv[2]))
# print(os.path.exists('/hardmem/results/stuff/'))
# print([psutil.cpu_count(), psutil.cpu_count(logical=False)])
def simulation(x):
    #assert len(x) == 5  # x = (FLOW, modelName, tlLogic, CAVratio, run)
runtime = time.time()
# Define Simulation Params
FLOW, modelName, tlLogic, CAVratio, run = x
procID = int(mp.current_process().name[-1])
model = './models/{}_{}/'.format(modelName, procID)
simport = 8812 + procID
N = 2500 # Last time to insert vehicle at
stepSize = 0.1
CAVtau = 1.0
configFile = model + modelName + ".sumocfg"
# Configure the Map of controllers to be run
tlControlMap = {'fixedTime': fixedTimeControl.fixedTimeControl,
'VA': actuatedControl.actuatedControl,
'GPSVA': GPSControl.GPSControl,
'HVA': HybridVAControl.HybridVAControl}
tlController = tlControlMap[tlLogic]
exportPath = '/hardmem/results/' + tlLogic + '/' + modelName + '/'
#print(exportPath + str(os.path.exists(exportPath)))
# Check if model copy for this process exists
if not os.path.isdir(model):
shutil.copytree('./models/{}/'.format(modelName), model)
# this is relative to script not cfg file
if not os.path.exists(exportPath):
print('MADE PATH')
os.makedirs(exportPath)
    #seed = int(sum([ord(x) for x in modelName + tlLogic]) + int(10*CAVratio) + run)
seed = int(run)
vehNr, lastVeh = routeGen(N, CAVratio, CAVtau,
routeFile=model + modelName + '.rou.xml',
seed=seed)
# Edit the the output filenames in sumoConfig
sumoConfigGen(modelName, configFile, exportPath,
CAVratio, stepSize, run, simport)
# Connect to model
connector = sumoConnect.sumoConnect(configFile, gui=False, port=simport)
connector.launchSumoAndConnect()
# Get junction data
jd = readJunctionData.readJunctionData(model + modelName + ".jcn.xml")
junctionsList = jd.getJunctionData()
# Add controller models to junctions
controllerList = []
for junction in junctionsList:
controllerList.append(tlController(junction, flowarg=FLOW))
# Step simulation while there are vehicles
while traci.simulation.getMinExpectedNumber():
# connector.runSimulationForSeconds(1)
traci.simulationStep()
for controller in controllerList:
controller.process()
# Disconnect from current configuration
finTime = traci.simulation.getCurrentTime()*0.001
print(finTime)
connector.disconnect()
# Strip unused data from results file
ext = '{AVR:03d}_{Nrun:03d}.xml'.format(AVR=int(CAVratio*100), Nrun=run)
for filename in ['queuedata', 'tripinfo']:
target = exportPath+filename+ext
stripXML(target)
runtime = time.gmtime(time.time() - runtime)
print('DONE: {}, {}, Run: {:03d}, AVR: {:03d}%, Runtime: {}\n'
.format(modelName, tlLogic, run, int(CAVratio*100),
time.strftime("%H:%M:%S", runtime)))
return finTime
def fopt(x):
print(x)
config = list(itertools.product([x], ['simpleT'], ['HVA'], [0.], [1,2,3,4]))
workpool = mp.Pool(processes=4)
# Run simualtions in parallel
result = workpool.map(simulation, config, chunksize=1)
return np.mean(result)
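# scipy.optimize.fmin minimises fopt with the Nelder-Mead downhill-simplex method,
# starting from x0 = 2; the objective is the mean simulation finish time over the four
# parallel runs, and the loose xtol/ftol keep the number of SUMO runs small.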
print(sopt.fmin(fopt, 2, xtol=0.5, ftol=0.5))
# x=1
# a = simulation([x, 'simpleT', 'HVA', 0, 1])
# b = simulation([x, 'simpleT', 'HVA', 0, 2])
# c = simulation([x, 'simpleT', 'HVA', 0, 3]) | mit |
riyazbhat/Prediction-Strength-and-Gap-Statistics-in-Python | predictionStrength.py | 1 | 3233 | #!/usr/bin/python
import sys
import random
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
def k_fold (data,shuffled):
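    # Despite the name, this performs a single train/test split: rows whose index
    # appears in `shuffled` become the test sample, all other rows the training sample.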
testSample = list()
trainingSample = list()
for i in range(data.shape[0]):
if i in shuffled:
testSample.append(data[i])
else:
trainingSample.append(data[i])
return np.array(trainingSample), np.array(testSample)
def closest_center (point, centroids):
min_index = -1
min_distance = float('inf')
for i in range(len(centroids)):
center = centroids[i]
#print center,
#print point
d = sum((point - center)**2)
#print d
if d < min_distance:
min_index = i
min_distance = d
return min_index
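# For a test cluster k, the prediction strength is the proportion of same-cluster pairs
# of test points that are also assigned to the same centroid of the clustering fitted on
# the training data (Tibshirani & Walther's prediction strength criterion); clusters with
# fewer than two members return inf so they never limit the minimum taken later.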
def calculate_prediction_strength (test_set, test_labels, training_centers, k):
clusterLength = test_labels.tolist().count(k)
if clusterLength <= 1:
return float('inf')
else:
count = 0.
for i in range(len(test_labels)-1):
for j in range(i+1, len(test_labels)):
if test_labels[i] == test_labels[j] == k:
p1 = test_set[i]
p2 = test_set[j]
if (closest_center(p1, training_centers) == closest_center(p2, training_centers)):
count += 1
# Return the proportion of pairs that stayed in the same cluster.
#print count,(clusterLength * (clusterLength - 1) / 2.), clusterLength
count = count / (clusterLength * (clusterLength - 1) / 2.)
return count
def prediction_strength_of_cluster (test_set, test_labels, training_centers, k):
prediction_strengths = [calculate_prediction_strength(test_set, test_labels, training_centers, i) for i in range(k)]
return min(prediction_strengths)
if __name__ == "__main__":
#matrix=np.loadtxt('matrix1k.psp', delimiter=',', dtype=float)
try:
assert sys.argv[1]
except Exception,error:
print error,"Please specify the required argument(s)"
sys.exit(0)
else:
matrix=np.loadtxt(sys.argv[1], delimiter=',', dtype=float)
population = range(matrix.shape[0])
testSetLength = matrix.shape[0]/10
choice = random.sample(population, testSetLength)
maxK = 10
maxTrials = 3
prediction_strengths = np.zeros((maxTrials, maxK))
trainingSet, testingSet = k_fold (matrix,choice)
for trial in range(maxTrials):
#trainingSet, testingSet = k_fold (matrix,choice)
for k in range(1,maxK+1):
if k==1:
prediction_strengths[trial,k-1] = 1.
else:
testCluster = KMeans(n_clusters=k, max_iter=50, n_init=5).fit(testingSet)
trainingCluster = KMeans(n_clusters=k, max_iter=50, n_init=5).fit(trainingSet)
prediction_strengths[trial,k-1] = prediction_strength_of_cluster(testingSet,
testCluster.labels_, trainingCluster.cluster_centers_, k)
means = np.mean(prediction_strengths,0)
stddevs = np.std(prediction_strengths,0)
#We use 0.8 as our prediction strength threshold.
#Find the largest number of clusters with a prediction strength greater than this threshold;
#this forms our estimate of the number of clusters.
print means
if max(means) > 0.8:
estimated = max([i for i,j in enumerate(means) if j > 0.8])+1
else:
estimated = max(means)+1
print "The estimated number of clusters is ", estimated
#print range(1,maxK+1), means
plt.plot(range(1,maxK+1),means)
plt.show()
| mit |
pratapvardhan/scikit-learn | sklearn/svm/tests/test_sparse.py | 35 | 13182 | from nose.tools import assert_raises, assert_true, assert_false
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import assert_warns, assert_raise_message
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
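# Helper: fits the same estimator on dense and on sparse input and checks that the
# fitted attributes (support vectors, dual/primal coefficients) and the predictions,
# decision values and probabilities agree between the two representations.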
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
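    # A callable kernel computing the (sparse-aware) dot product is mathematically
    # identical to kernel='linear', so both classifiers should predict the same labels.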
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
#Test decision_function
#Sanity check, test that decision_function implemented in python
#returns the same as the one in libsvm
# multi class:
svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
clf = svc.fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
scikit-optimize/scikit-optimize | benchmarks/bench_ml.py | 1 | 15648 | """
This code implements a benchmark for black-box optimization algorithms,
applied to the task of optimizing the parameters of ML algorithms for
supervised learning.
The benchmark covers 4 datasets on which the parameters of 6 classes of
supervised models are tuned to optimize performance. The supervised learning
model implementations are taken from sklearn.
A regression task is solved on 2 of the datasets and classification on the
rest; 3 of the model classes are regression models and the rest are
classification models.
"""
from collections import defaultdict
from datetime import datetime
import json
import sys
import math
if sys.version_info.major == 2:
# Python 2
from urllib2 import HTTPError
from urllib import urlopen
else:
    from urllib.error import HTTPError
    from urllib.request import urlopen
from joblib import delayed
from joblib import Parallel
import numpy as np
from sklearn.base import ClassifierMixin
from sklearn.base import RegressorMixin
from sklearn.datasets import fetch_california_housing
from sklearn.datasets import fetch_mldata
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import log_loss
from skopt import gbrt_minimize
from skopt import gp_minimize
from skopt import forest_minimize
from skopt.space import Categorical
from skopt.space import Integer
from skopt.space import Real
MODEL_PARAMETERS = "model parameters"
MODEL_BACKEND = "model backend"
# functions below are used to apply non - linear maps to parameter values, eg
# -3.0 -> 0.001
def pow10map(x):
return 10.0 ** x
def pow2intmap(x):
return int(2.0 ** x)
def nop(x):
return x
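# Each entry of the search spaces below is a (skopt Dimension, transform) pair: the
# optimizer samples a raw value from the Dimension and the transform maps it to the
# value handed to the sklearn model. For example (Real(-5.0, -1), pow10map) searches
# an exponent in [-5, -1] and yields parameter values in [1e-5, 0.1], while
# (Real(5.0, 10.0), pow2intmap) yields integer sizes between 32 and 1024.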
nnparams = {
# up to 1024 neurons
'hidden_layer_sizes': (Real(1.0, 10.0), pow2intmap),
'activation': (Categorical(['identity', 'logistic', 'tanh', 'relu']), nop),
'solver': (Categorical(['lbfgs', 'sgd', 'adam']), nop),
'alpha': (Real(-5.0, -1), pow10map),
'batch_size': (Real(5.0, 10.0), pow2intmap),
'learning_rate': (Categorical(['constant', 'invscaling', 'adaptive']), nop),
'max_iter': (Real(5.0, 8.0), pow2intmap),
'learning_rate_init': (Real(-5.0, -1), pow10map),
'power_t': (Real(0.01, 0.99), nop),
'momentum': (Real(0.1, 0.98), nop),
'nesterovs_momentum': (Categorical([True, False]), nop),
'beta_1': (Real(0.1, 0.98), nop),
'beta_2': (Real(0.1, 0.9999999), nop),
}
MODELS = {
MLPRegressor: nnparams,
SVR: {
'C': (Real(-4.0, 4.0), pow10map),
'epsilon': (Real(-4.0, 1.0), pow10map),
'gamma': (Real(-4.0, 1.0), pow10map)},
DecisionTreeRegressor: {
'max_depth': (Real(1.0, 4.0), pow2intmap),
'min_samples_split': (Real(1.0, 8.0), pow2intmap)},
MLPClassifier: nnparams,
SVC: {
'C': (Real(-4.0, 4.0), pow10map),
'gamma': (Real(-4.0, 1.0), pow10map)},
DecisionTreeClassifier: {
'max_depth': (Real(1.0, 4.0), pow2intmap),
'min_samples_split': (Real(1.0, 8.0), pow2intmap)}
}
# every dataset should have have a mapping to the mixin that can handle it.
DATASETS = {
"Boston": RegressorMixin,
"Housing": RegressorMixin,
"digits": ClassifierMixin,
"Climate Model Crashes": ClassifierMixin,
}
# bunch of dataset preprocessing functions below
def split_normalize(X, y, random_state):
"""
Splits data into training and validation parts.
Test data is assumed to be used after optimization.
Parameters
----------
* `X` [array-like, shape = (n_samples, n_features)]:
Training data.
* `y`: [array-like, shape = (n_samples)]:
Target values.
Returns
-------
Split of data into training and validation sets.
70% of data is used for training, rest for validation.
"""
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, random_state=random_state)
sc = StandardScaler()
sc.fit(X_train, y_train)
X_train, X_test = sc.transform(X_train), sc.transform(X_test)
return X_train, y_train, X_test, y_test
# this is used to process the output of fetch_mldata
def load_data_target(name):
"""
Loads data and target given the name of the dataset.
"""
if name == "Boston":
data = load_boston()
elif name == "Housing":
data = fetch_california_housing()
dataset_size = 1000 # this is necessary so that SVR does not slow down too much
data["data"] = data["data"][:dataset_size]
data["target"] =data["target"][:dataset_size]
elif name == "digits":
data = load_digits()
elif name == "Climate Model Crashes":
try:
data = fetch_mldata("climate-model-simulation-crashes")
except HTTPError as e:
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00252/pop_failures.dat"
            data = urlopen(url).read().decode().split('\n')[1:]  # decode: read() returns bytes under Python 3
            data = [[float(v) for v in d.split()] for d in data if d.strip()]
samples = np.array(data)
data = dict()
data["data"] = samples[:, :-1]
data["target"] = np.array(samples[:, -1], dtype=np.int)
else:
raise ValueError("dataset not supported.")
return data["data"], data["target"]
class MLBench(object):
"""
A class which is used to perform benchmarking of black box optimization
algorithms on various machine learning problems.
On instantiation, the dataset is loaded that is used for experimentation
and is kept in memory in order to avoid delays due to reloading of data.
Parameters
----------
* `model`: scikit-learn estimator
An instance of a sklearn estimator.
* `dataset`: str
Name of the dataset.
* `random_state`: seed
Initialization for the random number generator in numpy.
"""
def __init__(self, model, dataset, random_state):
X, Y = load_data_target(dataset)
self.X_train, self.y_train, self.X_test, self.y_test = split_normalize(
X, Y, random_state)
self.random_state = random_state
self.model = model
self.space = MODELS[model]
def evaluate(self, point):
"""
Fits model using the particular setting of hyperparameters and
evaluates the model validation data.
Parameters
----------
* `point`: dict
A mapping of parameter names to the corresponding values
Returns
-------
* `score`: float
Score (more is better!) for some specific point
"""
X_train, y_train, X_test, y_test = (
self.X_train, self.y_train, self.X_test, self.y_test)
# apply transformation to model parameters, for example exp transformation
point_mapped = {}
for param, val in point.items():
point_mapped[param] = self.space[param][1](val)
model_instance = self.model(**point_mapped)
if 'random_state' in model_instance.get_params():
model_instance.set_params(random_state=self.random_state)
min_obj_val = -5.0
# Infeasible parameters are expected to raise an exception, thus the try
# catch below, infeasible parameters yield assumed smallest objective.
try:
model_instance.fit(X_train, y_train)
if isinstance(model_instance, RegressorMixin): # r^2 metric
y_predicted = model_instance.predict(X_test)
score = r2_score(y_test, y_predicted)
elif isinstance(model_instance, ClassifierMixin): # log loss
y_predicted = model_instance.predict_proba(X_test)
score = -log_loss(y_test, y_predicted) # in the context of this function, the higher score is better
# avoid any kind of singularitites, eg probability being zero, and thus breaking the log_loss
if math.isnan(score):
score = min_obj_val
score = max(score, min_obj_val) # this is necessary to avoid -inf or NaN
except BaseException as ex:
score = min_obj_val # on error: return assumed smallest value of objective function
return score
# this is necessary to generate table for README in the end
table_template = """|Blackbox Function| Minimum | Best minimum | Mean f_calls to min | Std f_calls to min | Fastest f_calls to min
------------------|------------|-----------|---------------------|--------------------|-----------------------
| """
def calculate_performance(all_data):
"""
Calculates the performance metrics as found in "benchmarks" folder of
scikit-optimize and prints them in console.
Parameters
----------
* `all_data`: dict
Traces data collected during run of algorithms. For more details, see
'evaluate_optimizer' function.
"""
sorted_traces = defaultdict(list)
for model in all_data:
for dataset in all_data[model]:
for algorithm in all_data[model][dataset]:
data = all_data[model][dataset][algorithm]
# leave only best objective values at particular iteration
best = [[v[-1] for v in d] for d in data]
supervised_learning_type = "Regression" if ("Regressor" in model) else "Classification"
# for every item in sorted_traces it is 2d array, where first dimension corresponds to
# particular repeat of experiment, and second dimension corresponds to index
# of optimization step during optimization
key = (algorithm, supervised_learning_type)
sorted_traces[key].append(best)
# calculate averages
for key in sorted_traces:
# the meta objective: average over multiple tasks
mean_obj_vals = np.mean(sorted_traces[key], axis=0)
minimums = np.min(mean_obj_vals, axis=1)
f_calls = np.argmin(mean_obj_vals, axis=1)
min_mean = np.mean(minimums)
min_stdd = np.std(minimums)
min_best = np.min(minimums)
f_mean = np.mean(f_calls)
f_stdd = np.std(f_calls)
f_best = np.min(f_calls)
def fmt(float_value):
return ("%.3f" % float_value)
output = str(key[0]) + " | " + " | ".join(
[fmt(min_mean) + " +/- " + fmt(min_stdd)] + [fmt(v) for v in [min_best, f_mean, f_stdd, f_best]])
result = table_template + output
print("")
print(key[1])
print(result)
def evaluate_optimizer(surrogate_minimize, model, dataset, n_calls, random_state):
"""
Evaluates some estimator for the task of optimization of parameters of some
model, given limited number of model evaluations.
Parameters
----------
* `surrogate_minimize`:
Minimization function from skopt (eg gp_minimize) that is used
to minimize the objective.
* `model`: scikit-learn estimator.
sklearn estimator used for parameter tuning.
* `dataset`: str
Name of dataset to train ML model on.
* `n_calls`: int
Budget of evaluations
* `random_state`: seed
Set the random number generator in numpy.
Returns
-------
* `trace`: list of tuples
(p, f(p), best), where p is a dictionary of the form "param name":value,
and f(p) is performance achieved by the model for configuration p
and best is the best value till that index.
Such a list contains history of execution of optimization.
"""
# below seed is necessary for processes which fork at the same time
# so that random numbers generated in processes are different
np.random.seed(random_state)
problem = MLBench(model, dataset, random_state)
space = problem.space
dimensions_names = sorted(space)
dimensions = [space[d][0] for d in dimensions_names]
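    # Dimensions are listed in sorted parameter-name order so that a candidate point x
    # (a plain list of values) can be mapped back to a parameter dict with
    # dict(zip(dimensions_names, x)) inside the objective.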
def objective(x):
# convert list of dimension values to dictionary
x = dict(zip(dimensions_names, x))
# the result of "evaluate" is accuracy / r^2, which is the more the better
y = -problem.evaluate(x)
return y
# optimization loop
result = surrogate_minimize(objective, dimensions, n_calls=n_calls, random_state=random_state)
trace = []
min_y = np.inf
for x, y in zip(result.x_iters, result.func_vals):
min_y = min(y, min_y)
x_dct = dict(zip(dimensions_names, x))
trace.append((x_dct, y, min_y))
print(random_state)
return trace
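# Illustrative call (not executed here): evaluate_optimizer(gp_minimize, SVC, "digits",
# n_calls=32, random_state=0) would return a 32-step trace for tuning the SVC
# hyperparameters defined in MODELS on the digits classification dataset.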
def run(n_calls=32, n_runs=1, save_traces=True, n_jobs=1):
"""
Main function used to run the experiments.
Parameters
----------
* `n_calls`: int
Evaluation budget.
* `n_runs`: int
Number of times to repeat the optimization in order to average out noise.
* `save_traces`: bool
Whether or not to save data collected during optimization
* `n_jobs`: int
Number of different repeats of optimization to run in parallel.
"""
surrogate_minimizers = [gbrt_minimize, forest_minimize, gp_minimize]
selected_models = sorted(MODELS, key=lambda x: x.__name__)
selected_datasets = (DATASETS.keys())
# all the parameter values and objectives collected during execution are stored in list below
all_data = {}
for model in selected_models:
all_data[model] = {}
for dataset in selected_datasets:
if not issubclass(model, DATASETS[dataset]):
continue
all_data[model][dataset] = {}
for surrogate_minimizer in surrogate_minimizers:
print(surrogate_minimizer.__name__, model.__name__, dataset)
seeds = np.random.randint(0, 2**30, n_runs)
raw_trace = Parallel(n_jobs=n_jobs)(
delayed(evaluate_optimizer)(
surrogate_minimizer, model, dataset, n_calls, seed
) for seed in seeds
)
all_data[model][dataset][surrogate_minimizer.__name__] = raw_trace
# convert the model keys to strings so that results can be saved as json
all_data = {k.__name__: v for k,v in all_data.items()}
# dump the recorded objective values as json
if save_traces:
with open(datetime.now().strftime("%m_%Y_%d_%H_%m_%s")+'.json', 'w') as f:
json.dump(all_data, f)
calculate_performance(all_data)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--n_calls', nargs="?", default=50, type=int,
help="Number of function calls.")
parser.add_argument(
'--n_runs', nargs="?", default=10, type=int,
help="Number of re-runs of single algorithm on single instance of a "
"problem, in order to average out the noise.")
parser.add_argument(
'--save_traces', nargs="?", default=False, type=bool,
help="Whether to save pairs (point, objective, best_objective) obtained"
" during experiments in a json file.")
parser.add_argument(
'--n_jobs', nargs="?", default=1, type=int,
help="Number of worker processes used for the benchmark.")
args = parser.parse_args()
run(args.n_calls, args.n_runs, args.save_traces, args.n_jobs)
| bsd-3-clause |