repo_name (stringlengths 7-92) | path (stringlengths 5-149) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 911-693k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
fedspendingtransparency/data-act-broker-backend | tests/unit/dataactbroker/test_update_historical_duns.py | 1 | 27488 | import os
import pandas as pd
from dataactbroker.scripts import update_historical_duns
from dataactcore.config import CONFIG_BROKER
from dataactcore.utils.duns import DUNS_COLUMNS, EXCLUDE_FROM_API
from dataactcore.models.domainModels import DUNS, HistoricDUNS
def test_remove_existing_duns(database):
""" Testing the removing existing duns function"""
sess = database.session
# of the duns 000000000-000000009, the first four are already in the database
all_duns = ['00000000{}'.format(x) for x in range(0, 10)]
existing_duns = all_duns[:4]
data = pd.DataFrame.from_dict({'awardee_or_recipient_uniqu': all_duns})
for duns in existing_duns:
sess.add(DUNS(awardee_or_recipient_uniqu=duns))
sess.commit()
# confirm that the dataframe returned only has the duns not already in the database
expected_duns = list(set(existing_duns) ^ set(all_duns))
new_df = update_historical_duns.remove_existing_duns(data, sess)
assert sorted(expected_duns) == sorted(new_df['awardee_or_recipient_uniqu'].tolist())
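# A hedged sketch (not the broker's actual implementation): the test above implies
# remove_existing_duns simply drops rows whose DUNS already exist in the database.
# The helper name below is an assumption, added only for illustration.
def _remove_existing_duns_sketch(data, sess):
    # DUNS numbers already stored in the DUNS table
    existing = {row.awardee_or_recipient_uniqu
                for row in sess.query(DUNS.awardee_or_recipient_uniqu)}
    # keep only the rows that are not present yet
    return data[~data['awardee_or_recipient_uniqu'].isin(existing)]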
def mock_get_duns_props_from_sam(duns_list):
""" Mock function for get_duns_props as we can't connect to the SAM service """
request_cols = [col for col in DUNS_COLUMNS if col not in EXCLUDE_FROM_API]
columns = request_cols
results = pd.DataFrame(columns=columns)
duns_mappings = {
'000000001': {
'awardee_or_recipient_uniqu': '000000001',
'uei': 'A1',
'legal_business_name': 'Legal Name 1',
'dba_name': 'Name 1',
'entity_structure': '1A',
'ultimate_parent_unique_ide': '999999999',
'ultimate_parent_uei': 'Z9',
'ultimate_parent_legal_enti': 'Parent Legal Name 1',
'address_line_1': 'Test address 1',
'address_line_2': 'Test address 2',
'city': 'Test city',
'state': 'Test state',
'zip': 'Test zip',
'zip4': 'Test zip4',
'country_code': 'Test country',
'congressional_district': 'Test congressional district',
'business_types_codes': [['A', 'B', 'C']],
'business_types': [['Name A', 'Name B', 'Name C']],
'high_comp_officer1_full_na': 'Test Exec 1',
'high_comp_officer1_amount': '1',
'high_comp_officer2_full_na': 'Test Exec 2',
'high_comp_officer2_amount': '2',
'high_comp_officer3_full_na': 'Test Exec 3',
'high_comp_officer3_amount': '3',
'high_comp_officer4_full_na': 'Test Exec 4',
'high_comp_officer4_amount': '4',
'high_comp_officer5_full_na': 'Test Exec 5',
'high_comp_officer5_amount': '5'
},
'000000002': {
'awardee_or_recipient_uniqu': '000000002',
'uei': 'B2',
'legal_business_name': 'Legal Name 2',
'dba_name': 'Name 2',
'entity_structure': '2B',
'ultimate_parent_unique_ide': '999999998',
'ultimate_parent_uei': 'Y8',
'ultimate_parent_legal_enti': 'Parent Legal Name 2',
'address_line_1': 'Other Test address 1',
'address_line_2': 'Other Test address 2',
'city': 'Other Test city',
'state': 'Other Test state',
'zip': 'Other Test zip',
'zip4': 'Other Test zip4',
'country_code': 'Other Test country',
'congressional_district': 'Other Test congressional district',
'business_types_codes': [['D', 'E', 'F']],
'business_types': [['Name D', 'Name E', 'Name F']],
'high_comp_officer1_full_na': 'Test Other Exec 6',
'high_comp_officer1_amount': '6',
'high_comp_officer2_full_na': 'Test Other Exec 7',
'high_comp_officer2_amount': '7',
'high_comp_officer3_full_na': 'Test Other Exec 8',
'high_comp_officer3_amount': '8',
'high_comp_officer4_full_na': 'Test Other Exec 9',
'high_comp_officer4_amount': '9',
'high_comp_officer5_full_na': 'Test Other Exec 10',
'high_comp_officer5_amount': '10'
}
}
for duns in duns_list:
if duns in duns_mappings:
results = results.append(pd.DataFrame(duns_mappings[duns]), sort=True)
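# Note: pandas removed DataFrame.append in 2.0; on newer pandas the same
# accumulation can be written as
#     results = pd.concat([results, pd.DataFrame(duns_mappings[duns])], sort=True)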
return results
def test_update_duns_props(monkeypatch):
""" Testing updating the duns props with both populated/blank data """
monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)
duns_df = pd.DataFrame.from_dict({
'awardee_or_recipient_uniqu': ['000000001', '000000002', '000000003']
})
expected_df = pd.DataFrame.from_dict({
'awardee_or_recipient_uniqu': ['000000001', '000000002', '000000003'],
'uei': ['A1', 'B2', None],
'address_line_1': ['Test address 1', 'Other Test address 1', None],
'address_line_2': ['Test address 2', 'Other Test address 2', None],
'city': ['Test city', 'Other Test city', None],
'state': ['Test state', 'Other Test state', None],
'zip': ['Test zip', 'Other Test zip', None],
'zip4': ['Test zip4', 'Other Test zip4', None],
'country_code': ['Test country', 'Other Test country', None],
'congressional_district': ['Test congressional district', 'Other Test congressional district', None],
'business_types_codes': [['A', 'B', 'C'], ['D', 'E', 'F'], []],
'business_types': [['Name A', 'Name B', 'Name C'], ['Name D', 'Name E', 'Name F'], []],
'entity_structure': ['1A', '2B', None],
'dba_name': ['Name 1', 'Name 2', None],
'ultimate_parent_unique_ide': ['999999999', '999999998', None],
'ultimate_parent_uei': ['Z9', 'Y8', None],
'ultimate_parent_legal_enti': ['Parent Legal Name 1', 'Parent Legal Name 2', None],
'high_comp_officer1_full_na': ['Test Exec 1', 'Test Other Exec 6', None],
'high_comp_officer1_amount': ['1', '6', None],
'high_comp_officer2_full_na': ['Test Exec 2', 'Test Other Exec 7', None],
'high_comp_officer2_amount': ['2', '7', None],
'high_comp_officer3_full_na': ['Test Exec 3', 'Test Other Exec 8', None],
'high_comp_officer3_amount': ['3', '8', None],
'high_comp_officer4_full_na': ['Test Exec 4', 'Test Other Exec 9', None],
'high_comp_officer4_amount': ['4', '9', None],
'high_comp_officer5_full_na': ['Test Exec 5', 'Test Other Exec 10', None],
'high_comp_officer5_amount': ['5', '10', None]
})
assert expected_df.to_dict() == update_historical_duns.update_duns_props(duns_df).to_dict()
def test_update_duns_props_empty(monkeypatch):
""" Special case where no data is returned """
monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)
duns_df = pd.DataFrame.from_dict({
'awardee_or_recipient_uniqu': ['000000003']
})
expected_df = pd.DataFrame.from_dict({
'awardee_or_recipient_uniqu': ['000000003'],
'uei': [None],
'address_line_1': [None],
'address_line_2': [None],
'city': [None],
'state': [None],
'zip': [None],
'zip4': [None],
'country_code': [None],
'congressional_district': [None],
'business_types_codes': [[]],
'business_types': [[]],
'dba_name': [None],
'entity_structure': [None],
'ultimate_parent_unique_ide': [None],
'ultimate_parent_uei': [None],
'ultimate_parent_legal_enti': [None],
'high_comp_officer1_full_na': [None],
'high_comp_officer1_amount': [None],
'high_comp_officer2_full_na': [None],
'high_comp_officer2_amount': [None],
'high_comp_officer3_full_na': [None],
'high_comp_officer3_amount': [None],
'high_comp_officer4_full_na': [None],
'high_comp_officer4_amount': [None],
'high_comp_officer5_full_na': [None],
'high_comp_officer5_amount': [None]
})
assert expected_df.to_dict() == update_historical_duns.update_duns_props(duns_df).to_dict()
def test_run_duns_batches(database, monkeypatch):
""" Test run_duns_batches for the core functionality """
monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)
sess = database.session
all_duns = ['00000000{}'.format(x) for x in range(1, 5)]
existing_duns = all_duns[2:]
for duns in existing_duns:
sess.add(DUNS(awardee_or_recipient_uniqu=duns))
sess.commit()
duns_file = os.path.join(CONFIG_BROKER['path'], 'tests', 'unit', 'data', 'historic_DUNS_export_small.csv')
update_historical_duns.run_duns_batches(duns_file, sess, block_size=1)
expected_results = {
'000000001': {
'awardee_or_recipient_uniqu': '000000001',
'uei': 'A1',
'registration_date': '2004-04-01',
'expiration_date': '2013-01-11',
'last_sam_mod_date': '2013-01-11',
'activation_date': '2012-01-11',
'legal_business_name': 'TEST DUNS 1',
'address_line_1': 'Test address 1',
'address_line_2': 'Test address 2',
'city': 'Test city',
'state': 'Test state',
'zip': 'Test zip',
'zip4': 'Test zip4',
'country_code': 'Test country',
'congressional_district': 'Test congressional district',
'business_types_codes': ['A', 'B', 'C'],
'business_types': ['Name A', 'Name B', 'Name C'],
'dba_name': 'Name 1',
'entity_structure': '1A',
'ultimate_parent_unique_ide': '999999999',
'ultimate_parent_uei': 'Z9',
'ultimate_parent_legal_enti': 'Parent Legal Name 1',
'high_comp_officer1_full_na': 'Test Exec 1',
'high_comp_officer1_amount': '1',
'high_comp_officer2_full_na': 'Test Exec 2',
'high_comp_officer2_amount': '2',
'high_comp_officer3_full_na': 'Test Exec 3',
'high_comp_officer3_amount': '3',
'high_comp_officer4_full_na': 'Test Exec 4',
'high_comp_officer4_amount': '4',
'high_comp_officer5_full_na': 'Test Exec 5',
'high_comp_officer5_amount': '5'
},
'000000002': {
'awardee_or_recipient_uniqu': '000000002',
'uei': 'B2',
'registration_date': '2004-04-02',
'expiration_date': '2013-01-12',
'last_sam_mod_date': '2013-01-12',
'activation_date': '2012-01-12',
'legal_business_name': 'TEST DUNS 2',
'address_line_1': 'Other Test address 1',
'address_line_2': 'Other Test address 2',
'city': 'Other Test city',
'state': 'Other Test state',
'zip': 'Other Test zip',
'zip4': 'Other Test zip4',
'country_code': 'Other Test country',
'congressional_district': 'Other Test congressional district',
'business_types_codes': ['D', 'E', 'F'],
'business_types': ['Name D', 'Name E', 'Name F'],
'dba_name': 'Name 2',
'entity_structure': '2B',
'ultimate_parent_unique_ide': '999999998',
'ultimate_parent_uei': 'Y8',
'ultimate_parent_legal_enti': 'Parent Legal Name 2',
'high_comp_officer1_full_na': 'Test Other Exec 6',
'high_comp_officer1_amount': '6',
'high_comp_officer2_full_na': 'Test Other Exec 7',
'high_comp_officer2_amount': '7',
'high_comp_officer3_full_na': 'Test Other Exec 8',
'high_comp_officer3_amount': '8',
'high_comp_officer4_full_na': 'Test Other Exec 9',
'high_comp_officer4_amount': '9',
'high_comp_officer5_full_na': 'Test Other Exec 10',
'high_comp_officer5_amount': '10'
}
}
results = {}
for duns_obj in sess.query(HistoricDUNS).all():
results[duns_obj.awardee_or_recipient_uniqu] = {
'awardee_or_recipient_uniqu': duns_obj.awardee_or_recipient_uniqu,
'uei': duns_obj.uei,
'registration_date': str(duns_obj.registration_date) if duns_obj.registration_date else None,
'expiration_date': str(duns_obj.expiration_date) if duns_obj.expiration_date else None,
'last_sam_mod_date': str(duns_obj.last_sam_mod_date) if duns_obj.last_sam_mod_date else None,
'activation_date': str(duns_obj.activation_date) if duns_obj.activation_date else None,
'legal_business_name': duns_obj.legal_business_name,
'address_line_1': duns_obj.address_line_1,
'address_line_2': duns_obj.address_line_2,
'city': duns_obj.city,
'state': duns_obj.state,
'zip': duns_obj.zip,
'zip4': duns_obj.zip4,
'country_code': duns_obj.country_code,
'congressional_district': duns_obj.congressional_district,
'business_types_codes': duns_obj.business_types_codes,
'business_types': duns_obj.business_types,
'dba_name': duns_obj.dba_name,
'entity_structure': duns_obj.entity_structure,
'ultimate_parent_unique_ide': duns_obj.ultimate_parent_unique_ide,
'ultimate_parent_uei': duns_obj.ultimate_parent_uei,
'ultimate_parent_legal_enti': duns_obj.ultimate_parent_legal_enti,
'high_comp_officer1_full_na': duns_obj.high_comp_officer1_full_na,
'high_comp_officer1_amount': duns_obj.high_comp_officer1_amount,
'high_comp_officer2_full_na': duns_obj.high_comp_officer2_full_na,
'high_comp_officer2_amount': duns_obj.high_comp_officer2_amount,
'high_comp_officer3_full_na': duns_obj.high_comp_officer3_full_na,
'high_comp_officer3_amount': duns_obj.high_comp_officer3_amount,
'high_comp_officer4_full_na': duns_obj.high_comp_officer4_full_na,
'high_comp_officer4_amount': duns_obj.high_comp_officer4_amount,
'high_comp_officer5_full_na': duns_obj.high_comp_officer5_full_na,
'high_comp_officer5_amount': duns_obj.high_comp_officer5_amount
}
assert results == expected_results
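# A hedged sketch of the batching pattern exercised above (not the broker's
# actual implementation; the chunk handling details are assumptions):
def _run_duns_batches_sketch(duns_file, sess, block_size=10000):
    # stream the historic DUNS export in blocks rather than loading it at once
    for block in pd.read_csv(duns_file, dtype=str, chunksize=block_size):
        # drop DUNS that are already present in the database
        new_block = update_historical_duns.remove_existing_duns(block, sess)
        if not new_block.empty:
            # enrich the remaining rows via the (mocked) SAM properties lookup
            update_historical_duns.update_duns_props(new_block)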
def test_workflows(database, monkeypatch):
""" Test both scenarios of the script, starting with a full run """
monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)
sess = database.session
all_duns = ['00000000{}'.format(x) for x in range(1, 5)]
existing_duns = all_duns[2:]
for duns in existing_duns:
sess.add(DUNS(awardee_or_recipient_uniqu=duns))
sess.commit()
duns_file = os.path.join(CONFIG_BROKER['path'], 'tests', 'unit', 'data', 'historic_DUNS_export_small.csv')
update_historical_duns.run_duns_batches(duns_file, sess, block_size=1)
update_historical_duns.import_historic_duns(sess)
expected_results = {
'000000001': {
'awardee_or_recipient_uniqu': '000000001',
'uei': 'A1',
'registration_date': '2004-04-01',
'expiration_date': '2013-01-11',
'last_sam_mod_date': '2013-01-11',
'activation_date': '2012-01-11',
'legal_business_name': 'TEST DUNS 1',
'address_line_1': 'Test address 1',
'address_line_2': 'Test address 2',
'city': 'Test city',
'state': 'Test state',
'zip': 'Test zip',
'zip4': 'Test zip4',
'country_code': 'Test country',
'congressional_district': 'Test congressional district',
'business_types_codes': ['A', 'B', 'C'],
'business_types': ['Name A', 'Name B', 'Name C'],
'dba_name': 'Name 1',
'entity_structure': '1A',
'ultimate_parent_unique_ide': '999999999',
'ultimate_parent_uei': 'Z9',
'ultimate_parent_legal_enti': 'Parent Legal Name 1',
'high_comp_officer1_full_na': 'Test Exec 1',
'high_comp_officer1_amount': '1',
'high_comp_officer2_full_na': 'Test Exec 2',
'high_comp_officer2_amount': '2',
'high_comp_officer3_full_na': 'Test Exec 3',
'high_comp_officer3_amount': '3',
'high_comp_officer4_full_na': 'Test Exec 4',
'high_comp_officer4_amount': '4',
'high_comp_officer5_full_na': 'Test Exec 5',
'high_comp_officer5_amount': '5'
},
'000000002': {
'awardee_or_recipient_uniqu': '000000002',
'uei': 'B2',
'registration_date': '2004-04-02',
'expiration_date': '2013-01-12',
'last_sam_mod_date': '2013-01-12',
'activation_date': '2012-01-12',
'legal_business_name': 'TEST DUNS 2',
'address_line_1': 'Other Test address 1',
'address_line_2': 'Other Test address 2',
'city': 'Other Test city',
'state': 'Other Test state',
'zip': 'Other Test zip',
'zip4': 'Other Test zip4',
'country_code': 'Other Test country',
'congressional_district': 'Other Test congressional district',
'business_types_codes': ['D', 'E', 'F'],
'business_types': ['Name D', 'Name E', 'Name F'],
'dba_name': 'Name 2',
'entity_structure': '2B',
'ultimate_parent_unique_ide': '999999998',
'ultimate_parent_uei': 'Y8',
'ultimate_parent_legal_enti': 'Parent Legal Name 2',
'high_comp_officer1_full_na': 'Test Other Exec 6',
'high_comp_officer1_amount': '6',
'high_comp_officer2_full_na': 'Test Other Exec 7',
'high_comp_officer2_amount': '7',
'high_comp_officer3_full_na': 'Test Other Exec 8',
'high_comp_officer3_amount': '8',
'high_comp_officer4_full_na': 'Test Other Exec 9',
'high_comp_officer4_amount': '9',
'high_comp_officer5_full_na': 'Test Other Exec 10',
'high_comp_officer5_amount': '10'
},
'000000003': {
'awardee_or_recipient_uniqu': '000000003',
'uei': None,
'registration_date': None,
'expiration_date': None,
'last_sam_mod_date': None,
'activation_date': None,
'legal_business_name': None,
'address_line_1': None,
'address_line_2': None,
'city': None,
'state': None,
'zip': None,
'zip4': None,
'country_code': None,
'congressional_district': None,
'business_types_codes': None,
'business_types': None,
'dba_name': None,
'entity_structure': None,
'ultimate_parent_unique_ide': None,
'ultimate_parent_uei': None,
'ultimate_parent_legal_enti': None,
'high_comp_officer1_full_na': None,
'high_comp_officer1_amount': None,
'high_comp_officer2_full_na': None,
'high_comp_officer2_amount': None,
'high_comp_officer3_full_na': None,
'high_comp_officer3_amount': None,
'high_comp_officer4_full_na': None,
'high_comp_officer4_amount': None,
'high_comp_officer5_full_na': None,
'high_comp_officer5_amount': None
},
'000000004': {
'awardee_or_recipient_uniqu': '000000004',
'uei': None,
'registration_date': None,
'expiration_date': None,
'last_sam_mod_date': None,
'activation_date': None,
'legal_business_name': None,
'address_line_1': None,
'address_line_2': None,
'city': None,
'state': None,
'zip': None,
'zip4': None,
'country_code': None,
'congressional_district': None,
'business_types_codes': None,
'business_types': None,
'dba_name': None,
'entity_structure': None,
'ultimate_parent_unique_ide': None,
'ultimate_parent_uei': None,
'ultimate_parent_legal_enti': None,
'high_comp_officer1_full_na': None,
'high_comp_officer1_amount': None,
'high_comp_officer2_full_na': None,
'high_comp_officer2_amount': None,
'high_comp_officer3_full_na': None,
'high_comp_officer3_amount': None,
'high_comp_officer4_full_na': None,
'high_comp_officer4_amount': None,
'high_comp_officer5_full_na': None,
'high_comp_officer5_amount': None
}
}
results = {}
for duns_obj in sess.query(DUNS).all():
results[duns_obj.awardee_or_recipient_uniqu] = {
'awardee_or_recipient_uniqu': duns_obj.awardee_or_recipient_uniqu,
'uei': duns_obj.uei,
'registration_date': str(duns_obj.registration_date) if duns_obj.registration_date else None,
'expiration_date': str(duns_obj.expiration_date) if duns_obj.expiration_date else None,
'last_sam_mod_date': str(duns_obj.last_sam_mod_date) if duns_obj.last_sam_mod_date else None,
'activation_date': str(duns_obj.activation_date) if duns_obj.activation_date else None,
'legal_business_name': duns_obj.legal_business_name,
'address_line_1': duns_obj.address_line_1,
'address_line_2': duns_obj.address_line_2,
'city': duns_obj.city,
'state': duns_obj.state,
'zip': duns_obj.zip,
'zip4': duns_obj.zip4,
'country_code': duns_obj.country_code,
'congressional_district': duns_obj.congressional_district,
'business_types_codes': duns_obj.business_types_codes,
'business_types': duns_obj.business_types,
'dba_name': duns_obj.dba_name,
'entity_structure': duns_obj.entity_structure,
'ultimate_parent_unique_ide': duns_obj.ultimate_parent_unique_ide,
'ultimate_parent_uei': duns_obj.ultimate_parent_uei,
'ultimate_parent_legal_enti': duns_obj.ultimate_parent_legal_enti,
'high_comp_officer1_full_na': duns_obj.high_comp_officer1_full_na,
'high_comp_officer1_amount': duns_obj.high_comp_officer1_amount,
'high_comp_officer2_full_na': duns_obj.high_comp_officer2_full_na,
'high_comp_officer2_amount': duns_obj.high_comp_officer2_amount,
'high_comp_officer3_full_na': duns_obj.high_comp_officer3_full_na,
'high_comp_officer3_amount': duns_obj.high_comp_officer3_amount,
'high_comp_officer4_full_na': duns_obj.high_comp_officer4_full_na,
'high_comp_officer4_amount': duns_obj.high_comp_officer4_amount,
'high_comp_officer5_full_na': duns_obj.high_comp_officer5_full_na,
'high_comp_officer5_amount': duns_obj.high_comp_officer5_amount
}
assert results == expected_results
# Test that deleting the historic records from the DUNS table (while keeping HistoricDUNS) re-imports the historic values
sess.query(DUNS).filter(DUNS.historic.is_(True)).delete(synchronize_session=False)
# Make sure all the historic DUNS are removed from the DUNS table
assert sess.query(DUNS).filter(DUNS.historic.is_(True)).all() == []
# Redo script but don't go through run_duns_batches
update_historical_duns.clean_historic_duns(sess)
update_historical_duns.import_historic_duns(sess)
results = {}
for duns_obj in sess.query(DUNS).all():
results[duns_obj.awardee_or_recipient_uniqu] = {
'awardee_or_recipient_uniqu': duns_obj.awardee_or_recipient_uniqu,
'uei': duns_obj.uei,
'registration_date': str(duns_obj.registration_date) if duns_obj.registration_date else None,
'expiration_date': str(duns_obj.expiration_date) if duns_obj.expiration_date else None,
'last_sam_mod_date': str(duns_obj.last_sam_mod_date) if duns_obj.last_sam_mod_date else None,
'activation_date': str(duns_obj.activation_date) if duns_obj.activation_date else None,
'legal_business_name': duns_obj.legal_business_name,
'address_line_1': duns_obj.address_line_1,
'address_line_2': duns_obj.address_line_2,
'city': duns_obj.city,
'state': duns_obj.state,
'zip': duns_obj.zip,
'zip4': duns_obj.zip4,
'country_code': duns_obj.country_code,
'congressional_district': duns_obj.congressional_district,
'business_types_codes': duns_obj.business_types_codes,
'business_types': duns_obj.business_types,
'dba_name': duns_obj.dba_name,
'entity_structure': duns_obj.entity_structure,
'ultimate_parent_unique_ide': duns_obj.ultimate_parent_unique_ide,
'ultimate_parent_uei': duns_obj.ultimate_parent_uei,
'ultimate_parent_legal_enti': duns_obj.ultimate_parent_legal_enti,
'high_comp_officer1_full_na': duns_obj.high_comp_officer1_full_na,
'high_comp_officer1_amount': duns_obj.high_comp_officer1_amount,
'high_comp_officer2_full_na': duns_obj.high_comp_officer2_full_na,
'high_comp_officer2_amount': duns_obj.high_comp_officer2_amount,
'high_comp_officer3_full_na': duns_obj.high_comp_officer3_full_na,
'high_comp_officer3_amount': duns_obj.high_comp_officer3_amount,
'high_comp_officer4_full_na': duns_obj.high_comp_officer4_full_na,
'high_comp_officer4_amount': duns_obj.high_comp_officer4_amount,
'high_comp_officer5_full_na': duns_obj.high_comp_officer5_full_na,
'high_comp_officer5_amount': duns_obj.high_comp_officer5_amount
}
assert results == expected_results
def test_clean_historic_duns(database, monkeypatch):
"""
Test that if a new DUNS is loaded and the historic DUNS are reloaded (skipping the major load),
the historic equivalents are removed.
"""
monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)
sess = database.session
all_duns = ['00000000{}'.format(x) for x in range(1, 5)]
existing_duns = all_duns[2:]
for duns in existing_duns:
sess.add(DUNS(awardee_or_recipient_uniqu=duns))
sess.commit()
duns_file = os.path.join(CONFIG_BROKER['path'], 'tests', 'unit', 'data', 'historic_DUNS_export_small.csv')
# normal run
update_historical_duns.run_duns_batches(duns_file, sess, block_size=1)
update_historical_duns.import_historic_duns(sess)
# update old DUNS as part of load_duns_exec_comp.py
updated_duns = sess.query(DUNS).filter(DUNS.awardee_or_recipient_uniqu == '000000002').one()
updated_duns.historic = False
sess.commit()
# rerun with a skip
update_historical_duns.clean_historic_duns(sess)
update_historical_duns.import_historic_duns(sess)
# check to see if historic duns equivalent is removed
expected_count = sess.query(HistoricDUNS).filter(HistoricDUNS.awardee_or_recipient_uniqu == '000000002').count()
assert expected_count == 0
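# A hedged sketch of the cleanup being tested (not the broker's actual code):
# clean_historic_duns presumably drops HistoricDUNS rows whose DUNS have since
# been loaded as current, non-historic records.
def _clean_historic_duns_sketch(sess):
    current = [row.awardee_or_recipient_uniqu
               for row in sess.query(DUNS.awardee_or_recipient_uniqu)
               .filter(DUNS.historic.is_(False))]
    sess.query(HistoricDUNS).\
        filter(HistoricDUNS.awardee_or_recipient_uniqu.in_(current)).\
        delete(synchronize_session=False)
    sess.commit()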
| cc0-1.0 |
pingpan2013/sensor-box-project | sensor_project/genGraphs.py | 1 | 4044 | #!/usr/bin/python
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pandas as pd # used to convert datetime64 to datetime
import csv
import sys
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
class Gen_Graph:
def __init__(self, _filename):
self.filename = _filename
self.data = []
self.dtype = []
def readData(self):
'''Read the data from .csv file'''
with open(self.filename, 'r') as file:
reader = csv.reader(file)
for row in reader:
self.data.append(tuple(row))
return self.data
def genDtype(self):
'''Get the data types, always putting the date column last'''
for i in xrange(len(self.data[0])):
if i != len(self.data[0]) - 1:
self.dtype.append((str(self.data[0][i]), '<f8'))
else:
self.dtype.append((self.data[0][i], '<M8[s]'))
print "Data Type: " + str(self.dtype)
print '=============================================================='
def uniqueish_color(self):
'''Randomly select a color'''
return plt.cm.gist_ncar(np.random.random())
def genGraph(self):
'''Generate the graph with a single shared y axis'''
self.genDtype()
x = np.array(self.data[1:], dtype=self.dtype)
np.save('./graph_data/data', x)
t = np.load('./graph_data/data.npy').view(np.recarray)
fig, ax = plt.subplots(1)
'''Drawing multiple lines in one graph'''
for label in self.data[0]:
if label != 'Time':
dtype = t['{0}'.format(label)]
ax.plot(pd.to_datetime(t.Time), dtype)
ax.set_xlabel(' date ')
'''Formatting the date'''
fig.autofmt_xdate()
ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
plt.title('Sensor Data Flow')
'''Create the labels for different lines'''
labels = list(self.data[0][:-1])
plt.legend(labels, loc='lower left')
plt.show()
def genGraph_m(self):
'''Generate the graph with multiple y axes'''
self.genDtype()
x = np.array(self.data[1:], dtype=self.dtype)
np.save('./graph_data/data', x)
t = np.load('./graph_data/data.npy').view(np.recarray)
fig = plt.figure()
fig.subplots_adjust(right=0.75)
ax = fig.add_subplot(111)
'''Drawing multiple lines with different y axes in one graph'''
lines = []
labels = list(self.data[0][:-1])
for num in xrange(len(self.data[0]) - 1):
label = labels[num]
if num == 0:
dtype = t['{0}'.format(label)]
line1, = ax.plot(pd.to_datetime(t.Time), dtype, color=self.uniqueish_color())
lines.append(line1)
ax.set_ylabel(label)
ax.set_xlabel('Date')
elif label != 'Time':
dtype = t['{0}'.format(label)]
par = ax.twinx()
line2, = par.plot(pd.to_datetime(t.Time), dtype, color=self.uniqueish_color())
lines.append(line2)
par.set_ylabel(label)
'''Formatting the date'''
fig.autofmt_xdate()
ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
plt.title('Sensor Data Flow')
'''Create the labels for different lines'''
ax.legend(lines, labels, loc='lower left')
plt.draw()
plt.show()
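def _offset_right_spine(par_axis, slot, pad=60):
    '''A sketch, not part of the original script: every extra series above
    re-uses ax.twinx(), so the additional y axes overlap on the right-hand
    side. Shifting each subsequent spine outward keeps them readable; the
    helper name and the 60-point padding are assumptions.'''
    par_axis.spines['right'].set_position(('outward', pad * slot))
    par_axis.set_frame_on(True)
    par_axis.patch.set_visible(False)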
def main():
if len(sys.argv) != 2:
print "Error with the parameters! Please specify the file path!"
sys.exit(2)
filename = sys.argv[1]
gg = Gen_Graph(filename)
data = gg.readData()
print "Original Data: "
for i in data:
print i
print '=============================================================='
gg.genGraph_m()
print "Finished Drawing!"
if __name__ == "__main__":
main()
| gpl-3.0 |
LiaoPan/scikit-learn | sklearn/preprocessing/label.py | 36 | 28728 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Joel Nothman <joel.nothman@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import deprecated, column_or_1d
from ..utils.validation import check_array
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelEncoder was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
'multiclass-multioutput', 'multilabel-sequences',
'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
@property
@deprecated("Attribute ``indicator_matrix_`` is deprecated and will be "
"removed in 0.17. Use ``y_type_ == 'multilabel-indicator'`` "
"instead")
def indicator_matrix_(self):
return self.y_type_ == 'multilabel-indicator'
@property
@deprecated("Attribute ``multilabel_`` is deprecated and will be removed "
"in 0.17. Use ``y_type_.startswith('multilabel')`` "
"instead")
def multilabel_(self):
return self.y_type_.startswith('multilabel')
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
self._check_fitted()
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
self._check_fitted()
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
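def _inverse_transform_example():  # illustrative helper, not part of scikit-learn
    """A hedged usage sketch of the threshold semantics documented above.

    >>> lb = LabelBinarizer().fit([1, 2, 6, 4, 2])
    >>> scores = np.array([[-1.2, 0.3, 2.1, 0.7]])  # decision_function-style output
    >>> lb.inverse_transform(scores)  # multiclass: the highest score wins
    array([4])
    """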
def label_binarize(y, classes, neg_label=0, pos_label=1,
sparse_output=False, multilabel=None):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
if multilabel is not None:
warnings.warn("The multilabel parameter is deprecated as of version "
"0.15 and will be removed in 0.17. The parameter is no "
"longer necessary because the value is automatically "
"inferred.", DeprecationWarning)
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
elif y_type == "multilabel-sequences":
Y = MultiLabelBinarizer(classes=classes,
sparse_output=sparse_output).fit_transform(y)
if sp.issparse(Y):
Y.data[:] = pos_label
else:
Y[Y == 1] = pos_label
return Y
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
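def _inverse_binarize_multiclass_example():  # illustrative helper, not part of scikit-learn
    """A small worked example of the dense branch above (values are made up):
    the column with the largest score wins and is mapped back through classes.

    >>> classes = np.array([1, 2, 6])
    >>> scores = np.array([[0.1, 0.9, 0.0], [0.2, 0.1, 0.7]])
    >>> _inverse_binarize_multiclass(scores, classes)
    array([2, 6])
    """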
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
elif output_type == "multilabel-sequences":
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
mlb = MultiLabelBinarizer(classes=classes).fit([])
return mlb.inverse_transform(y)
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
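# A hedged note on the CSR triple built above (values are illustrative): row i
# of the matrix owns columns indices[indptr[i]:indptr[i + 1]], each stored as a
# 1. For example indices=[0, 2, 1], indptr=[0, 2, 3] with shape (2, 3) densify
# to [[1, 0, 1], [0, 1, 0]].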
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
liberatorqjw/scikit-learn | sklearn/decomposition/tests/test_pca.py | 25 | 11108 | import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
def test_pca():
"""PCA on dense arrays"""
pca = PCA(n_components=2)
X = iris.data
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], 2)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
pca = PCA()
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision with n_components == n_features
# with n_components < n_features and with n_components == 0
for n_components in [0, 2, X.shape[1]]:
pca.n_components = n_components
pca.fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
def test_whitening():
"""Check that PCA output has unit-variance"""
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
# mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_almost_equal(X.std(axis=0).std(), 43.9, 1)
for this_PCA, copy in [(x, y) for x in (PCA, RandomizedPCA)
for y in (True, False)]:
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = this_PCA(n_components=n_components, whiten=True, copy=copy)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components))
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = this_PCA(n_components=n_components, whiten=False,
copy=copy).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
def test_explained_variance():
"""Check that PCA output has unit-variance"""
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2).fit(X)
rpca = RandomizedPCA(n_components=2, random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 3)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_,
np.var(X_rpca, axis=0))
def test_pca_check_projection():
"""Test that the projection of data is correct"""
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
"""Test that the projection of data can be inverted"""
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for n_components in [-1, 3]:
assert_raises(ValueError, PCA(n_components).fit, X)
def test_randomized_pca_check_projection():
"""Test that the projection by RandomizedPCA on dense data is correct"""
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = RandomizedPCA(n_components=2, random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
"""Test that the projection by RandomizedPCA on list data is correct"""
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = RandomizedPCA(n_components=1,
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
"""Test that RandomizedPCA is inversible on dense data"""
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = RandomizedPCA(n_components=2, random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = RandomizedPCA(n_components=2, whiten=True,
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_almost_equal(relative_max_delta, 0.11, decimal=2)
def test_pca_dim():
"""Check automated dimensionality setting"""
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
"""TODO: explain what this is testing
Or at least use explicit variable names...
"""
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
+ np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
"""TODO: explain what this is testing
Or at least use explicit variable names...
"""
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
"""
"""
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p)
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95)
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01)
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5).fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
"""Test that probabilistic PCA scoring yields a reasonable score"""
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
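    # reference value: the expected log-likelihood per sample of an isotropic
    # Gaussian with sigma = 0.1 in p dimensions (minus its differential
    # entropy), i.e. h = -(p / 2) * log(2 * pi * e * sigma ** 2)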
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
"""Test that probabilistic PCA correctly separated different datasets"""
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
pca = PCA(n_components=2)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives the same scores if whiten=True
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
ll2 = pca.score(X)
assert_almost_equal(ll1, ll2)
def test_pca_score3():
"""Check that probabilistic PCA selects the right model"""
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
+ np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k)
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| bsd-3-clause |
Open-Power-System-Data/conventional_power_plants | download_and_process_DE_functions.py | 1 | 14757 | # -*- coding: utf-8 -*-
import urllib.parse
import urllib.request
import posixpath
import datetime
import os
import logging
import filecmp
import difflib
import json
import sqlite3
import hashlib
import yaml
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
#from bokeh.io import output_notebook
# output_notebook()
# Logging Setup
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%d %b %Y %H:%M:%S')
logger = logging.getLogger()
# create download and output folders if they do not exist
os.makedirs(os.path.join('download'), exist_ok=True)
os.makedirs(os.path.join('output'), exist_ok=True)
os.makedirs(os.path.join('output', 'original_data'), exist_ok=True)
def get_sha_hash(path, blocksize=65536):
sha_hasher = hashlib.sha256()
with open(path, 'rb') as f:
buffer = f.read(blocksize)
while len(buffer) > 0:
sha_hasher.update(buffer)
buffer = f.read(blocksize)
return sha_hasher.hexdigest()
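# get_sha_hash reads the file in blocks, so it is cheap enough to use for
# change detection of large downloads, e.g.
#   digest = get_sha_hash(filepath)  # hex-encoded SHA-256 of the file contents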
def downloadandcache(url):
"""
    Download a file into the folder "download".
    The file is prefixed with the download date YYYY-M-D-.
    Returns the folder name, the date string, and the file name of the cached copy.
Parameters
----------
url : str
Url of a file to be downloaded
"""
path = urllib.parse.urlsplit(url).path
filename = posixpath.basename(path)
now = datetime.datetime.now()
datestring = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
filepath = os.path.join('download', datestring + "-" + filename)
filepath_original_data = os.path.join('output',
'original_data',
filename)
# check if file exists, otherwise download it
if not os.path.exists(filepath):
logger.info('Downloading file %s', filename)
urllib.request.urlretrieve(url, filepath)
urllib.request.urlretrieve(url, filepath_original_data)
else:
logger.info('Using local file from %s', filepath)
foldername = 'download'
return foldername, datestring, filename
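# callers rebuild the cached path from the returned triple, for example:
#   foldername, datestring, filename = downloadandcache(url)
#   cached_path = os.path.join(foldername, datestring + "-" + filename)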
def decrementday(year, month, day):
"""
Given values for year, month, and day, the values of the previous day are
returned. At the moment, the function assumes that every month has 31 days,
so that it will return February 31st when given values for March 1.
Parameters
----------
year : integer
Integer year
month : integer
Integer month
day : integer
Integer day
"""
if day > 1:
day = day - 1
else:
day = 31
if month > 1:
month = month - 1
else:
month = 12
year = year - 1
return year, month, day
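# with the simplified 31-day months above, for example:
#   decrementday(2016, 3, 1) == (2016, 2, 31)
#   decrementday(2016, 1, 1) == (2015, 12, 31)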
def getolderfilenameandcleanup(foldername, datestring, filename):
"""
    Given the foldername, datestring, and filename returned by the
    downloadandcache function, an older, non-identical file with the same
    naming pattern is searched for in the folder. Files identical to the
    given one are deleted along the way.
Parameters
----------
foldername : str
folder where files are located
datestring : str
string of original file date YYYY-M-D
filename : str
filename of original file
"""
originalfilepath = os.path.join(foldername, datestring + "-" + filename)
now = datetime.datetime.now()
year = now.year
month = now.month
day = now.day
# loop through older possible files
i = 0
while i < 2000:
i = i + 1
year, month, day = decrementday(year, month, day)
datestring = str(year) + "-" + str(month) + "-" + str(day)
filepath = os.path.join(foldername, datestring + "-" + filename)
# Does the file exist?
if os.path.isfile(filepath):
# Check if file is identical to original file. If yes delete this
# file and continue
if filecmp.cmp(originalfilepath, filepath):
# print('files are identical, deleting', filepath)
os.remove(filepath)
else:
# print('files are not identical:', filepath)
return filepath
raise ValueError('no older file found')
def getmatchinglist():
"""
    This function returns the matchinglist located under
    input/data/DE/matching_bnetza_uba.csv
Parameters
----------
none
"""
# read matching list
result = pd.read_csv(
os.path.join('input/data/DE', 'matching_bnetza_uba.csv'),
skiprows=0,
sep=',', # CSV field separator, default is ','
thousands=',', # Thousands separator, default is ','
        decimal='.',  # Decimal separator, default is '.'
encoding='cp1252')
result['uba_id_string'] = (result['uba_match_name'] + '_'
+ result['uba_match_fuel'])
return result
def getbnetzalist(url_bnetza, previous=False):
"""
This function returns the dataframe of the plantlist by the
    Bundesnetzagentur. If previous == True, the next-oldest different plantlist
in the folder is returned as determined by the function
getolderfilenameandcleanup.
Parameters
----------
url_bnetza : str
URL of plant list
previous : boolean
Should previous plant list be returned?
"""
foldername, datestring, filename = downloadandcache(url_bnetza)
if not previous:
plantlist = pd.read_csv(os.path.join(foldername, datestring + "-" + filename),
skiprows=9,
sep=';', # CSV field separator, default is ','
thousands='.', # Thousands separator, default is ','
decimal=',', # Decimal separator, default is '.'
encoding='cp1252')
return plantlist
elif previous:
oldfilename = getolderfilenameandcleanup(foldername, datestring, filename)
oldplantlist = pd.read_csv(oldfilename,
skiprows=9,
sep=';', # CSV field separator, default is ','
thousands='.', # Thousands separator, default is ','
decimal=',', # Decimal separator, default is '.'
encoding='cp1252')
return oldplantlist
def getubalist(url_uba, previous=False):
"""
This function returns the dataframe of the plantlist by the
    Umweltbundesamt. If previous == True, the next-oldest different plantlist
in the folder is returned as determined by the function
getolderfilenameandcleanup.
Parameters
----------
url_uba : str
URL of plant list
previous : boolean
Should previous plant list be returned?
"""
foldername, datestring, filename = downloadandcache(url_uba)
if not previous:
plantlist = pd.read_excel(os.path.join(foldername, datestring + "-" + filename), skiprows=9)
return plantlist
elif previous:
oldfilename = getolderfilenameandcleanup(foldername, datestring, filename)
oldplantlist = pd.read_excel(oldfilename, skiprows=9)
return oldplantlist
def getlistdifferences(oldplantlist, newplantlist):
"""
This function returns the difference between two plantlists, and only takes
into account the columns specified within the function.
Parameters
----------
oldplantlist : DataFrame
Old Plantlist
newplantlist : DataFrame
New Plantlist
"""
oldplantlist['source'] = 'old'
newplantlist['source'] = 'new'
comparisonplantlist = pd.concat([oldplantlist, newplantlist])
# Only include some columns in comparison
includecolumns = ['Kraftwerksnummer Bundesnetzagentur',
'Kraftwerksname',
'Blockname',
'Kraftwerksname / Standort',
'Kraftwerksstandort',
'Primärenergieträger',
]
cols = [col for col in comparisonplantlist.columns if col in includecolumns]
comparisonplantlist = comparisonplantlist.drop_duplicates(keep=False, subset=cols)
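    # keep=False drops every row that occurs in both lists, so only entries
    # unique to either the old or the new list remain (identified by the
    # 'source' column added above)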
# Sort by first column
comparisonplantlist = comparisonplantlist.sort_values(comparisonplantlist.columns[0], ascending=True)
return comparisonplantlist
def matchinglistcheck(url_bnetza, url_uba):
"""
This function checks the BNetzA and UBA plantlists against the
    matchinglist and logs errors. For entries from the UBA plant list, a
    suggested correction with the closest possible match is logged.
    Parameters
    ----------
    url_bnetza : str
        URL of the BNetzA plant list
    url_uba : str
        URL of the UBA plant list
"""
logger.info('Starting Matchinglistcheck')
plantlist_uba = getubalist(url_uba)
plantlist_bnetza = getbnetzalist(url_bnetza)
matchinglist = getmatchinglist()
plantlist_uba['uba_id_string'] = (plantlist_uba['Kraftwerksname / Standort']
+ '_' + plantlist_uba['Primärenergieträger'])
# print(plantlist_uba.uba_id_string)
matchinglist.rename(columns={'ID BNetzA': 'bnetza_id'}, inplace=True)
uba_entrylist = [x for x in plantlist_uba.uba_id_string.tolist() if str(x) != 'nan']
errorfound = False
for entry in matchinglist.index:
# print(entry, matchinglist.loc[entry].bnetza_id, matchinglist.loc[entry].uba_id_string)
bnetza_entries = plantlist_bnetza.loc[(plantlist_bnetza['Kraftwerksnummer Bundesnetzagentur'] == matchinglist.loc[entry].bnetza_id)]
# print(entry, len(bnetza_entries))
if len(bnetza_entries) == 0:
            logger.error('Entry not in BNetzA list: %s %s',
                         matchinglist.loc[entry].bnetza_id,
                         matchinglist.loc[entry].uba_id_string)
errorfound = True
uba_entries = plantlist_uba.loc[(plantlist_uba['uba_id_string'] == matchinglist.loc[entry].uba_id_string)]
# print(entry, len(uba_entries))
if len(uba_entries) == 0:
alternatives = difflib.get_close_matches(matchinglist.loc[entry].uba_id_string, uba_entrylist, n=3, cutoff=0.6)
logger.error('Not in ubalist: ' + matchinglist.loc[entry].uba_id_string + ' ' + matchinglist.loc[entry].bnetza_id + ' Possible alternatives: ' + ', '.join(alternatives))
# raise ValueError('Value in Ubalist missing')
errorfound = True
    if not errorfound:
logger.info('No obvious errors in Matchinglist check found')
else:
logger.error('Errors in Matchinglist exist')
def potentialmatching(url_bnetza, url_uba):
"""
    This function looks for power plants from the UBA list that are not
    contained in the matching list and looks up possible matches in the BNetzA
    list based on name similarity. Candidate matches are logged; the function
    currently returns the reduced BNetzA plant list of not-yet-matched entries.
Parameters
----------
url_bnetza : string
Link to BNetzA List
url_uba: string
Link to UBA List
"""
plantlist_uba = getubalist(url_uba)
plantlist_bnetza = getbnetzalist(url_bnetza)
matchinglist = getmatchinglist()
plantlist_bnetza.rename(columns={'Kraftwerksnummer Bundesnetzagentur':'id'}, inplace=True)
plantlist_bnetza_reduced = plantlist_bnetza[plantlist_bnetza['id'].isin(matchinglist['ID BNetzA']) == False]
plantlist_bnetza_reduced = plantlist_bnetza_reduced[plantlist_bnetza_reduced['Energieträger'] != 'Solare Strahlungsenergie']
plantlist_bnetza_reduced = plantlist_bnetza_reduced[plantlist_bnetza_reduced['Energieträger'] != 'Windenergie (Onshore-Anlage)']
plantlist_bnetza_reduced = plantlist_bnetza_reduced[plantlist_bnetza_reduced['Energieträger'] != 'Windenergie (Offshore-Anlage)']
plantlist_bnetza_reduced['name_and_block'] = plantlist_bnetza_reduced['Kraftwerksname'] + ' ' + plantlist_bnetza_reduced['Blockname'] + '_' + plantlist_bnetza_reduced['Energieträger']
plantlist_uba.rename(columns={'Kraftwerksname / Standort' : 'name',
'Primärenergieträger': 'fuel',
'Anlagenart': 'type'}, inplace=True)
# print(plantlist_uba.columns)
plantlist_uba['uba_id_string'] = (plantlist_uba['name']
+ '_' + plantlist_uba['fuel'])
# Reduce uba list
plantlist_uba_reduced = plantlist_uba[plantlist_uba['uba_id_string'].isin(matchinglist['uba_id_string']) == False]
plantlist_uba_reduced = plantlist_uba_reduced[plantlist_uba_reduced['type'] != 'WEA']
plantlist_uba_reduced = plantlist_uba_reduced[plantlist_uba_reduced['type'] != 'PV']
plantlist_uba_reduced = plantlist_uba_reduced[plantlist_uba_reduced['type'].isnull() == False]
possiblematcheslist = []
for entry in plantlist_uba_reduced.index:
# print(entry)
        uba_name = str(plantlist_uba_reduced.loc[entry].uba_id_string)
        bnetza_names = [x for x in plantlist_bnetza_reduced.name_and_block.tolist() if str(x) != 'nan']
        possiblealternative = difflib.get_close_matches(uba_name, bnetza_names, n=2, cutoff=0.3)
        logger.info('Plant ' + uba_name + ' not in Matchinglist. Possible Matches from BNetzA List: ' + str(possiblealternative))
        possiblematcheslist.append((uba_name, possiblealternative))
# return possiblematcheslist
return plantlist_bnetza_reduced
# Testing this file
if __name__ == "__main__":
# BNetzA Power plant list
url_bnetza = ('http://www.bundesnetzagentur.de/SharedDocs/Downloads/DE/'
'Sachgebiete/Energie/Unternehmen_Institutionen/Versorgungssicherheit/'
'Erzeugungskapazitaeten/Kraftwerksliste/Kraftwerksliste_CSV.csv'
'?__blob=publicationFile&v=10')
# UBA Power plant list
url_uba = ('https://www.umweltbundesamt.de/sites/default/files/medien/372/dokumente/kraftwerke-de-ab-100-mw.xls')
matchinglist = getmatchinglist()
plantlist_bnetza = getbnetzalist(url_bnetza, previous=False)
# plantlist_bnetza_previous = getbnetzalist(url_bnetza, previous=True)
# plantlist_bnetza_differences = getlistdifferences(plantlist_bnetza_previous, plantlist_bnetza)
# plantlist_uba = getubalist(url_uba, previous=False)
# plantlist_uba_previous = getubalist(url_uba, previous=True)
# plantlist_uba_differences = getlistdifferences(plantlist_uba_previous, plantlist_uba)
matchinglistcheck(url_bnetza, url_uba)
res = potentialmatching(url_bnetza, url_uba)
| mit |
q1ang/seaborn | seaborn/tests/test_distributions.py | 14 | 8102 | import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import nose.tools as nt
import numpy.testing as npt
from numpy.testing.decorators import skipif
from . import PlotTestCase
from .. import distributions as dist
try:
import statsmodels.nonparametric.api
assert statsmodels.nonparametric.api
_no_statsmodels = False
except ImportError:
_no_statsmodels = True
class TestKDE(PlotTestCase):
rs = np.random.RandomState(0)
x = rs.randn(50)
y = rs.randn(50)
kernel = "gau"
bw = "scott"
gridsize = 128
clip = (-np.inf, np.inf)
cut = 3
def test_scipy_univariate_kde(self):
"""Test the univariate KDE estimation with scipy."""
grid, y = dist._scipy_univariate_kde(self.x, self.bw, self.gridsize,
self.cut, self.clip)
nt.assert_equal(len(grid), self.gridsize)
nt.assert_equal(len(y), self.gridsize)
for bw in ["silverman", .2]:
dist._scipy_univariate_kde(self.x, bw, self.gridsize,
self.cut, self.clip)
@skipif(_no_statsmodels)
def test_statsmodels_univariate_kde(self):
"""Test the univariate KDE estimation with statsmodels."""
grid, y = dist._statsmodels_univariate_kde(self.x, self.kernel,
self.bw, self.gridsize,
self.cut, self.clip)
nt.assert_equal(len(grid), self.gridsize)
nt.assert_equal(len(y), self.gridsize)
for bw in ["silverman", .2]:
dist._statsmodels_univariate_kde(self.x, self.kernel, bw,
self.gridsize, self.cut,
self.clip)
def test_scipy_bivariate_kde(self):
"""Test the bivariate KDE estimation with scipy."""
clip = [self.clip, self.clip]
x, y, z = dist._scipy_bivariate_kde(self.x, self.y, self.bw,
self.gridsize, self.cut, clip)
nt.assert_equal(x.shape, (self.gridsize, self.gridsize))
nt.assert_equal(y.shape, (self.gridsize, self.gridsize))
nt.assert_equal(len(z), self.gridsize)
# Test a specific bandwidth
clip = [self.clip, self.clip]
x, y, z = dist._scipy_bivariate_kde(self.x, self.y, 1,
self.gridsize, self.cut, clip)
# Test that we get an error with an invalid bandwidth
with nt.assert_raises(ValueError):
dist._scipy_bivariate_kde(self.x, self.y, (1, 2),
self.gridsize, self.cut, clip)
@skipif(_no_statsmodels)
def test_statsmodels_bivariate_kde(self):
"""Test the bivariate KDE estimation with statsmodels."""
clip = [self.clip, self.clip]
x, y, z = dist._statsmodels_bivariate_kde(self.x, self.y, self.bw,
self.gridsize,
self.cut, clip)
nt.assert_equal(x.shape, (self.gridsize, self.gridsize))
nt.assert_equal(y.shape, (self.gridsize, self.gridsize))
nt.assert_equal(len(z), self.gridsize)
@skipif(_no_statsmodels)
def test_statsmodels_kde_cumulative(self):
"""Test computation of cumulative KDE."""
grid, y = dist._statsmodels_univariate_kde(self.x, self.kernel,
self.bw, self.gridsize,
self.cut, self.clip,
cumulative=True)
nt.assert_equal(len(grid), self.gridsize)
nt.assert_equal(len(y), self.gridsize)
# make sure y is monotonically increasing
npt.assert_((np.diff(y) > 0).all())
    def test_kde_cumulative_2d(self):
"""Check error if args indicate bivariate KDE and cumulative."""
with npt.assert_raises(TypeError):
dist.kdeplot(self.x, data2=self.y, cumulative=True)
def test_bivariate_kde_series(self):
df = pd.DataFrame({'x': self.x, 'y': self.y})
ax_series = dist.kdeplot(df.x, df.y)
ax_values = dist.kdeplot(df.x.values, df.y.values)
nt.assert_equal(len(ax_series.collections),
len(ax_values.collections))
nt.assert_equal(ax_series.collections[0].get_paths(),
ax_values.collections[0].get_paths())
class TestJointPlot(PlotTestCase):
rs = np.random.RandomState(sum(map(ord, "jointplot")))
x = rs.randn(100)
y = rs.randn(100)
data = pd.DataFrame(dict(x=x, y=y))
def test_scatter(self):
g = dist.jointplot("x", "y", self.data)
nt.assert_equal(len(g.ax_joint.collections), 1)
x, y = g.ax_joint.collections[0].get_offsets().T
npt.assert_array_equal(self.x, x)
npt.assert_array_equal(self.y, y)
x_bins = dist._freedman_diaconis_bins(self.x)
nt.assert_equal(len(g.ax_marg_x.patches), x_bins)
y_bins = dist._freedman_diaconis_bins(self.y)
nt.assert_equal(len(g.ax_marg_y.patches), y_bins)
def test_reg(self):
g = dist.jointplot("x", "y", self.data, kind="reg")
nt.assert_equal(len(g.ax_joint.collections), 2)
x, y = g.ax_joint.collections[0].get_offsets().T
npt.assert_array_equal(self.x, x)
npt.assert_array_equal(self.y, y)
x_bins = dist._freedman_diaconis_bins(self.x)
nt.assert_equal(len(g.ax_marg_x.patches), x_bins)
y_bins = dist._freedman_diaconis_bins(self.y)
nt.assert_equal(len(g.ax_marg_y.patches), y_bins)
nt.assert_equal(len(g.ax_joint.lines), 1)
nt.assert_equal(len(g.ax_marg_x.lines), 1)
nt.assert_equal(len(g.ax_marg_y.lines), 1)
def test_resid(self):
g = dist.jointplot("x", "y", self.data, kind="resid")
nt.assert_equal(len(g.ax_joint.collections), 1)
nt.assert_equal(len(g.ax_joint.lines), 1)
nt.assert_equal(len(g.ax_marg_x.lines), 0)
nt.assert_equal(len(g.ax_marg_y.lines), 1)
def test_hex(self):
g = dist.jointplot("x", "y", self.data, kind="hex")
nt.assert_equal(len(g.ax_joint.collections), 1)
x_bins = dist._freedman_diaconis_bins(self.x)
nt.assert_equal(len(g.ax_marg_x.patches), x_bins)
y_bins = dist._freedman_diaconis_bins(self.y)
nt.assert_equal(len(g.ax_marg_y.patches), y_bins)
def test_kde(self):
g = dist.jointplot("x", "y", self.data, kind="kde")
nt.assert_true(len(g.ax_joint.collections) > 0)
nt.assert_equal(len(g.ax_marg_x.collections), 1)
nt.assert_equal(len(g.ax_marg_y.collections), 1)
nt.assert_equal(len(g.ax_marg_x.lines), 1)
nt.assert_equal(len(g.ax_marg_y.lines), 1)
def test_color(self):
g = dist.jointplot("x", "y", self.data, color="purple")
purple = mpl.colors.colorConverter.to_rgb("purple")
scatter_color = g.ax_joint.collections[0].get_facecolor()[0, :3]
nt.assert_equal(tuple(scatter_color), purple)
hist_color = g.ax_marg_x.patches[0].get_facecolor()[:3]
nt.assert_equal(hist_color, purple)
def test_annotation(self):
g = dist.jointplot("x", "y", self.data)
nt.assert_equal(len(g.ax_joint.legend_.get_texts()), 1)
g = dist.jointplot("x", "y", self.data, stat_func=None)
nt.assert_is(g.ax_joint.legend_, None)
def test_hex_customise(self):
# test that default gridsize can be overridden
g = dist.jointplot("x", "y", self.data, kind="hex",
joint_kws=dict(gridsize=5))
nt.assert_equal(len(g.ax_joint.collections), 1)
a = g.ax_joint.collections[0].get_array()
nt.assert_equal(28, a.shape[0]) # 28 hexagons expected for gridsize 5
def test_bad_kind(self):
with nt.assert_raises(ValueError):
dist.jointplot("x", "y", self.data, kind="not_a_kind")
| bsd-3-clause |
tuanvu216/udacity-course | intro_to_machine_learning/lesson/lesson_4_choose_your_own_algorithm/your_algorithm.py | 1 | 2628 | #!/usr/bin/python
import matplotlib.pyplot as plt
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture
from time import time
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow" points mixed
### in together--separate them so we can give them different colors in the scatterplot,
### and visually identify them
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]
#### initial visualization
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.scatter(bumpy_fast, grade_fast, color = "b", label="fast")
plt.scatter(bumpy_slow, grade_slow, color = "r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
plt.show()
#################################################################################
### your code here! name your classifier object clf if you want the
### visualization code (prettyPicture) to show you the decision boundary
# K Nearest Neighbor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
clf = KNeighborsClassifier(n_neighbors=1)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
# Random Forest
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
clf = RandomForestClassifier(n_estimators=10)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
# AdaBoost
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
clf = AdaBoostClassifier(n_estimators=100)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
try:
prettyPicture(clf, features_test, labels_test)
except NameError:
pass
| mit |
jzt5132/scikit-learn | setup.py | 76 | 9370 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if (filename.endswith('.so') or filename.endswith('.pyd')
or filename.endswith('.dll')
or filename.endswith('.pyc')):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(scipy_min_version)
scipy_status['version'] = scipy_version
except ImportError:
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(numpy_min_version)
numpy_status['version'] = numpy_version
except ImportError:
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if (len(sys.argv) >= 2
and ('--help' in sys.argv[1:] or sys.argv[1]
in ('--help-commands', 'egg_info', '--version', 'clean'))):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
numpy_min_version)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
scipy_min_version)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
LumPenPacK/NetworkExtractionFromImages | osx_build/nefi2_osx_amd64_xcode_2015/site-packages/numpy_1.11/numpy/lib/npyio.py | 35 | 71412 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(*args, **kwargs):
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
        An object on which attribute look-up can be performed as an
        alternative to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of Numpy arrays is loaded
# in. Pickle does not pass on the encoding information to
# Numpy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
    args : Arguments, optional
        Arrays to save to the file. Since it is not possible for Python to
        know the names of the arrays outside `savez_compressed`, the arrays
        will be saved with names "arr_0", "arr_1", and so on.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Arrays will be saved in the file with the
        keyword names.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
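    Examples
    --------
    Illustrative round trip; arrays come back from `numpy.load` just as with
    an uncompressed ``.npz`` archive:
    >>> test_array = np.random.rand(3, 2)
    >>> test_vector = np.random.rand(4)
    >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
    >>> loaded = np.load('/tmp/123.npz')
    >>> print(np.array_equal(test_array, loaded['a']))
    True
    >>> print(np.array_equal(test_vector, loaded['b']))
    True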
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        x = x.lower()  # normalize case so hex literals such as '0X1p-1' are recognized
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
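# for example, with the converters returned above:
#   _getconv(np.dtype(bool))(b'1') -> True
#   _getconv(np.dtype(np.int32))(b'3.0') -> 3 (integer columns are parsed via float)
#   _getconv(np.dtype(float))(b'0x1.8p1') -> 3.0 (hex literals via float.fromhex)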
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
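    # for example, flatten_dtype(np.dtype([('x', float), ('y', float)])) gives
    # ([dtype('float64'), dtype('float64')], [(1, None), (1, None)]), which
    # pack_items below uses to rebuild each parsed row as a tuple of fields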
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
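# Editor's note: a minimal, hedged sketch (not part of the original module)
# illustrating what flatten_dtype/pack_items above enable: a structured dtype
# whose sub-array field spans several columns is flattened for parsing and
# re-packed per row. The field names and data are illustrative only.
def _demo_loadtxt_structured():
    from io import BytesIO
    import numpy as np
    text = BytesIO(b"1 2.5 3.5\n2 4.0 5.0\n")
    # 'pos' is a (2,) sub-array field: its two columns are parsed separately
    # and packed back into a single field, mirroring pack_items' nesting.
    dt = np.dtype([('id', np.int64), ('pos', np.float64, (2,))])
    return np.loadtxt(text, dtype=dt)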
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
           `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
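# Editor's note: a hedged sketch (not part of the original module) of the
# complex-number branch above: a single specifier is expanded to
# ' (%s+%sj)' per column and each value is written as real/imaginary parts.
# The buffer and values are illustrative only.
def _demo_savetxt_complex():
    from io import BytesIO
    import numpy as np
    buf = BytesIO()
    X = np.array([[1 + 2j, 3 - 4j]])
    np.savetxt(buf, X, fmt='%.2e')  # expanded to ' (%.2e+%.2ej)' per column
    return buf.getvalue()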
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
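# Editor's note: a hedged sketch (not part of the original module) of the
# single-group branch above: with one group in the pattern, the matches are
# first read as a plain array and then re-viewed as a one-field structured
# array. The pattern, field name and data are illustrative only.
def _demo_fromregex_single_group():
    from io import BytesIO
    import numpy as np
    f = BytesIO(b"cost 13\ncost 42\n")
    out = np.fromregex(f, br"cost (\d+)", [('value', np.int64)])
    return out['value']  # -> array([13, 42])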
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, list of str, generator
File, filename, list, or generator to read. If the filename
        extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names are appended with an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
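# Editor's note: a hedged sketch (not part of the original module) of the
# missing/filling machinery above: a single missing marker is attached to every
# column and the scalar filling value is substituted by the per-column
# converters. The marker and data are illustrative only.
def _demo_genfromtxt_missing():
    from io import BytesIO
    import numpy as np
    data = BytesIO(b"1,N/A,3\n4,5,N/A\n")
    # every 'N/A' cell comes back as the filling value -1.0
    return np.genfromtxt(data, delimiter=',',
                         missing_values='N/A', filling_values=-1)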
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
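# Editor's note: a hedged sketch (not part of the original module); with the
# dtype=None default set above, column types are inferred from the data and the
# result is viewed as a recarray with attribute access. Names and data are
# illustrative only.
def _demo_recfromtxt():
    from io import BytesIO
    import numpy as np
    data = BytesIO(b"1 2.5 abc\n2 3.5 def\n")
    rec = np.recfromtxt(data, names=['i', 'x', 's'])
    return rec.i, rec.x, rec.s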
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
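# Editor's note: a hedged sketch (not part of the original module) of the CSV
# defaults set above: names are taken from the header row, the delimiter is a
# comma, dtypes are inferred and field names are lower-cased. The data is
# illustrative only.
def _demo_recfromcsv():
    from io import BytesIO
    import numpy as np
    data = BytesIO(b"A,B\n1,2.5\n3,4.5\n")
    rec = np.recfromcsv(data)
    return rec['a'], rec['b']  # lower-cased names, inferred int/float columns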
| bsd-2-clause |
seckcoder/lang-learn | python/sklearn/examples/cluster/plot_kmeans_digits.py | 3 | 4523 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print __doc__
from time import time
import numpy as np
import pylab as pl
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print "n_digits: %d, \t n_samples %d, \t n_features %d" % (n_digits,
n_samples, n_features)
print 79 * '_'
print ('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print '% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f' % (
name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size),
)
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
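# Editor's note: an added, hedged illustration (not in the original example):
# bench_k_means only needs an estimator exposing fit, labels_ and inertia_, so
# the same table row can be produced for MiniBatchKMeans as well.
from sklearn.cluster import MiniBatchKMeans
bench_k_means(MiniBatchKMeans(init='k-means++', n_clusters=n_digits, n_init=10),
              name="minibatch", data=data)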
print 79 * '_'
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point of the mesh.
x_min, x_max = reduced_data[:, 0].min() + 1, reduced_data[:, 0].max() - 1
y_min, y_max = reduced_data[:, 1].min() + 1, reduced_data[:, 1].max() - 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure(1)
pl.clf()
pl.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=pl.cm.Paired,
aspect='auto', origin='lower')
pl.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
pl.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
pl.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.xticks(())
pl.yticks(())
pl.show()
| unlicense |
amozie/amozie | studzie/keras_gym/mountain_car_v0.py | 1 | 2577 | import numpy as np
import matplotlib.pyplot as plt
import gym
import time
import copy
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge
from keras.optimizers import Adam, RMSprop
from keras.callbacks import History
from keras import backend as K
import tensorflow as tf
from gym import Env, Space, spaces
from gym.utils import seeding
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory, EpisodeParameterMemory
from rl.agents.cem import CEMAgent
from rl.agents import SARSAAgent
from rl.callbacks import TrainEpisodeLogger, CallbackList
class MountainCarEnv(Env):
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self) -> None:
self.env = gym.make('MountainCar-v0')
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
def _step(self, action):
step = self.env.step(action)
step = list(step)
step[1] = np.abs(step[0][1]) - 0.05
return tuple(step)
def _reset(self):
return self.env.reset()
def _seed(self, seed=None):
return self.env.seed(seed)
def _render(self, mode='human', close=False):
return self.env.render(mode, close)
def _close(self):
return self.env.close()
env = MountainCarEnv()
env.seed()
nb_actions = env.action_space.n
x = Input((1,) + env.observation_space.shape)
y = Flatten()(x)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(nb_actions)(y)
y = Activation('linear')(y)
model = Model(x, y)
memory = SequentialMemory(limit=10000, window_length=1)
# policy = BoltzmannQPolicy()
policy = EpsGreedyQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9, batch_size=32,
enable_dueling_network=False, dueling_type='avg', target_model_update=.1, policy=policy)
dqn.compile(Adam(), metrics=['mae'])
hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None)
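# Editor's note: an added, hedged illustration (not in the original script),
# assuming keras-rl records per-episode rewards under 'episode_reward' in the
# returned History; plotting it shows whether the shaped reward defined above
# actually improves during training.
plt.plot(hist.history['episode_reward'])
plt.xlabel('episode')
plt.ylabel('episode reward')
plt.show()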
state = env.reset()
action = env.action_space.sample()
print(action)
state_list = []
for i in range(500):
action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0])
state, reward, done, _ = env.step(2)
state_list.append(reward)
env.render()
env.render(close=True)
dqn.test(env, nb_episodes=5, visualize=True)
env.render(close=True) | apache-2.0 |
depet/scikit-learn | examples/plot_digits_classification.py | 7 | 2231 | """
================================
Recognizing hand-written digits
================================
An example showing how the scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause
# Standard scientific Python imports
import pylab as pl
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
# The digits dataset
digits = datasets.load_digits()
# The data that we are interested in is made of 8x8 images of digits,
# let's have a look at the first 3 images, stored in the `images`
# attribute of the dataset. If we were working from image files, we
# could load them using pylab.imread. For these images know which
# digit they represent: it is given in the 'target' of the dataset.
for index, (image, label) in enumerate(zip(digits.images, digits.target)[:4]):
pl.subplot(2, 4, index + 1)
pl.axis('off')
pl.imshow(image, cmap=pl.cm.gray_r, interpolation='nearest')
pl.title('Training: %i' % label)
# To apply an classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)
# We learn the digits on the first half of the digits
classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2])
# Now predict the value of the digit on the second half:
expected = digits.target[n_samples / 2:]
predicted = classifier.predict(data[n_samples / 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
for index, (image, prediction) in enumerate(
zip(digits.images[n_samples / 2:], predicted)[:4]):
pl.subplot(2, 4, index + 5)
pl.axis('off')
pl.imshow(image, cmap=pl.cm.gray_r, interpolation='nearest')
pl.title('Prediction: %i' % prediction)
pl.show()
| bsd-3-clause |
anntzer/scikit-learn | sklearn/linear_model/_least_angle.py | 3 | 71554 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from joblib import Parallel
from ._base import LinearModel
from ..base import RegressorMixin, MultiOutputMixin
# mypy error: Module 'sklearn.utils' has no attribute 'arrayfuncs'
from ..utils import arrayfuncs, as_float_array # type: ignore
from ..utils import check_random_state
from ..model_selection import check_cv
from ..exceptions import ConvergenceWarning
from ..utils.validation import _deprecate_positional_args
from ..utils.fixes import delayed
SOLVE_TRIANGULAR_ARGS = {'check_finite': False}
@_deprecate_positional_args
def lars_path(
X,
y,
Xy=None,
*,
Gram=None,
max_iter=500,
alpha_min=0,
method="lar",
copy_X=True,
eps=np.finfo(float).eps,
copy_Gram=True,
verbose=0,
return_path=True,
return_n_iter=False,
positive=False
):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
X : None or array-like of shape (n_samples, n_features)
Input data. Note that if X is None then the Gram matrix must be
specified, i.e., cannot be None or False.
y : None or array-like of shape (n_samples,)
Input targets.
Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \
default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Gram : None, 'auto', array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
max_iter : int, default=500
Maximum number of iterations to perform, set to infinity for no limit.
alpha_min : float, default=0
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
copy_X : bool, default=True
If ``False``, ``X`` is overwritten.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_Gram : bool, default=True
If ``False``, ``Gram`` is overwritten.
verbose : int, default=0
Controls output verbosity.
return_path : bool, default=True
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, default=False
Whether to return the number of iterations.
positive : bool, default=False
Restrict coefficients to be >= 0.
This option is only allowed with method 'lasso'. Note that the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha. Only coefficients up to the smallest alpha
value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
the stepwise Lars-Lasso algorithm are typically in congruence with the
solution of the coordinate descent lasso_path function.
Returns
-------
alphas : array-like of shape (n_alphas + 1,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array-like of shape (n_alphas,)
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See Also
--------
lars_path_gram
lasso_path
lasso_path_gram
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Efron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
"""
if X is None and Gram is not None:
raise ValueError(
'X cannot be None if Gram is not None'
'Use lars_path_gram to avoid passing X and y.'
)
return _lars_path_solver(
X=X, y=y, Xy=Xy, Gram=Gram, n_samples=None, max_iter=max_iter,
alpha_min=alpha_min, method=method, copy_X=copy_X,
eps=eps, copy_Gram=copy_Gram, verbose=verbose, return_path=return_path,
return_n_iter=return_n_iter, positive=positive)
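# Editor's note: a hedged usage sketch (not part of the original module) of the
# function above with method='lasso'; the data is a made-up illustration.
def _demo_lars_path():
    import numpy as np
    from sklearn.linear_model import lars_path
    rng = np.random.RandomState(0)
    X = rng.randn(50, 5)
    y = X[:, 0] - 2 * X[:, 3] + 0.01 * rng.randn(50)
    # alphas are the knots of the piecewise-linear path; coefs holds one
    # column of coefficients per knot.
    alphas, active, coefs = lars_path(X, y, method='lasso')
    return alphas, active, coefs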
@_deprecate_positional_args
def lars_path_gram(
Xy,
Gram,
*,
n_samples,
max_iter=500,
alpha_min=0,
method="lar",
copy_X=True,
eps=np.finfo(float).eps,
copy_Gram=True,
verbose=0,
return_path=True,
return_n_iter=False,
positive=False
):
"""lars_path in the sufficient stats mode [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
Xy : array-like of shape (n_samples,) or (n_samples, n_targets)
Xy = np.dot(X.T, y).
Gram : array-like of shape (n_features, n_features)
Gram = np.dot(X.T * X).
n_samples : int or float
Equivalent size of sample.
max_iter : int, default=500
Maximum number of iterations to perform, set to infinity for no limit.
alpha_min : float, default=0
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
copy_X : bool, default=True
If ``False``, ``X`` is overwritten.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_Gram : bool, default=True
If ``False``, ``Gram`` is overwritten.
verbose : int, default=0
Controls output verbosity.
return_path : bool, default=True
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, default=False
Whether to return the number of iterations.
positive : bool, default=False
Restrict coefficients to be >= 0.
This option is only allowed with method 'lasso'. Note that the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha. Only coefficients up to the smallest alpha
value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
the stepwise Lars-Lasso algorithm are typically in congruence with the
solution of the coordinate descent lasso_path function.
Returns
-------
alphas : array-like of shape (n_alphas + 1,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array-like of shape (n_alphas,)
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See Also
--------
lars_path
lasso_path
lasso_path_gram
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Efron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
"""
return _lars_path_solver(
X=None, y=None, Xy=Xy, Gram=Gram, n_samples=n_samples,
max_iter=max_iter, alpha_min=alpha_min, method=method,
copy_X=copy_X, eps=eps, copy_Gram=copy_Gram,
verbose=verbose, return_path=return_path,
return_n_iter=return_n_iter, positive=positive)
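# Editor's note: a hedged usage sketch (not part of the original module) of the
# sufficient-statistics entry point above: only X'y, X'X and the sample count
# are passed, so X and y themselves never need to be kept around here. The data
# is a made-up illustration.
def _demo_lars_path_gram():
    import numpy as np
    from sklearn.linear_model import lars_path_gram
    rng = np.random.RandomState(0)
    X = rng.randn(40, 4)
    y = X[:, 1] + 0.1 * rng.randn(40)
    alphas, active, coefs = lars_path_gram(
        Xy=X.T.dot(y), Gram=X.T.dot(X), n_samples=X.shape[0], method='lasso')
    return alphas, active, coefs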
def _lars_path_solver(
X,
y,
Xy=None,
Gram=None,
n_samples=None,
max_iter=500,
alpha_min=0,
method="lar",
copy_X=True,
eps=np.finfo(float).eps,
copy_Gram=True,
verbose=0,
return_path=True,
return_n_iter=False,
positive=False,
):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
in the case of method='lars', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
X : None or ndarray of shape (n_samples, n_features)
Input data. Note that if X is None then Gram must be specified,
i.e., cannot be None or False.
y : None or ndarray of shape (n_samples,)
Input targets.
Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \
default=None
`Xy = np.dot(X.T, y)` that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Gram : None, 'auto' or array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix `(X' * X)`, if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
n_samples : int or float, default=None
Equivalent size of sample. If `None`, it will be `n_samples`.
max_iter : int, default=500
Maximum number of iterations to perform, set to infinity for no limit.
alpha_min : float, default=0
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
copy_X : bool, default=True
If ``False``, ``X`` is overwritten.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_Gram : bool, default=True
If ``False``, ``Gram`` is overwritten.
verbose : int, default=0
Controls output verbosity.
return_path : bool, default=True
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, default=False
Whether to return the number of iterations.
positive : bool, default=False
Restrict coefficients to be >= 0.
This option is only allowed with method 'lasso'. Note that the model
coefficients will not converge to the ordinary-least-squares solution
for small values of alpha. Only coefficients up to the smallest alpha
value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by
the stepwise Lars-Lasso algorithm are typically in congruence with the
solution of the coordinate descent lasso_path function.
Returns
-------
alphas : array-like of shape (n_alphas + 1,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array-like of shape (n_alphas,)
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See Also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Efron et al.
http://statweb.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<https://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<https://en.wikipedia.org/wiki/Lasso_(statistics)>`_
"""
if method == "lar" and positive:
raise ValueError(
"Positive constraint not supported for 'lar' " "coding method."
)
n_samples = n_samples if n_samples is not None else y.size
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if Gram is None or Gram is False:
Gram = None
if X is None:
raise ValueError('X and Gram cannot both be unspecified.')
elif isinstance(Gram, str) and Gram == 'auto' or Gram is True:
if Gram is True or X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
else:
Gram = None
elif copy_Gram:
Gram = Gram.copy()
if Gram is None:
n_features = X.shape[1]
else:
n_features = Cov.shape[0]
if Gram.shape != (n_features, n_features):
raise ValueError('The shapes of the inputs Gram and Xy'
' do not match.')
if copy_X and X is not None and Gram is None:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
if Gram is None:
L = np.empty((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
else:
L = np.empty((max_features, max_features), dtype=Gram.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (Cov,))
solve_cholesky, = get_lapack_funcs(('potrs',), (L,))
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
if Gram is not None:
Gram_copy = Gram.copy()
Cov_copy = Cov.copy()
while True:
if Cov.size:
if positive:
C_idx = np.argmax(Cov)
else:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
if positive:
C = C_
else:
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
if positive:
sign_active[n_active] = np.ones_like(C_)
else:
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**SOLVE_TRIANGULAR_ARGS)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by
# the test suite. The `equality_tolerance` margin added in 0.16
# to get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e.'
' Reduce max_iter or increase eps parameters.'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
            # alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error, greater than the
            # remaining correlation with the regressors. Time to bail out.
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, _ = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, _ = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
            # with a huge number of features, this takes 50% of the time; it
            # could probably be avoided by updating it using an
            # orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
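        # Step-size selection (informal summary of the lines below): g1 and g2
        # give the smallest positive gamma at which some inactive variable's
        # correlation catches up, in absolute value, with the shared active
        # correlation C, while C / AA is the gamma that drives the active
        # correlations to zero (the full least-squares fit on the active set);
        # tiny32 only guards against division by zero.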
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny32))
if positive:
gamma_ = min(g1, C / AA)
else:
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny32))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
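        # Lasso modification (informal note): z holds the gamma at which each
        # active coefficient would cross zero along the equiangular direction.
        # If that happens before gamma_, the step is truncated there and the
        # offending variable is dropped from the active set; for plain LAR only
        # the sign bookkeeping is updated.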
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
coefs[-add_features:] = 0
alphas = np.resize(alphas, n_iter + add_features)
alphas[-add_features:] = 0
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
            # handle the case when idx has length greater than 1
for ii in idx:
arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii)
n_active -= 1
            # handle the case when idx has length greater than 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
                        # keep the index bookkeeping aligned with the swapped columns
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
temp = Cov_copy[drop_idx] - np.dot(Gram_copy[drop_idx], coef)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(MultiOutputMixin, RegressorMixin, LinearModel):
"""Least Angle Regression model a.k.a. LAR
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : bool, 'auto' or array-like , default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
n_nonzero_coefs : int, default=500
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
fit_path : bool, default=True
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
jitter : float, default=None
Upper bound on a uniform noise parameter to be added to the
`y` values, to satisfy the model's assumption of
one-at-a-time computations. Might help with stability.
.. versionadded:: 0.23
random_state : int, RandomState instance or None, default=None
Determines random number generation for jittering. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`. Ignored if `jitter` is None.
.. versionadded:: 0.23
Attributes
----------
alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller. If this is a list of array-like, the length of the outer
list is `n_targets`.
active_ : list of shape (n_alphas,) or list of such lists
Indices of active variables at the end of the path.
If this is a list of list, the length of the outer list is `n_targets`.
coef_path_ : array-like of shape (n_features, n_alphas + 1) or list \
of such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``. If this is a list
of array-like, the length of the outer list is `n_targets`.
coef_ : array-like of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float or array-like of shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.Lars(n_nonzero_coefs=1)
>>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
Lars(n_nonzero_coefs=1)
>>> print(reg.coef_)
[ 0. -1.11...]
See Also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
method = "lar"
positive = False
@_deprecate_positional_args
def __init__(self, *, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(float).eps, copy_X=True, fit_path=True,
jitter=None, random_state=None):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
self.jitter = jitter
self.random_state = random_state
@staticmethod
def _get_gram(precompute, X, y):
if (not hasattr(precompute, '__array__')) and (
(precompute is True) or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
precompute = np.dot(X.T, X)
return precompute
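    # Illustrative sketch (not from the original docstring): with many more
    # samples than features, or with several targets, a precomputed Gram matrix
    # can be reused instead of recomputing X.T @ X on every fit, e.g.
    #   G = np.dot(X.T, X)                  # hypothetical precomputed Gram
    #   est = Lars(precompute=G).fit(X, y)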
def _fit(self, X, y, max_iter, alpha, fit_path, Xy=None):
"""Auxiliary method to fit the model using X, y as training data"""
n_features = X.shape[1]
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
Gram = self._get_gram(self.precompute, X, y)
self.alphas_ = []
self.n_iter_ = []
self.coef_ = np.empty((n_targets, n_features))
if fit_path:
self.active_ = []
self.coef_path_ = []
for k in range(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True, positive=self.positive)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_[k] = coef_path[:, -1]
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
for k in range(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True,
positive=self.positive)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_offset, y_offset, X_scale)
return self
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Xy : array-like of shape (n_samples,) or (n_samples, n_targets), \
default=None
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
Returns
-------
self : object
returns an instance of self.
"""
X, y = self._validate_data(X, y, y_numeric=True, multi_output=True)
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
if self.jitter is not None:
rng = check_random_state(self.random_state)
noise = rng.uniform(high=self.jitter, size=len(y))
y = y + noise
self._fit(X, y, max_iter=max_iter, alpha=alpha, fit_path=self.fit_path,
Xy=Xy)
return self
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float, default=1.0
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : bool, 'auto' or array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, default=500
Maximum number of iterations to perform.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
fit_path : bool, default=True
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
jitter : float, default=None
Upper bound on a uniform noise parameter to be added to the
`y` values, to satisfy the model's assumption of
one-at-a-time computations. Might help with stability.
.. versionadded:: 0.23
random_state : int, RandomState instance or None, default=None
Determines random number generation for jittering. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`. Ignored if `jitter` is None.
.. versionadded:: 0.23
Attributes
----------
alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller. If this is a list of array-like, the length of the outer
list is `n_targets`.
active_ : list of length n_alphas or list of such lists
Indices of active variables at the end of the path.
If this is a list of list, the length of the outer list is `n_targets`.
coef_path_ : array-like of shape (n_features, n_alphas + 1) or list \
of such arrays
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``. If this is a list
of array-like, the length of the outer list is `n_targets`.
coef_ : array-like of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float or array-like of shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.LassoLars(alpha=0.01)
>>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
LassoLars(alpha=0.01)
>>> print(reg.coef_)
[ 0. -0.963257...]
See Also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
LassoLarsIC
sklearn.decomposition.sparse_encode
"""
method = 'lasso'
@_deprecate_positional_args
def __init__(self, alpha=1.0, *, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(float).eps, copy_X=True, fit_path=True,
positive=False, jitter=None, random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
self.jitter = jitter
self.random_state = random_state
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
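# _lars_path_residues below centers X_train/X_test and y_train/y_test in place
# when fit_intercept is True, so read-only or shared arrays must be copied
# first; hence the writeable check above.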
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(float).eps, positive=False):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array-like of shape (n_samples, n_features)
The data to fit the LARS on
y_train : array-like of shape (n_samples,)
The target variable to fit LARS on
X_test : array-like of shape (n_samples, n_features)
The data to compute the residues on
y_test : array-like of shape (n_samples,)
The target variable to compute the residues on
Gram : None, 'auto' or array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : bool, default=True
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : {'lar' , 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : bool or int, default=False
Sets the amount of verbosity
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
max_iter : int, default=500
Maximum number of iterations to perform.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array-like of shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas)
Coefficients along the path
residues : array-like of shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps,
positive=positive)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model.
See glossary entry for :term:`cross-validation estimator`.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
max_iter : int, default=500
Maximum number of iterations to perform.
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : bool, 'auto' or array-like , default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram matrix
cannot be passed as argument since we will use only subsets of X.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
max_n_alphas : int, default=1000
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : int or None, default=None
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
active_ : list of length n_alphas or list of such lists
Indices of active variables at the end of the path.
If this is a list of lists, the outer list length is `n_targets`.
coef_ : array-like of shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array-like of shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array-like of shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array-like of shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
mse_path_ : array-like of shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Examples
--------
>>> from sklearn.linear_model import LarsCV
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_samples=200, noise=4.0, random_state=0)
>>> reg = LarsCV(cv=5).fit(X, y)
>>> reg.score(X, y)
0.9996...
>>> reg.alpha_
0.0254...
>>> reg.predict(X[:1,])
array([154.0842...])
See Also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = "lar"
@_deprecate_positional_args
def __init__(self, *, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=None, eps=np.finfo(float).eps,
copy_X=True):
self.max_iter = max_iter
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
super().__init__(fit_intercept=fit_intercept,
verbose=verbose, normalize=normalize,
precompute=precompute,
n_nonzero_coefs=500,
eps=eps, copy_X=copy_X, fit_path=True)
def _more_tags(self):
return {'multioutput': False}
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
X, y = self._validate_data(X, y, y_numeric=True)
X = as_float_array(X, copy=self.copy_X)
y = as_float_array(y, copy=self.copy_X)
# init cross-validation generator
cv = check_cv(self.cv, classifier=False)
# As we use cross-validation, the Gram matrix is not precomputed here
Gram = self.precompute
if hasattr(Gram, '__array__'):
warnings.warn('Parameter "precompute" cannot be an array in '
'%s. Automatically switch to "auto" instead.'
% self.__class__.__name__)
Gram = 'auto'
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps, positive=self.positive)
for train, test in cv.split(X, y))
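        # cv_paths holds one (alphas, active, coefs, residues) tuple per fold
        # (see _lars_path_residues). The block below builds a common alpha
        # grid: take the union of all fold alphas, subsample it to at most
        # max_n_alphas points, interpolate each fold's residues onto it, square
        # them and average over samples to obtain one MSE column per fold.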
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, _, _, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
self._fit(X, y, max_iter=self.max_iter, alpha=best_alpha,
Xy=None, fit_path=True)
return self
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm.
See glossary entry for :term:`cross-validation estimator`.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
max_iter : int, default=500
Maximum number of iterations to perform.
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : bool or 'auto' , default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram matrix
cannot be passed as argument since we will use only subsets of X.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
max_n_alphas : int, default=1000
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : int or None, default=None
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsCV only makes sense for problems where
a sparse solution is expected and/or reached.
Attributes
----------
coef_ : array-like of shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array-like of shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array-like of shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array-like of shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
mse_path_ : array-like of shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
active_ : list of int
Indices of active variables at the end of the path.
Examples
--------
>>> from sklearn.linear_model import LassoLarsCV
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(noise=4.0, random_state=0)
>>> reg = LassoLarsCV(cv=5).fit(X, y)
>>> reg.score(X, y)
0.9992...
>>> reg.alpha_
0.0484...
>>> reg.predict(X[:1,])
array([-77.8723...])
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See Also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
@_deprecate_positional_args
def __init__(self, *, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=None, eps=np.finfo(float).eps,
copy_X=True, positive=False):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.max_iter = max_iter
self.normalize = normalize
self.precompute = precompute
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
self.copy_X = copy_X
self.positive = positive
# XXX : we don't use super().__init__
# to avoid setting n_nonzero_coefs
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
criterion : {'bic' , 'aic'}, default='aic'
The type of criterion to use.
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
normalize : bool, default=True
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
precompute : bool, 'auto' or array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, default=500
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients do not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
As a consequence using LassoLarsIC only makes sense for problems where
a sparse solution is expected and/or reached.
Attributes
----------
coef_ : array-like of shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller. If a list, it will be of length `n_targets`.
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array-like of shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criterion is
chosen. This value is larger by a factor of ``n_samples`` compared to
Eqns. 2.15 and 2.16 in (Zou et al, 2007).
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.LassoLarsIC(criterion='bic')
>>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
LassoLarsIC(criterion='bic')
>>> print(reg.coef_)
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
https://en.wikipedia.org/wiki/Akaike_information_criterion
https://en.wikipedia.org/wiki/Bayesian_information_criterion
See Also
--------
lars_path, LassoLars, LassoLarsCV
"""
@_deprecate_positional_args
def __init__(self, criterion='aic', *, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(float).eps, copy_X=True, positive=False):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.positive = positive
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
self.fit_path = True
def _more_tags(self):
return {'multioutput': False}
def fit(self, X, y, copy_X=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
training data.
y : array-like of shape (n_samples,)
target values. Will be cast to X's dtype if necessary
copy_X : bool, default=None
If provided, this parameter will override the choice
of copy_X made at instance creation.
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
if copy_X is None:
copy_X = self.copy_X
X, y = self._validate_data(X, y, y_numeric=True)
X, y, Xmean, ymean, Xstd = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, copy_X)
Gram = self.precompute
alphas_, _, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=self.max_iter,
eps=self.eps, return_n_iter=True, positive=self.positive)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
sigma2 = np.var(y)
df = np.zeros(coef_path_.shape[1], dtype=int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
eps64 = np.finfo('float64').eps
self.criterion_ = (n_samples * mean_squared_error / (sigma2 + eps64) +
K * df) # Eqns. 2.15--16 in (Zou et al, 2007)
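        # Informal note: criterion_ is n_samples times the criterion of Zou et
        # al. (2007), Eqns. 2.15-2.16; that constant factor does not change the
        # argmin below, so the selected alpha is unaffected.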
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
| bsd-3-clause |
jeffersonfparil/GTWAS_POOL_RADseq_SIM | BACKUP_SCRIPTS_20170930/assignPheno.py | 1 | 4355 | #!/usr/bin/env python
import os, subprocess, sys, math
import numpy as np
import matplotlib.pyplot as plt
work_DIR = sys.argv[1]
genoFile = sys.argv[2]
nQTL = int(sys.argv[3])
heritability = float(sys.argv[4])
model = int(sys.argv[5])
os.chdir(work_DIR)
if model == 1:
#################################################
# MODEL 1: additive effects alone
# y = Xb + e; e~N(0, Ve); Ve=Vg(1/1-h^2); Vg=sum(cor(Xq)bqbq')/4
#################################################
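    # Informal sketch of the variance partition used below: with
    # Ve = Vg*(1/h2 - 1) = Vg*(1 - h2)/h2, the realized heritability is
    #   Vg / (Vg + Ve) = h2,
    # and Vg itself accounts for linkage among the causal SNPs through the
    # correlation matrix of their genotype columns.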
GEN = np.genfromtxt(genoFile, delimiter='\t', skip_header=1)
nLOCI = GEN.shape[0]
nIND = GEN.shape[1]
QTL_locations = np.random.choice(range(0, nLOCI), replace=False, size=nQTL)
QTL_locations.sort()
# DEFINING THE DISTRIBUTIONS OF THE EFFECTS:
mean_QTL = 100/(2*nQTL); var_QTL = 2 #normal QTL effects
#QTL effects:
QTL_effects = np.random.normal(mean_QTL, np.sqrt(var_QTL), size=nQTL) #for a mean QTL effect of ~5 and ~mean phenotypic value of 50
QTL_OUT = np.column_stack((QTL_locations, QTL_effects)) #for writing out
##########################################
X=np.transpose(GEN)
GFX=QTL_effects
nFX=nQTL
h2=heritability
#partionning the variance taking into account the linkage among associated SNPs
AssoX=X[:,QTL_locations]
Rho=np.corrcoef(AssoX,rowvar=0)
XtX=GFX.reshape(1,nFX)*GFX.reshape(nFX,1) #GFX * GFX' (nFXxnFX dimensions)
Vg=np.sum(Rho*XtX)/4
Ve=Vg*(1/h2-1)
#Generating the phenotypes based on the variance components
Xb=np.matmul(AssoX, GFX) #alpha
e=np.random.normal(0,Ve**(0.5),nIND)
Y_model1=Xb+e
#OUTPUT
np.savetxt("Simulated_Lolium_perenne_QTL.out", QTL_OUT, fmt='%s' ,delimiter="\t")
np.savetxt("Simulated_Lolium_perenne_PHENOTYPES.data", Y_model1, fmt='%s' ,delimiter="\t")
elif model == 2:
    #################################################
# MODEL 2: additive genetic effects + transcript levels
# y = Xg + Ba + Zt + e; e~N(0,Ve); Ve = (Vg+Vt)(1/1-h^2)
#################################################
transBase = sys.argv[6]
transGeno = sys.argv[7]
GEN = np.genfromtxt(genoFile, delimiter='\t', skip_header=1)
T_base = np.genfromtxt(transBase, delimiter='\t', dtype=int)
T_geno = np.genfromtxt(transGeno, delimiter='\t', dtype=int)
nLOCI = GEN.shape[0]
nIND = GEN.shape[1]
nTRANS = len(T_base)
QTL_locations = np.random.choice(range(0, nLOCI), replace=False, size=nQTL)
QTL_locations.sort()
# DEFINING THE DISTRIBUTIONS OF THE EFFECTS:
mean_QTL = 100/(2*nQTL); var_QTL = 2 #normal QTL effects
mean_bT = (mean_QTL /4); var_bT = 1 #normal base trancript level effects
mean_gT = (mean_QTL /2); var_gT = 1 #normal genotype-specific trancript level effecs
#QTL effects:
QTL_effects = np.random.normal(mean_QTL, np.sqrt(var_QTL), size=nQTL) #for a mean QTL effect of ~5 and ~mean phenotypic value of 50
QTL_OUT = np.column_stack((QTL_locations, QTL_effects)) #for writing out
#Transcript Base-levels effects:
nCausalTrans = int(np.ceil(np.random.normal(nQTL, 1, size=1))[0]) #number of transcripts that affect the phenotype
locCausalTrans = np.random.choice(nTRANS, size=nCausalTrans, replace=False)
# (i.)base-level effects:
T_base_effects = np.random.normal(mean_bT, np.sqrt(var_bT), size=nCausalTrans)
# (ii.)genotype-specific level effects:
T_geno_effects = np.random.normal(mean_gT, np.sqrt(var_gT), size=nCausalTrans)
##########################################
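    # Phenotype components assembled below (descriptive summary):
    #   Xg = X[:, QTL_locations] @ GFX       additive QTL contribution per individual
    #   Ba = sum(T0[causal] * base effects)  a constant shared by all individuals
    #   Zt = T1 @ t1FX                       genotype-specific transcript contribution
    #   et: Gaussian noise with standard deviation sqrt(Vet)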
X=np.transpose(GEN)
GFX=QTL_effects
nFX=nQTL
h2=heritability
T0 = T_base
T1 = T_geno
t0FX = np.zeros((nTRANS,1)); t0FX[locCausalTrans,0] = T_base_effects
t1FX = np.zeros((nTRANS,1)); t1FX[locCausalTrans,0] = T_geno_effects
    #variance partitioning adding Vg with Vt--> variation due to genotype-specific transcripts abundance
    AssoX=X[:,QTL_locations]
    RhoX=np.corrcoef(AssoX,rowvar=0)
    XtX=GFX.reshape(1,nFX)*GFX.reshape(nFX,1) #GFX * GFX' (nFXxnFX dimensions)
    Vg=np.sum(RhoX*XtX)/4 #additive genetic variance, same partition as model 1
    AssoZ=T1[:,locCausalTrans]
    Rho=np.corrcoef(AssoZ,rowvar=0)
    ZtZ=T_geno_effects.reshape(1,nCausalTrans)*T_geno_effects.reshape(nCausalTrans,1)
    Vt=np.sum(Rho*ZtZ)/4
    Vet=(Vg+Vt)*(1/h2-1)
#generating the phenotypes using the new residual distribution et:Vet
Xg = np.matmul(X[:,QTL_locations], GFX)
Ba = np.sum(T0[locCausalTrans]*T_base_effects)
Zt = np.matmul(T1, t1FX)
et=np.random.normal(0,Vet**(0.5),nIND)
Y_model2 = Xg + Ba + Zt[:,0] + et
#OUTPUT
np.savetxt("Simulated_Lolium_perenne_QTL.out", QTL_OUT, fmt='%s' ,delimiter="\t")
np.savetxt("Simulated_Lolium_perenne_PHENOTYPES.data", Y_model1, fmt='%s' ,delimiter="\t") | gpl-3.0 |
suyashbire1/pyhton_scripts_mom6 | plot_twapv_budget_complete.py | 1 | 18150 | import sys
import readParams_moreoptions as rdp1
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from mom_plot1 import m6plot, xdegtokm
import numpy as np
from netCDF4 import MFDataset as mfdset, Dataset as dset
import time
from plot_twamomx_budget_complete_direct_newest import extract_twamomx_terms
from plot_twamomy_budget_complete_direct_newest import extract_twamomy_terms
import pyximport
pyximport.install()
from getvaratzc import getvaratzc5, getvaratzc
def getutwa(fhgeo, fh, fh2, sl):
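    # Thickness-weighted time-averaged zonal velocity at u (Cu) points:
    # time-mean (dt-weighted) zonal transport uh divided by the time-mean layer
    # thickness h_Cu and the cell width dyCu; thicknesses below 1e-3 are masked.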
dt = fh.variables['average_DT'][:]
dt = dt[:,np.newaxis,np.newaxis,np.newaxis]
uh = (fh.variables['uh_masked'][sl].filled(0)*dt).sum(axis=0,keepdims=True)/np.sum(dt)
h_cu = fh.variables['h_Cu'][sl].filled(0).mean(axis=0,keepdims=True)
h_cu = np.ma.masked_array(h_cu,mask=(h_cu<1e-3))
dycu = fhgeo.variables['dyCu'][sl[2:]]
utwa = uh/h_cu/dycu
return utwa, h_cu
def getvtwa(fhgeo, fh, fh2, sl):
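    # Thickness-weighted time-averaged meridional velocity at v (Cv) points,
    # computed as for u above. A sign-reversed copy of the last column is
    # appended so that downstream finite differences have a value at the
    # (presumably solid) eastern boundary.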
dt = fh.variables['average_DT'][:]
dt = dt[:,np.newaxis,np.newaxis,np.newaxis]
vh = (fh.variables['vh_masked'][sl]*dt).sum(axis=0,keepdims=True)/np.sum(dt)
h_cv = fh.variables['h_Cv'][sl].mean(axis=0,keepdims=True)
h_cv = np.ma.masked_array(h_cv,mask=(h_cv<1e-3))
dxcv = fhgeo.variables['dxCv'][sl[2:]]
vtwa = vh/dxcv/h_cv
vtwa = np.concatenate((vtwa,-vtwa[:,:,:,-1:]),axis=3)
h_cv = np.concatenate((h_cv,h_cv[:,:,:,-1:]),axis=3)
return vtwa, h_cv
def getpv(fhgeo, fh, fh2, xs, xe, ys, ye, zs=0, ze=None):
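    # Potential vorticity "hash" at vorticity (Bu) points:
    #   pvhash = (f - d(utwa)/dy + d(vtwa)/dx) / h_q,
    # where h_q is the four-point average of the neighbouring u- and v-point
    # layer thicknesses.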
sl = np.s_[:,zs:ze,ys:ye,xs:xe]
slpy = np.s_[:,zs:ze,ys:ye+1,xs:xe]
utwa,h_cu = getutwa(fhgeo, fh, fh2, slpy)
dybu = fhgeo.variables['dyBu'][sl[2:]]
utway = np.diff(utwa,axis=2)/dybu
vtwa,h_cv = getvtwa(fhgeo, fh, fh2, sl)
dxbu = fhgeo.variables['dxBu'][sl[2:]]
vtwax = np.diff(vtwa,axis=3)/dxbu
h_q = 0.25*(h_cu[:,:,:-1,:] + h_cu[:,:,1:,:] +
h_cv[:,:,:,:-1] + h_cv[:,:,:,1:])
f = fhgeo.variables['f'][sl[2:]]
pvhash = (f - utway + vtwax)/h_q
return pvhash, h_q
def extract_twapv_terms(geofil,vgeofil,fil,fil2,xstart,xend,ystart,yend,zs,ze,meanax,fil3=None,
alreadysaved=False):
if not alreadysaved:
keepax = ()
for i in range(4):
if i not in meanax:
keepax += (i,)
fhgeo = dset(geofil)
fh = mfdset(fil)
fh2 = mfdset(fil2)
zi = rdp1.getdims(fh)[2][0]
dbl = -np.diff(zi)*9.8/1031
(xs,xe),(ys,ye),dimq = rdp1.getlatlonindx(fh,wlon=xstart,elon=xend,
slat=ystart, nlat=yend,zs=zs,ze=ze,xhxq='xq',yhyq='yq')
dxbu = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[2][4]
dybu = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[2][5]
aq = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[1][1]
f = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[-1]
nt_const = dimq[0].size
pvhash,hq = getpv(fhgeo, fh, fh2, xs, xe, ys, ye)
sl = np.s_[:,zs:ze,ys:ye,xs:xe]
slpy = np.s_[:,zs:ze,ys:ye+1,xs:xe]
dxcu = fhgeo.variables['dxCu'][slpy[2:]]
dycv = fhgeo.variables['dyCv'][sl[2:]]
dycv = np.concatenate((dycv,dycv[:,-1:]),axis=1)
if fil3:
fh3 = mfdset(fil3)
sltn = np.s_[-1:,zs:ze,ys:ye,xs:xe]
islayerdeep0 = fh3.variables['islayerdeep'][-1:,0,0,0]
islayerdeep = (fh3.variables['islayerdeep'][sltn].filled(np.nan))
swash = (islayerdeep0 - islayerdeep)/islayerdeep0*100
fh3.close()
else:
swash = None
xmom = extract_twamomx_terms(geofil,vgeofil,fil,fil2,xs,xe,ys,ye+1,zs,ze,(0,),
alreadysaved=False,xyasindices=True,calledfrompv=True)[2]
ymom = extract_twamomy_terms(geofil,vgeofil,fil,fil2,xs,xe,ys,ye,zs,ze,(0,),
alreadysaved=False,xyasindices=True,calledfrompv=True)[2]
xmom = xmom[np.newaxis,:,:,:,:]
ymom = ymom[np.newaxis,:,:,:,:]
ymom = np.concatenate((ymom,ymom[:,:,:,-1:]),axis=3)
bxppvflx = np.sum(xmom[:,:,:,:,[0,1,3,4]],axis=4)
pvhash1,_ = getpv(fhgeo, fh, fh2, xs, xe, ys-1, ye+1)
sl1 = np.s_[:,zs:ze,ys-1:ye+1,xs:xe]
vtwa1, h_cv1 = getvtwa(fhgeo,fh,fh2,sl1)
vtwa1 = 0.5*(vtwa1[:,:,:,:-1] + vtwa1[:,:,:,1:])
pvhashvtwa = pvhash1*vtwa1
sl1 = np.s_[:,zs:ze,ys:ye+1,xs:xe]
h_cu1 = fh.variables['h_Cu'][sl1].filled(np.nan).mean(axis=0,keepdims=True)
h_cu1 = np.ma.masked_array(h_cu1,mask=(h_cu1<1e-3))
pvflxx = h_cu1*(pvhashvtwa[:,:,:-1,:]+pvhashvtwa[:,:,1:,:])/2
byppvflx = np.sum(ymom[:,:,:,:,[0,1,3,4]],axis=4)
pvhash1,_ = getpv(fhgeo, fh, fh2, xs-1, xe, ys, ye)
sl1 = np.s_[:,zs:ze,ys:ye+1,xs-1:xe]
utwa1, h_cu1 = getutwa(fhgeo,fh,fh2,sl1)
utwa1 = 0.5*(utwa1[:,:,:-1,:]+utwa1[:,:,1:,:])
pvhashutwa = pvhash1*utwa1
pvhashutwa[:,:,:,-1] = 0.0
sl1 = np.s_[:,zs:ze,ys:ye,xs:xe]
h_cv1 = fh.variables['h_Cv'][sl1].mean(axis=0,keepdims=True)
h_cv1 = np.ma.masked_array(h_cv1,mask=(h_cv1<1e-3))
pvflxy = h_cv1*(pvhashutwa[:,:,:,:-1]+pvhashutwa[:,:,:,1:])/2
pvflxy = np.concatenate((pvflxy,pvflxy[:,:,:,-1:]),axis=3)
bx = bxppvflx - pvflxx
by = byppvflx + pvflxy
xmom1 = xmom[:,:,:,:,[2,5,6,7,8,9]]
xmom1 = np.concatenate((+pvflxx[:,:,:,:,np.newaxis], xmom1, bx[:,:,:,:,np.newaxis]), axis = 4)
ymom1 = ymom[:,:,:,:,[2,5,6,7,8,9]]
ymom1 = np.concatenate((-pvflxy[:,:,:,:,np.newaxis], ymom1, by[:,:,:,:,np.newaxis]), axis = 4)
#pv = (-np.diff(xmom*dxcu[:,:,np.newaxis],axis=2) + np.diff(ymom*dycv[:,:,np.newaxis],axis=3))/aq[:,:,np.newaxis]
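        # The PV budget below is the curl of the TWA momentum budgets taken
        # term by term at vorticity points: -d(xmom)/dy + d(ymom)/dx, using the
        # Bu-point grid spacings dybu and dxbu.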
pv = -np.diff(xmom,axis=2)/dybu[:,:,np.newaxis] + np.diff(ymom,axis=3)/dxbu[:,:,np.newaxis]
pv1x = -np.diff(xmom1*dxcu[:,:,np.newaxis],axis=2)/aq[:,:,np.newaxis]
pv1y = np.diff(ymom1*dycv[:,:,np.newaxis],axis=3)/aq[:,:,np.newaxis]
slyp = np.s_[:,:,ys:ye+1,xs:xe]
ah1 = fhgeo.variables['Ah'][slyp[2:]]
slxmyp = np.s_[:,:,ys:ye+1,xs-1:xe]
uh = fh2.variables['uh'][slxmyp].filled(0).mean(axis=0,keepdims=True)
uhx = np.diff(uh,axis=3)/ah1
uhx = np.concatenate((uhx,uhx[:,:,:,-1:]),axis=3)
uhx = 0.25*(uhx[:,:,:-1,:-1] + uhx[:,:,:-1,1:] + uhx[:,:,1:,:-1] +
uhx[:,:,1:,1:])
pv1y[:,:,:,:,0] += pvhash*uhx
slymp = np.s_[:,:,ys-1:ye+1,xs:xe]
vh = fh2.variables['vh'][slymp].mean(axis=0,keepdims=True)
vhy = np.diff(vh,axis=2)/ah1
vhy = np.concatenate((vhy,vhy[:,:,:,-1:]),axis=3)
vhy = 0.25*(vhy[:,:,:-1,:-1] + vhy[:,:,:-1,1:] + vhy[:,:,1:,:-1] +
vhy[:,:,1:,1:])
pv1x[:,:,:,:,0] += pvhash*vhy
wd = fh2.variables['wd'][slyp].mean(axis=0,keepdims=True)
wdb = np.diff(wd,axis=1)
wdb = np.concatenate((wdb,wdb[:,:,:,-1:]),axis=3)
wdb = 0.25*(wdb[:,:,:-1,:-1] + wdb[:,:,:-1,1:] + wdb[:,:,1:,:-1] +
wdb[:,:,1:,1:])
pv3 = pvhash*wdb
pv3 = pv3[:,:,:,:,np.newaxis]
#hq[hq<1] = np.nan
pvnew = np.concatenate((pv1y[:,:,:,:,:1],
pv1x[:,:,:,:,:1],
pv3,
pv1x[:,:,:,:,1:-1],
pv1y[:,:,:,:,1:-1],
pv1x[:,:,:,:,-1:]+pv1y[:,:,:,:,-1:]),axis=4)/hq[:,:,:,:,np.newaxis]
pv = np.ma.filled(pv.astype(np.float64), np.nan)
pvnew = np.ma.filled(pvnew.astype(np.float64), np.nan)
pvhash = np.ma.filled(pvhash.astype(np.float64), np.nan)
pv = np.nanmean(pv,meanax,keepdims=True)
pvnew = np.nanmean(pvnew,meanax,keepdims=True)
pvhash = np.nanmean(pvhash,meanax,keepdims=True)
X = dimq[keepax[1]]
Y = dimq[keepax[0]]
if 1 in keepax:
dt = fh.variables['average_DT'][:]
dt = dt[:,np.newaxis,np.newaxis,np.newaxis]
em = (fh2.variables['e'][0:,zs:ze,ys:ye,xs:xe]*dt).sum(axis=0,keepdims=True)/np.sum(dt)
em = np.nanmean(em, meanax,keepdims=True)
z = np.linspace(-3000,0,100)
Y = z
P = getvaratzc5(pv.astype(np.float32),
z.astype(np.float32),
em.astype(np.float32)).squeeze()
Pnew = getvaratzc5(pvnew.astype(np.float32),
z.astype(np.float32),
em.astype(np.float32)).squeeze()
pvhash = getvaratzc(pvhash.astype(np.float32),
z.astype(np.float32),
em.astype(np.float32)).squeeze()
if fil3:
swash = np.nanmean(swash, meanax,keepdims=True)
swash = getvaratzc(swash.astype(np.float32),
z.astype(np.float32),
em.astype(np.float32)).squeeze()
else:
P = pv.squeeze()
Pnew = pvnew.squeeze()
pvhash = pvhash.squeeze()
swash = swash.squeeze()
np.savez('twapv_complete_terms', X=X,Y=Y,P=P)
else:
npzfile = np.load('twapv_complete_terms.npz')
X = npzfile['X']
Y = npzfile['Y']
P = npzfile['P']
fhgeo.close()
fh.close()
fh2.close()
return (X,Y,P,pvhash,Pnew,swash)
def plot_twapv(geofil,vgeofil,fil,fil2,xstart,xend,ystart,yend,zs,ze,meanax,
fil3=None,plotterms = [0,1,10,11,12,13], swashperc = 1,
cmaxpercfactor = 1,cmaxpercfactorpvhash=15,cmaxpercfactorPnew=15, savfil=None,savfilep=None,alreadysaved=False):
X,Y,P,pvhash,Pnew,swash = extract_twapv_terms(geofil,vgeofil,fil,fil2,
xstart,xend,ystart,yend,zs,ze,meanax,alreadysaved=alreadysaved,fil3=fil3)
cmax = np.nanpercentile(P,[cmaxpercfactor,100-cmaxpercfactor])
cmax = np.max(np.fabs(cmax))
fig,ax = plt.subplots(np.int8(np.ceil(P.shape[-1]/2)),2,
sharex=True,sharey=True,figsize=(12, 9))
ti = ['(a)','(b)','(c)','(d)','(e)','(f)','(g)','(h)',
'(i)','(j)','(k)','(l)','(m)','(n)','(o)','(p)','(q)','(r)']
labx = [ r'$(\hat{u}\hat{u}_{\tilde{x}})_{\tilde{y}}$',
r'$(\hat{v}\hat{u}_{\tilde{y}})_{\tilde{y}}$',
r'$(\hat{\varpi}\hat{u}_{\tilde{b}})_{\tilde{y}}$',
r'$(-f\hat{v})_{\tilde{y}}$',
r'$(\overline{m_{\tilde{x}}})_{\tilde{y}}$',
r"""$(\frac{1}{\overline{h}}(\overline{h}\widehat{u^{\prime \prime}u^{\prime \prime}}+\frac{1}{2}\overline{\zeta^{\prime 2}})_{\tilde{x}})_{\tilde{y}}$""",
r"""$(\frac{1}{\overline{h}}(\overline{h}\widehat{u^{\prime \prime}v^{\prime \prime}})_{\tilde{y}}$""",
r"""$(\frac{1}{\overline{h}}(\overline{h}\widehat{u^{\prime \prime}\varpi^{\prime \prime}} + \overline{\zeta^{\prime}m_{\tilde{x}}^{\prime}})_{\tilde{b}})_{\tilde{y}}$""",
r'$(-\widehat{X^H})_{\tilde{y}}$',
r'$(-\widehat{X^V})_{\tilde{y}}$']
laby = [ r'$(-\hat{u}\hat{v}_{\tilde{x}})_{\tilde{x}}$',
r'$(-\hat{v}\hat{v}_{\tilde{y}})_{\tilde{x}}$',
r'$(-\hat{\varpi}\hat{v}_{\tilde{b}})_{\tilde{x}}$',
r'$(-f\hat{u})_{\tilde{x}}$',
r'$(-\overline{m_{\tilde{y}}})_{\tilde{x}}$',
r"""$(-\frac{1}{\overline{h}}(\overline{h}\widehat{u^{\prime \prime}v^{\prime \prime}})_{\tilde{x}})_{\tilde{x}}$""",
r"""$(-\frac{1}{\overline{h}}(\overline{h}\widehat{v^{\prime \prime}v^{\prime \prime}}+\frac{1}{2}\overline{\zeta^{\prime 2}})_{\tilde{y}})_{\tilde{x}}$""",
r"""$(-\frac{1}{\overline{h}}(\overline{h}\widehat{v^{\prime \prime}\varpi^{\prime \prime}} + \overline{\zeta^{\prime}m_{\tilde{y}}^{\prime}})_{\tilde{b}})_{\tilde{x}}$""",
r'$(\widehat{Y^H})_{\tilde{x}}$',
r'$(\widehat{Y^V})_{\tilde{x}}$']
for i in range(P.shape[-1]):
axc = ax.ravel()[i]
im = m6plot((X,Y,P[:,:,i]),axc,vmax=cmax,vmin=-cmax,ptype='imshow',
txt=labx[i]+' + '+laby[i], ylim=(-2500,0),
cmap='RdBu_r', cbar=False)
if fil3:
cs = axc.contour(X,Y,swash,np.array([swashperc]), colors='k')
if i % 2 == 0:
axc.set_ylabel('z (m)')
if i > np.size(ax)-3:
xdegtokm(axc,0.5*(ystart+yend))
fig.tight_layout()
cb = fig.colorbar(im, ax=ax.ravel().tolist())
cb.formatter.set_powerlimits((0, 0))
cb.update_ticks()
if savfil:
plt.savefig(savfil+'.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
im = m6plot((X,Y,np.sum(P,axis=2)),vmax=cmax,vmin=-cmax,ptype='imshow',cmap='RdBu_r',ylim=(-2500,0))
if savfil:
plt.savefig(savfil+'res.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
fig,ax = plt.subplots(np.int8(np.ceil(len(plotterms)/2)),2,
sharex=True,sharey=True,figsize=(12,7))
cmaxpvhash = np.nanpercentile(pvhash,
[cmaxpercfactorpvhash,100-cmaxpercfactorpvhash])
cmaxpvhash = np.max(np.fabs(cmaxpvhash))
cmax = np.nanpercentile(Pnew,
[cmaxpercfactorPnew,100-cmaxpercfactorPnew])
cmax = np.max(np.fabs(cmax))
lab = [ r"$-\hat{u}\Pi^{\#}_{\tilde{x}}$",
r"$-\hat{v}\Pi^{\#}_{\tilde{y}}$",
r"$\Pi^{\#}(\bar{h} \hat{\varpi})_{\tilde{b}}$",
r"$\frac{(\hat{\varpi}\hat{u}_{\tilde{b}})_{\tilde{y}}}{\bar{h}}$",
r"""$\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{u^{\prime \prime}u^{\prime \prime}}+\frac{1}{2}\overline{\zeta^{\prime 2}})_{\tilde{x}})_{\tilde{y}}$""",
r"""$\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{u^{\prime \prime}v^{\prime \prime}})_{\tilde{y}})_{\tilde{y}}$""",
r"""$\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{u^{\prime \prime}\varpi^{\prime \prime}} + \overline{\zeta^{\prime}m_{\tilde{x}}^{\prime}})_{\tilde{b}})_{\tilde{y}}$""",
r'$-\frac{1}{\bar{h}}(\widehat{X^H})_{\tilde{y}}$',
r'$-\frac{1}{\bar{h}}(\widehat{X^V})_{\tilde{y}}$',
r'$-\frac{(\hat{\varpi}\hat{v}_{\tilde{b}})_{\tilde{x}}}{\bar{h}}$',
r"""$-\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{u^{\prime \prime}v^{\prime \prime}})_{\tilde{x}})_{\tilde{x}}$""",
#r"""$-\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{v^{\prime \prime}v^{\prime \prime}}+\frac{1}{2}\overline{\zeta^{\prime 2}})_{\tilde{y}})_{\tilde{x}}$""",
r"""$-\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{v^{\prime \prime}v^{\prime \prime}})_{\tilde{y}})_{\tilde{x}}$""",
#r"""$-\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{v^{\prime \prime}\varpi^{\prime \prime}} + \overline{\zeta^{\prime}m_{\tilde{y}}^{\prime}})_{\tilde{b}})_{\tilde{x}}$""",
r"""$-\frac{1}{\bar{h}}(\frac{1}{\bar{\zeta_{\tilde{b}}}}(\overline{\zeta^{\prime}m_{\tilde{y}}^{\prime}})_{\tilde{b}})_{\tilde{x}}$""",
r'$\frac{1}{\bar{h}}(\widehat{Y^H})_{\tilde{x}}$',
r'$\frac{1}{\bar{h}}(\widehat{Y^V})_{\tilde{x}}$',
r'$B_{\tilde{x} \tilde{y}} - B_{\tilde{y} \tilde{x}}$']
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
for i,p in enumerate(plotterms):
axc = ax.ravel()[i]
im = m6plot((X,Y,Pnew[:,:,p]),axc,vmax=cmax,vmin=-cmax,ptype='imshow',
ylim=(-1200,0), txt=lab[p],
cmap='RdBu_r', cbar=False)
im2 = axc.contour(X,Y,pvhash,np.logspace(-6,-5.5,5),colors='grey',linewidths=2)
im2.clabel(inline=True,fmt="%.1e")
if fil3:
cs = axc.contour(X,Y,swash,np.array([swashperc]), colors='k')
if i % 2 == 0:
axc.set_ylabel('z (m)')
if i > np.size(ax)-3:
xdegtokm(axc,0.5*(ystart+yend))
fig.tight_layout()
cb = fig.colorbar(im, ax=ax.ravel().tolist())
cb.formatter.set_powerlimits((0, 0))
cb.update_ticks()
if savfil:
plt.savefig(savfil+'Pnew.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
im = m6plot((X,Y,np.sum(Pnew,axis=2)),ptype='imshow',vmax=cmax,vmin=-cmax,cmap='RdBu_r',ylim=(-2500,0))
if savfil:
plt.savefig(savfil+'Pnewres.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
fig,ax = plt.subplots(1,2,sharex=True,sharey=True,figsize=(10, 3))
im = m6plot((X,Y,np.nansum(Pnew[:,:,:2],axis=2)),
ax[0],vmax=cmax,vmin=-cmax,ptype='imshow',
ylim=(-1200,0), txt=lab[0]+lab[1],
cmap='RdBu_r', cbar=False)
im = m6plot((X,Y,np.nansum(Pnew[:,:,12:13],axis=2)),
ax[1],vmax=cmax,vmin=-cmax,ptype='imshow',
ylim=(-1200,0), txt=lab[12],
cmap='RdBu_r', cbar=False)
ax[0].set_ylabel('z (m)')
for axc in ax:
xdegtokm(axc,0.5*(ystart+yend))
im2 = axc.contour(X,Y,pvhash,np.logspace(-6,-5.5,5),colors='grey',linewidths=2)
im2.clabel(inline=True,fmt="%.1e")
axc.set_ylim(-1200,0)
fig.tight_layout()
cb = fig.colorbar(im, ax=ax.ravel().tolist())
cb.formatter.set_powerlimits((0, 0))
cb.update_ticks()
if savfil:
plt.savefig(savfil+'Pnewnew.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
cmax = np.nanpercentile(pvhash,
[cmaxpercfactorpvhash,100-cmaxpercfactorpvhash])
cmax = np.max(np.fabs(cmax))
im = m6plot((X,Y,pvhash),ptype='imshow',vmax=cmax,vmin=-cmax,cmap='RdBu_r',ylim=(-2500,0))
if fil3:
cs = axc.contour(X,Y,swash,np.array([swashperc]), colors='k')
if savfil:
plt.savefig(savfil+'pvhash.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
| gpl-3.0 |
alexeyum/scikit-learn | sklearn/manifold/setup.py | 24 | 1279 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.c"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.c"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
doutib/lobpredict | lobpredictrst/jupyter/simple_model/create_simple_model_predict.py | 1 | 6870 |
# coding: utf-8
# # The best model parameters are given by
# ```
# author : SHAMINDRA
# data_source_dir : SC_shuffle
# test_type : validation
# model_type : RF
# RF:
# n_estimators : 100
# criterion : 'gini'
# max_features : 'auto'
# max_depth : 20
# n_jobs : 1
# SVM:
# kernel : 'rbf'
# degree : 3
# gamma : 'auto'
# tol : 0.001
# NNET:
# method1 : 'Tanh'
# neurons1 : 24
# method2 : 'Tanh'
# neurons2 : 39
# decay : 0.0001
# learning_rate : 0.001
# n_iter : 25
# random_state : 1
# ```
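# As an illustrative sketch (not part of the original notebook), the RF block above
# maps directly onto a scikit-learn estimator; the keyword mapping below is an
# assumption on our part, since the original config loader is not shown here.
from sklearn.ensemble import RandomForestClassifier
best_rf_model = RandomForestClassifier(n_estimators=100, criterion='gini',
                                       max_features='auto', max_depth=20, n_jobs=1)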
# In[66]:
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import imp
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import log_loss
from sklearn.metrics import accuracy_score
import pandas as pd
# We looked at the top features from the best performing random forest. They are listed below:
# In[48]:
# The top variables are:
var_importance = [(1, 'P_1_bid', 0.020001165389254737)
, (2, 'V_1_bid', 0.018358575666246449)
, (3, 'P_1_ask', 0.017058479215839299)
, (4, 'V_1_ask', 0.016953559068869958)
, (5, 'P_2_bid', 0.016908649059514971)
, (6, 'V_2_bid', 0.016219220215427665)
, (7, 'P_2_ask', 0.015039647893425838)
, (8, 'V_2_ask', 0.014497773408233052)
, (9, 'P_3_bid', 0.014321084019596746)
, (10, 'V_3_bid', 0.014158850118003859)
, (11, 'P_3_ask', 0.014101386932514923)
, (12, 'V_3_ask', 0.013911823640617986)
, (13, 'P_4_bid', 0.013838322603744435)
, (14, 'V_4_bid', 0.013668619218980316)
, (15, 'P_4_ask', 0.013413471959983998)]
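# The feature columns hard-coded further down can equally be derived from this
# ranking; a small sketch (the top 8 names match the columns selected below):
top_feature_names = [name for _rank, name, _importance in var_importance[:8]]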
# In[33]:
# Open test and train sets
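# NOTE: train_ds_ref and test_ds_ref are assumed to be paths to the gzipped train/test
# CSV files; they are not defined in this excerpt and must be supplied beforehand.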
df_train = pd.read_csv(train_ds_ref
, compression='gzip', index_col = None)
df_test = pd.read_csv(test_ds_ref
, compression='gzip', index_col = None)
# Drop the first columns - they are not useful
df_train_clean = df_train.iloc[:,1:]
df_test_clean = df_test.iloc[:,1:]
# In[34]:
X_train_cols = list(df_train_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']].columns.values)
X_train = np.array(df_train_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']])
Y_train = np.array(df_train_clean[['labels']])[:,0]
X_test = np.array(df_test_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']])
Y_test = np.array(df_test_clean[['labels']])[:,0]
# In[38]:
# Define the labels
labels = np.unique(Y_train)
## # Scale Data
scaler = MinMaxScaler()
X_test = scaler.fit_transform(X_test)
X_train = scaler.fit_transform(X_train)
# Set up the data
logreg = linear_model.LogisticRegression(C=1e5)
# Fit
logreg.fit(X_train, Y_train)
# Predict
Y_hat = logreg.predict(X_test)
Y_probs = logreg.predict_proba(X_test)
## # Misclassification error rate
miss_err = 1-accuracy_score(Y_test, Y_hat)
## # Log Loss
eps = 1e-15  # note: "10^(-15)" would be bitwise XOR in Python, not an exponent
logloss = log_loss(Y_test, Y_probs, eps = eps)
##confusion_matrix
confusion_matrix1 = confusion_matrix(y_true=Y_test, y_pred=Y_hat
, labels=labels)
# classification_report
classification_report1 = classification_report(y_true=Y_test, y_pred=Y_hat)
# Output results in a list format
result = []
result.append("confusion_matrix")
result.append(confusion_matrix1)
result.append("classification_report")
result.append(classification_report1)
result.append("logloss")
result.append(logloss)
result.append("miss_err")
result.append(miss_err)
result.append("Y_hat")
result.append(Y_hat)
# In[46]:
print(result[3])
print(Y_hat)
print(Y_probs)
# #### The predicted output for our most successful RF model is as follows
# ```
# classification_report
#
# precision recall f1-score support
#
# -1 0.99 0.98 0.98 18373
# 0 0.97 0.98 0.97 16950
# 1 0.99 0.98 0.98 15265
#
# avg / total 0.98 0.98 0.98 50588
# ```
# In[49]:
def predict_simple_linear(df_train_clean, df_test_clean):
X_train_cols = list(df_train_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']].columns.values)
X_train = np.array(df_train_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']])
Y_train = np.array(df_train_clean[['labels']])[:,0]
X_test = np.array(df_test_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']])
Y_test = np.array(df_test_clean[['labels']])[:,0]
# Define the labels
labels = np.unique(Y_train)
## # Scale Data
scaler = MinMaxScaler()
X_test = scaler.fit_transform(X_test)
X_train = scaler.fit_transform(X_train)
# Set up the data
logreg = linear_model.LogisticRegression(C=1e5)
# Fit
logreg.fit(X_train, Y_train)
# Predict
Y_hat = logreg.predict(X_test)
Y_probs = logreg.predict_proba(X_test)
## # Misclassification error rate
miss_err = 1-accuracy_score(Y_test, Y_hat)
## # Log Loss
    eps = 1e-15  # note: "10^(-15)" would be bitwise XOR in Python, not an exponent
logloss = log_loss(Y_test, Y_probs, eps = eps)
##confusion_matrix
confusion_matrix1 = confusion_matrix(y_true=Y_test, y_pred=Y_hat
, labels=labels)
# classification_report
classification_report1 = classification_report(y_true=Y_test, y_pred=Y_hat)
# Output results in a list format
result = []
result.append("confusion_matrix")
result.append(confusion_matrix1)
result.append("classification_report")
result.append(classification_report1)
result.append("logloss")
result.append(logloss)
result.append("miss_err")
result.append(miss_err)
result.append("Y_hat")
result.append(Y_hat)
return result
# In[62]:
linear_simple_predict = predict_simple_linear(df_train_clean = df_train_clean
, df_test_clean = df_train_clean)
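# Note that df_train_clean is passed for both arguments above, so this is an
# in-sample (training-set) evaluation rather than a held-out test evaluation.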
# In[64]:
# Get the predicted outcomes
linear_simple_predict_vals = linear_simple_predict[len(linear_simple_predict) -1]
len(list(linear_simple_predict_vals))
# In[67]:
modl = imp.load_source('execute_model', '../../execute_model.py')
# In[ ]:
| isc |
rfoxfa/python-utils | utils/plotting.py | 1 | 1798 | """
Plotting functions.
"""
from __future__ import absolute_import
import matplotlib.pyplot as plt
import numpy as np
def hhist(items, title=None, axislabel=None, color=None, height=None, width=None, reverse=False):
"""
Plots a horizontal histogram of values and frequencies.
Arguments:
items (iterable[any]) => A list of objects.
title (Optional[str]) => A title for the resulting histogram.
axislabel (Optional[str]) => A label for the y-axis that lists the unique items in
the parameter list.
color (Optional[str]) => A matplotlib color value for coloring the histogram
(default: matplotlib's default plot color, a royal blue)
height (Optional[int]) => A height for the plot (default: 10)
width (Optional[int]) => A width for the plot (default: 20)
reverse (Optional[bool]) => Whether or not the histogram should plot from top to bottom in
order of decreasing frequency or the reverse of that.
Returns:
Void, but a matplotlib figure should be produced (type=None).
"""
# Parse the unique items and their counts.
unique_items, item_counts = np.unique(items, return_counts=True)
# Sort the items by frequency.
item_counts, unique_items = zip(*sorted(zip(item_counts, unique_items), reverse=reverse))
# Plot the frequencies.
pos = np.arange(len(unique_items)) + 0.5
plt.figure(figsize=((width or 20), (height or 10)))
plt.barh(pos, item_counts, align='center', color=color)
plt.yticks(pos, unique_items)
plt.xlabel('Frequency')
if axislabel:
plt.ylabel(axislabel)
if title:
plt.title(title)
plt.show()
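# Minimal usage sketch (illustrative data; assumes an interactive matplotlib backend):
if __name__ == '__main__':
    sample_items = ['red', 'blue', 'red', 'green', 'blue', 'red']
    hhist(sample_items, title='Color frequency', axislabel='Color')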
| gpl-2.0 |
ClimbsRocks/scikit-learn | sklearn/utils/tests/test_validation.py | 8 | 18964 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
has_fit_parameter,
check_is_fitted,
check_consistent_length,
)
from sklearn.exceptions import NotFittedError
from sklearn.exceptions import DataConversionWarning
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=(10, 10), dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M.ravel())
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
@ignore_warnings
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
assert_warns(DeprecationWarning, check_array, [0, 1, 2])
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_on_mock_dataframe():
arr = np.array([[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]])
mock_df = MockDataFrame(arr)
checked_arr = check_array(mock_df)
assert_equal(checked_arr.dtype,
arr.dtype)
checked_arr = check_array(mock_df, dtype=np.float32)
assert_equal(checked_arr.dtype, np.dtype(np.float32))
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [[]])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# But this works if the input data is forced to look like a 2 array with
# one sample and one feature:
X_checked = assert_warns(DeprecationWarning, check_array, [42],
ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage as trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check is ValueError raised when non estimator instance passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
| bsd-3-clause |
rexshihaoren/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher the error, the less likely the model generalizes correctly from
the training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
wlamond/scikit-learn | sklearn/cluster/setup.py | 79 | 1855 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.pyx'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.pyx'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('_k_means_elkan',
sources=['_k_means_elkan.pyx'],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('_k_means',
libraries=cblas_libs,
sources=['_k_means.pyx'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop(
'extra_compile_args', []),
**blas_info
)
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
levelrf/level_basestation | gr-filter/examples/fir_filter_ccc.py | 13 | 3154 | #!/usr/bin/env python
from gnuradio import gr, filter
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
self.head = gr.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = gr.vector_sink_c()
self.vsnk_out = gr.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_ccc(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
etkirsch/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution / subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
n_components=np.floor(n_components / 5))
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nozero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
Unidata/MetPy | v0.9/_downloads/8591910a2b42dadcf3b05658ddd9c600/isentropic_example.py | 2 | 7222 | # Copyright (c) 2017,2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
===================
Isentropic Analysis
===================
The MetPy function `mpcalc.isentropic_interpolation` allows for isentropic analysis from model
analysis data in isobaric coordinates.
"""
########################################
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, add_timestamp
from metpy.units import units
#######################################
# **Getting the data**
#
# In this example, NARR reanalysis data for 18 UTC 04 April 1987 from the National Centers
# for Environmental Information (https://www.ncdc.noaa.gov/data-access/model-data)
# will be used.
data = xr.open_dataset(get_test_data('narr_example.nc', False))
##########################
print(list(data.variables))
#############################
# We will reduce the dimensionality of the data as it is pulled in to remove an empty time
# dimension.
# Assign data to variable names
lat = data['lat']
lon = data['lon']
lev = data['isobaric']
times = data['time']
tmp = data['Temperature'][0]
uwnd = data['u_wind'][0]
vwnd = data['v_wind'][0]
spech = data['Specific_humidity'][0]
# pint doesn't understand gpm
data['Geopotential_height'].attrs['units'] = 'meter'
hgt = data['Geopotential_height'][0]
#############################
# To properly interpolate to isentropic coordinates, the function must know the desired output
# isentropic levels. An array with these levels will be created below.
isentlevs = [296.] * units.kelvin
####################################
# **Conversion to Isentropic Coordinates**
#
# Once three dimensional data in isobaric coordinates has been pulled and the desired
# isentropic levels created, the conversion to isentropic coordinates can begin. Data will be
# passed to the function as below. The function requires that isentropic levels, isobaric
# levels, and temperature be input. Any additional inputs (in this case relative humidity, u,
# and v wind components) will be linearly interpolated to isentropic space.
isent_anal = mpcalc.isentropic_interpolation(isentlevs,
lev,
tmp,
spech,
uwnd,
vwnd,
hgt,
tmpk_out=True)
#####################################
# The output is a list, so now we will separate the variables to different names before
# plotting.
isentprs, isenttmp, isentspech, isentu, isentv, isenthgt = isent_anal
isentu.ito('kt')
isentv.ito('kt')
########################################
# A quick look at the shape of these variables will show that the data is now in isentropic
# coordinates, with the number of vertical levels as specified above.
print(isentprs.shape)
print(isentspech.shape)
print(isentu.shape)
print(isentv.shape)
print(isenttmp.shape)
print(isenthgt.shape)
#################################
# **Converting to Relative Humidity**
#
# The NARR only gives specific humidity on isobaric vertical levels, so relative humidity will
# have to be calculated after the interpolation to isentropic space.
isentrh = 100 * mpcalc.relative_humidity_from_specific_humidity(isentspech, isenttmp, isentprs)
#######################################
# **Plotting the Isentropic Analysis**
# Set up our projection
crs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0)
# Coordinates to limit map area
bounds = [(-122., -75., 25., 50.)]
# Choose a level to plot, in this case 296 K
level = 0
fig = plt.figure(figsize=(17., 12.))
add_metpy_logo(fig, 120, 245, size='large')
ax = fig.add_subplot(1, 1, 1, projection=crs)
ax.set_extent(*bounds, crs=ccrs.PlateCarree())
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75)
ax.add_feature(cfeature.STATES, linewidth=0.5)
# Plot the surface
clevisent = np.arange(0, 1000, 25)
cs = ax.contour(lon, lat, isentprs[level, :, :], clevisent,
colors='k', linewidths=1.0, linestyles='solid', transform=ccrs.PlateCarree())
ax.clabel(cs, fontsize=10, inline=1, inline_spacing=7,
fmt='%i', rightside_up=True, use_clabeltext=True)
# Plot RH
cf = ax.contourf(lon, lat, isentrh[level, :, :], range(10, 106, 5),
cmap=plt.cm.gist_earth_r, transform=ccrs.PlateCarree())
cb = fig.colorbar(cf, orientation='horizontal', extend='max', aspect=65, shrink=0.5, pad=0.05,
extendrect='True')
cb.set_label('Relative Humidity', size='x-large')
# Plot wind barbs
ax.barbs(lon.values, lat.values, isentu[level, :, :].m, isentv[level, :, :].m, length=6,
regrid_shape=20, transform=ccrs.PlateCarree())
# Make some titles
ax.set_title('{:.0f} K Isentropic Pressure (hPa), Wind (kt), Relative Humidity (percent)'
.format(isentlevs[level].m), loc='left')
add_timestamp(ax, times[0].dt, y=0.02, high_contrast=True)
fig.tight_layout()
######################################
# **Montgomery Streamfunction**
#
# The Montgomery Streamfunction, :math:`{\psi} = gdz + CpT`, is often desired because its
# gradient is proportional to the geostrophic wind in isentropic space. This can be easily
# calculated with `mpcalc.montgomery_streamfunction`.
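# As a rough cross-check, the definition above can also be written out directly
# (a sketch, not executed here; ``g`` and ``Cp_d`` are assumed to be the gravity and
# dry-air specific-heat constants exposed by ``metpy.constants``)::
#
#     from metpy.constants import Cp_d, g
#     msf_manual = (g * isenthgt + Cp_d * isenttmp) / 100.
#
# which should agree with the helper call below.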
# Calculate Montgomery Streamfunction and scale by 10^-2 for plotting
msf = mpcalc.montgomery_streamfunction(isenthgt, isenttmp) / 100.
# Choose a level to plot, in this case 296 K
level = 0
fig = plt.figure(figsize=(17., 12.))
add_metpy_logo(fig, 120, 250, size='large')
ax = plt.subplot(111, projection=crs)
ax.set_extent(*bounds, crs=ccrs.PlateCarree())
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75)
ax.add_feature(cfeature.STATES.with_scale('50m'), linewidth=0.5)
# Plot the surface
clevmsf = np.arange(0, 4000, 5)
cs = ax.contour(lon, lat, msf[level, :, :], clevmsf,
colors='k', linewidths=1.0, linestyles='solid', transform=ccrs.PlateCarree())
ax.clabel(cs, fontsize=10, inline=1, inline_spacing=7,
fmt='%i', rightside_up=True, use_clabeltext=True)
# Plot RH
cf = ax.contourf(lon, lat, isentrh[level, :, :], range(10, 106, 5),
cmap=plt.cm.gist_earth_r, transform=ccrs.PlateCarree())
cb = fig.colorbar(cf, orientation='horizontal', extend='max', aspect=65, shrink=0.5, pad=0.05,
extendrect='True')
cb.set_label('Relative Humidity', size='x-large')
# Plot wind barbs.
ax.barbs(lon.values, lat.values, isentu[level, :, :].m, isentv[level, :, :].m, length=6,
regrid_shape=20, transform=ccrs.PlateCarree())
# Make some titles
ax.set_title('{:.0f} K Montgomery Streamfunction '.format(isentlevs[level].m) +
r'($10^{-2} m^2 s^{-2}$), ' +
'Wind (kt), Relative Humidity (percent)', loc='left')
add_timestamp(ax, times[0].dt, y=0.02, pretext='Valid: ', high_contrast=True)
fig.tight_layout()
plt.show()
| bsd-3-clause |
del680202/MachineLearning-memo | src/tensorflow/autocoder.py | 1 | 6809 | # View more python learning tutorial on my Youtube and Youku channel!!!
# My tutorial website: https://morvanzhou.github.io/tutorials/
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
# Visualize decoder setting
# Parameters
learning_rate = 0.01
training_epochs = 5
batch_size = 256
display_step = 1
examples_to_show = 10
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])
# hidden layer settings
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 128 # 2nd layer num features
weights = {
'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
# Building the encoder
def encoder(x):
# Encoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
biases['encoder_b1']))
# Decoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
biases['encoder_b2']))
return layer_2
# Building the decoder
def decoder(x):
# Encoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
biases['decoder_b1']))
# Decoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
biases['decoder_b2']))
return layer_2
"""
# Visualize encoder setting
# Parameters
learning_rate = 0.01 # 0.01 this learning rate will be better! Tested
training_epochs = 10
batch_size = 256
display_step = 1
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])
# hidden layer settings
n_hidden_1 = 128
n_hidden_2 = 64
n_hidden_3 = 10
n_hidden_4 = 2
weights = {
'encoder_h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1],)),
'encoder_h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2],)),
'encoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3],)),
'encoder_h4': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_4],)),
'decoder_h1': tf.Variable(tf.truncated_normal([n_hidden_4, n_hidden_3],)),
'decoder_h2': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_2],)),
'decoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_1],)),
'decoder_h4': tf.Variable(tf.truncated_normal([n_hidden_1, n_input],)),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'encoder_b3': tf.Variable(tf.random_normal([n_hidden_3])),
'encoder_b4': tf.Variable(tf.random_normal([n_hidden_4])),
'decoder_b1': tf.Variable(tf.random_normal([n_hidden_3])),
'decoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'decoder_b3': tf.Variable(tf.random_normal([n_hidden_1])),
'decoder_b4': tf.Variable(tf.random_normal([n_input])),
}
def encoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
biases['encoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
biases['encoder_b2']))
layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_h3']),
biases['encoder_b3']))
layer_4 = tf.add(tf.matmul(layer_3, weights['encoder_h4']),
biases['encoder_b4'])
return layer_4
def decoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
biases['decoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
biases['decoder_b2']))
layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_h3']),
biases['decoder_b3']))
layer_4 = tf.nn.sigmoid(tf.add(tf.matmul(layer_3, weights['decoder_h4']),
biases['decoder_b4']))
return layer_4
"""
# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X
# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Launch the graph
with tf.Session() as sess:
# tf.initialize_all_variables() no long valid from
# 2017-03-02 if using tensorflow >= 0.12
sess.run(tf.initialize_all_variables())
total_batch = int(mnist.train.num_examples/batch_size)
# Training cycle
for epoch in range(training_epochs):
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size) # max(x) = 1, min(x) = 0
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1),
"cost=", "{:.9f}".format(c))
print("Optimization Finished!")
# # Applying encode and decode over test set
encode_decode = sess.run(
y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
# Compare original images with their reconstructions
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(examples_to_show):
a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28))) #Raw image
a[1][i].imshow(np.reshape(encode_decode[i], (28, 28))) # encode -> decode image
plt.show()
# encoder_result = sess.run(encoder_op, feed_dict={X: mnist.test.images})
# plt.scatter(encoder_result[:, 0], encoder_result[:, 1], c=mnist.test.labels)
# plt.colorbar()
# plt.show()
| apache-2.0 |
liangz0707/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
| bsd-3-clause |
Lightmatter/django-inlineformfield | .tox/py27/lib/python2.7/site-packages/IPython/html/notebookapp.py | 5 | 34560 | # coding: utf-8
"""A tornado based IPython notebook server.
Authors:
* Brian Granger
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import errno
import io
import json
import logging
import os
import random
import re
import select
import signal
import socket
import sys
import threading
import time
import webbrowser
# Third party
# check for pyzmq 2.1.11
from IPython.utils.zmqrelated import check_for_zmq
check_for_zmq('2.1.11', 'IPython.html')
from jinja2 import Environment, FileSystemLoader
# Install the pyzmq ioloop. This has to be done before anything else from
# tornado is imported.
from zmq.eventloop import ioloop
ioloop.install()
# check for tornado 3.1.0
msg = "The IPython Notebook requires tornado >= 3.1.0"
try:
import tornado
except ImportError:
raise ImportError(msg)
try:
version_info = tornado.version_info
except AttributeError:
raise ImportError(msg + ", but you have < 1.1.0")
if version_info < (3,1,0):
raise ImportError(msg + ", but you have %s" % tornado.version)
from tornado import httpserver
from tornado import web
# Our own libraries
from IPython.html import DEFAULT_STATIC_FILES_PATH
from .base.handlers import Template404
from .log import log_request
from .services.kernels.kernelmanager import MappingKernelManager
from .services.notebooks.nbmanager import NotebookManager
from .services.notebooks.filenbmanager import FileNotebookManager
from .services.clusters.clustermanager import ClusterManager
from .services.sessions.sessionmanager import SessionManager
from .base.handlers import AuthenticatedFileHandler, FileFindHandler
from IPython.config import Config
from IPython.config.application import catch_config_error, boolean_flag
from IPython.core.application import BaseIPythonApplication
from IPython.core.profiledir import ProfileDir
from IPython.consoleapp import IPythonConsoleApp
from IPython.kernel import swallow_argv
from IPython.kernel.zmq.session import default_secure
from IPython.kernel.zmq.kernelapp import (
kernel_flags,
kernel_aliases,
)
from IPython.nbformat.sign import NotebookNotary
from IPython.utils.importstring import import_item
from IPython.utils import submodule
from IPython.utils.traitlets import (
Dict, Unicode, Integer, List, Bool, Bytes,
DottedObjectName, TraitError,
)
from IPython.utils import py3compat
from IPython.utils.path import filefind, get_ipython_dir
from .utils import url_path_join
#-----------------------------------------------------------------------------
# Module globals
#-----------------------------------------------------------------------------
_examples = """
ipython notebook # start the notebook
ipython notebook --profile=sympy # use the sympy profile
ipython notebook --certfile=mycert.pem # use SSL/TLS certificate
"""
#-----------------------------------------------------------------------------
# Helper functions
#-----------------------------------------------------------------------------
def random_ports(port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
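# For instance, list(random_ports(8888, 8)) yields 8888-8892 followed by three
# randomized candidates drawn from roughly [8872, 8904] (clamped to be >= 1).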
def load_handlers(name):
"""Load the (URL pattern, handler) tuples for each component."""
name = 'IPython.html.' + name
mod = __import__(name, fromlist=['default_handlers'])
return mod.default_handlers
#-----------------------------------------------------------------------------
# The Tornado web application
#-----------------------------------------------------------------------------
class NotebookWebApplication(web.Application):
def __init__(self, ipython_app, kernel_manager, notebook_manager,
cluster_manager, session_manager, log, base_url,
settings_overrides, jinja_env_options):
settings = self.init_settings(
ipython_app, kernel_manager, notebook_manager, cluster_manager,
session_manager, log, base_url, settings_overrides, jinja_env_options)
handlers = self.init_handlers(settings)
super(NotebookWebApplication, self).__init__(handlers, **settings)
def init_settings(self, ipython_app, kernel_manager, notebook_manager,
cluster_manager, session_manager, log, base_url,
settings_overrides, jinja_env_options=None):
# Python < 2.6.5 doesn't accept unicode keys in f(**kwargs), and
# base_url will always be unicode, which will in turn
# make the patterns unicode, and ultimately result in unicode
# keys in kwargs to handler._execute(**kwargs) in tornado.
# This enforces that base_url be ascii in that situation.
#
# Note that the URLs these patterns check against are escaped,
# and thus guaranteed to be ASCII: 'héllo' is really 'h%C3%A9llo'.
base_url = py3compat.unicode_to_str(base_url, 'ascii')
template_path = settings_overrides.get("template_path", os.path.join(os.path.dirname(__file__), "templates"))
jenv_opt = jinja_env_options if jinja_env_options else {}
env = Environment(loader=FileSystemLoader(template_path),**jenv_opt )
settings = dict(
# basics
log_function=log_request,
base_url=base_url,
template_path=template_path,
static_path=ipython_app.static_file_path,
static_handler_class = FileFindHandler,
static_url_prefix = url_path_join(base_url,'/static/'),
# authentication
cookie_secret=ipython_app.cookie_secret,
login_url=url_path_join(base_url,'/login'),
password=ipython_app.password,
# managers
kernel_manager=kernel_manager,
notebook_manager=notebook_manager,
cluster_manager=cluster_manager,
session_manager=session_manager,
# IPython stuff
nbextensions_path = ipython_app.nbextensions_path,
mathjax_url=ipython_app.mathjax_url,
config=ipython_app.config,
jinja2_env=env,
)
# allow custom overrides for the tornado web app.
settings.update(settings_overrides)
return settings
def init_handlers(self, settings):
# Load the (URL pattern, handler) tuples for each component.
handlers = []
handlers.extend(load_handlers('base.handlers'))
handlers.extend(load_handlers('tree.handlers'))
handlers.extend(load_handlers('auth.login'))
handlers.extend(load_handlers('auth.logout'))
handlers.extend(load_handlers('notebook.handlers'))
handlers.extend(load_handlers('nbconvert.handlers'))
handlers.extend(load_handlers('services.kernels.handlers'))
handlers.extend(load_handlers('services.notebooks.handlers'))
handlers.extend(load_handlers('services.clusters.handlers'))
handlers.extend(load_handlers('services.sessions.handlers'))
handlers.extend(load_handlers('services.nbconvert.handlers'))
# FIXME: /files/ should be handled by the Contents service when it exists
nbm = settings['notebook_manager']
if hasattr(nbm, 'notebook_dir'):
handlers.extend([
(r"/files/(.*)", AuthenticatedFileHandler, {'path' : nbm.notebook_dir}),
(r"/nbextensions/(.*)", FileFindHandler, {'path' : settings['nbextensions_path']}),
])
# prepend base_url onto the patterns that we match
new_handlers = []
for handler in handlers:
pattern = url_path_join(settings['base_url'], handler[0])
new_handler = tuple([pattern] + list(handler[1:]))
new_handlers.append(new_handler)
# add 404 on the end, which will catch everything that falls through
new_handlers.append((r'(.*)', Template404))
return new_handlers
class NbserverListApp(BaseIPythonApplication):
description="List currently running notebook servers in this profile."
flags = dict(
json=({'NbserverListApp': {'json': True}},
"Produce machine-readable JSON output."),
)
json = Bool(False, config=True,
help="If True, each line of output will be a JSON object with the "
"details from the server info file.")
def start(self):
if not self.json:
print("Currently running servers:")
for serverinfo in list_running_servers(self.profile):
if self.json:
print(json.dumps(serverinfo))
else:
print(serverinfo['url'], "::", serverinfo['notebook_dir'])
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(kernel_flags)
flags['no-browser']=(
{'NotebookApp' : {'open_browser' : False}},
"Don't open the notebook in a browser after startup."
)
flags['no-mathjax']=(
{'NotebookApp' : {'enable_mathjax' : False}},
"""Disable MathJax
MathJax is the javascript library IPython uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
# Add notebook manager flags
flags.update(boolean_flag('script', 'FileNotebookManager.save_script',
               'Auto-save a .py script every time the .ipynb notebook is saved',
'Do not auto-save .py scripts for every notebook'))
# the flags that are specific to the frontend
# these must be scrubbed before being passed to the kernel,
# or it will raise an error on unrecognized flags
notebook_flags = ['no-browser', 'no-mathjax', 'script', 'no-script']
aliases = dict(kernel_aliases)
aliases.update({
'ip': 'NotebookApp.ip',
'port': 'NotebookApp.port',
'port-retries': 'NotebookApp.port_retries',
'transport': 'KernelManager.transport',
'keyfile': 'NotebookApp.keyfile',
'certfile': 'NotebookApp.certfile',
'notebook-dir': 'NotebookApp.notebook_dir',
'browser': 'NotebookApp.browser',
})
# remove ipkernel flags that are singletons, and don't make sense in
# a multi-kernel environment:
aliases.pop('f', None)
notebook_aliases = [u'port', u'port-retries', u'ip', u'keyfile', u'certfile',
u'notebook-dir', u'profile', u'profile-dir', 'browser']
#-----------------------------------------------------------------------------
# NotebookApp
#-----------------------------------------------------------------------------
class NotebookApp(BaseIPythonApplication):
name = 'ipython-notebook'
description = """
The IPython HTML Notebook.
This launches a Tornado based HTML Notebook Server that serves up an
HTML5/Javascript Notebook client.
"""
examples = _examples
classes = IPythonConsoleApp.classes + [MappingKernelManager, NotebookManager,
FileNotebookManager, NotebookNotary]
flags = Dict(flags)
aliases = Dict(aliases)
subcommands = dict(
list=(NbserverListApp, NbserverListApp.description.splitlines()[0]),
)
kernel_argv = List(Unicode)
def _log_level_default(self):
return logging.INFO
def _log_format_default(self):
"""override default log format to include time"""
return u"%(asctime)s.%(msecs).03d [%(name)s]%(highlevel)s %(message)s"
# create requested profiles by default, if they don't exist:
auto_create = Bool(True)
# file to be opened in the notebook server
file_to_run = Unicode('', config=True)
def _file_to_run_changed(self, name, old, new):
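        # split e.g. 'subdir/notebook.ipynb' so the server serves from
        # 'subdir' and opens 'notebook.ipynb' relative to it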
path, base = os.path.split(new)
if path:
self.file_to_run = base
self.notebook_dir = path
# Network related information
allow_origin = Unicode('', config=True,
help="""Set the Access-Control-Allow-Origin header
Use '*' to allow any origin to access your server.
Takes precedence over allow_origin_pat.
"""
)
allow_origin_pat = Unicode('', config=True,
help="""Use a regular expression for the Access-Control-Allow-Origin header
Requests from an origin matching the expression will get replies with:
Access-Control-Allow-Origin: origin
where `origin` is the origin of the request.
Ignored if allow_origin is set.
"""
)
allow_credentials = Bool(False, config=True,
help="Set the Access-Control-Allow-Credentials: true header"
)
ip = Unicode('localhost', config=True,
help="The IP address the notebook server will listen on."
)
def _ip_changed(self, name, old, new):
if new == u'*': self.ip = u''
port = Integer(8888, config=True,
help="The port the notebook server will listen on."
)
port_retries = Integer(50, config=True,
help="The number of additional ports to try if the specified port is not available."
)
certfile = Unicode(u'', config=True,
help="""The full path to an SSL/TLS certificate file."""
)
keyfile = Unicode(u'', config=True,
help="""The full path to a private key file for usage with SSL/TLS."""
)
cookie_secret = Bytes(b'', config=True,
help="""The random bytes used to secure cookies.
By default this is a new random number every time you start the Notebook.
Set it to a value in a config file to enable logins to persist across server sessions.
Note: Cookie secrets should be kept private, do not share config files with
cookie_secret stored in plaintext (you can read the value from a file).
"""
)
def _cookie_secret_default(self):
return os.urandom(1024)
password = Unicode(u'', config=True,
help="""Hashed password to use for web authentication.
To generate, type in a python/IPython shell:
from IPython.lib import passwd; passwd()
The string should be of the form type:salt:hashed-password.
"""
)
open_browser = Bool(True, config=True,
help="""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
""")
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webapp_settings = Dict(config=True,
help="Supply overrides for the tornado.web.Application that the "
"IPython notebook uses.")
jinja_environment_options = Dict(config=True,
help="Supply extra arguments that will be passed to Jinja environment.")
enable_mathjax = Bool(True, config=True,
help="""Whether to enable MathJax for typesetting math/TeX
MathJax is the javascript library IPython uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
def _enable_mathjax_changed(self, name, old, new):
"""set mathjax url to empty if mathjax is disabled"""
if not new:
self.mathjax_url = u''
base_url = Unicode('/', config=True,
help='''The base URL for the notebook server.
Leading and trailing slashes can be omitted,
and will automatically be added.
''')
def _base_url_changed(self, name, old, new):
if not new.startswith('/'):
self.base_url = '/'+new
elif not new.endswith('/'):
self.base_url = new+'/'
base_project_url = Unicode('/', config=True, help="""DEPRECATED use base_url""")
def _base_project_url_changed(self, name, old, new):
self.log.warn("base_project_url is deprecated, use base_url")
self.base_url = new
extra_static_paths = List(Unicode, config=True,
help="""Extra paths to search for serving static files.
This allows adding javascript/css to be available from the notebook server machine,
        or overriding individual files in the IPython notebook's built-in static files."""
)
def _extra_static_paths_default(self):
return [os.path.join(self.profile_dir.location, 'static')]
@property
def static_file_path(self):
"""return extra paths + the default location"""
return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH]
nbextensions_path = List(Unicode, config=True,
help="""paths for Javascript extensions. By default, this is just IPYTHONDIR/nbextensions"""
)
def _nbextensions_path_default(self):
return [os.path.join(get_ipython_dir(), 'nbextensions')]
mathjax_url = Unicode("", config=True,
help="""The url for MathJax.js."""
)
def _mathjax_url_default(self):
if not self.enable_mathjax:
return u''
static_url_prefix = self.webapp_settings.get("static_url_prefix",
url_path_join(self.base_url, "static")
)
# try local mathjax, either in nbextensions/mathjax or static/mathjax
for (url_prefix, search_path) in [
(url_path_join(self.base_url, "nbextensions"), self.nbextensions_path),
(static_url_prefix, self.static_file_path),
]:
self.log.debug("searching for local mathjax in %s", search_path)
try:
mathjax = filefind(os.path.join('mathjax', 'MathJax.js'), search_path)
except IOError:
continue
else:
url = url_path_join(url_prefix, u"mathjax/MathJax.js")
self.log.info("Serving local MathJax from %s at %s", mathjax, url)
return url
# no local mathjax, serve from CDN
url = u"https://cdn.mathjax.org/mathjax/latest/MathJax.js"
self.log.info("Using MathJax from CDN: %s", url)
return url
def _mathjax_url_changed(self, name, old, new):
if new and not self.enable_mathjax:
# enable_mathjax=False overrides mathjax_url
self.mathjax_url = u''
else:
self.log.info("Using MathJax: %s", new)
notebook_manager_class = DottedObjectName('IPython.html.services.notebooks.filenbmanager.FileNotebookManager',
config=True,
help='The notebook manager class to use.')
trust_xheaders = Bool(False, config=True,
help=("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers"
"sent by the upstream reverse proxy. Necessary if the proxy handles SSL")
)
info_file = Unicode()
def _info_file_default(self):
info_file = "nbserver-%s.json"%os.getpid()
return os.path.join(self.profile_dir.security_dir, info_file)
notebook_dir = Unicode(py3compat.getcwd(), config=True,
help="The directory to use for notebooks and kernels."
)
def _notebook_dir_changed(self, name, old, new):
"""Do a bit of validation of the notebook dir."""
if not os.path.isabs(new):
# If we receive a non-absolute path, make it absolute.
self.notebook_dir = os.path.abspath(new)
return
if not os.path.isdir(new):
raise TraitError("No such notebook dir: %r" % new)
# setting App.notebook_dir implies setting notebook and kernel dirs as well
self.config.FileNotebookManager.notebook_dir = new
self.config.MappingKernelManager.root_dir = new
def parse_command_line(self, argv=None):
super(NotebookApp, self).parse_command_line(argv)
if self.extra_args:
arg0 = self.extra_args[0]
f = os.path.abspath(arg0)
self.argv.remove(arg0)
if not os.path.exists(f):
self.log.critical("No such file or directory: %s", f)
self.exit(1)
# Use config here, to ensure that it takes higher priority than
# anything that comes from the profile.
c = Config()
if os.path.isdir(f):
c.NotebookApp.notebook_dir = f
elif os.path.isfile(f):
c.NotebookApp.file_to_run = f
self.update_config(c)
def init_kernel_argv(self):
"""construct the kernel arguments"""
# Scrub frontend-specific flags
self.kernel_argv = swallow_argv(self.argv, notebook_aliases, notebook_flags)
if any(arg.startswith(u'--pylab') for arg in self.kernel_argv):
self.log.warn('\n '.join([
"Starting all kernels in pylab mode is not recommended,",
"and will be disabled in a future release.",
"Please use the %matplotlib magic to enable matplotlib instead.",
"pylab implies many imports, which can have confusing side effects",
"and harm the reproducibility of your notebooks.",
]))
# Kernel should inherit default config file from frontend
self.kernel_argv.append("--IPKernelApp.parent_appname='%s'" % self.name)
# Kernel should get *absolute* path to profile directory
self.kernel_argv.extend(["--profile-dir", self.profile_dir.location])
def init_configurables(self):
# force Session default to be secure
default_secure(self.config)
self.kernel_manager = MappingKernelManager(
parent=self, log=self.log, kernel_argv=self.kernel_argv,
connection_dir = self.profile_dir.security_dir,
)
kls = import_item(self.notebook_manager_class)
self.notebook_manager = kls(parent=self, log=self.log)
self.session_manager = SessionManager(parent=self, log=self.log)
self.cluster_manager = ClusterManager(parent=self, log=self.log)
self.cluster_manager.update_profiles()
def init_logging(self):
        # This prevents double log messages because tornado uses a root logger that
        # self.log is a child of. The logging module dispatches log messages to a log
        # and all of its ancestors until propagate is set to False.
self.log.propagate = False
# hook up tornado 3's loggers to our app handlers
for name in ('access', 'application', 'general'):
logger = logging.getLogger('tornado.%s' % name)
logger.parent = self.log
logger.setLevel(self.log.level)
def init_webapp(self):
"""initialize tornado webapp and httpserver"""
self.webapp_settings['allow_origin'] = self.allow_origin
if self.allow_origin_pat:
self.webapp_settings['allow_origin_pat'] = re.compile(self.allow_origin_pat)
self.webapp_settings['allow_credentials'] = self.allow_credentials
self.web_app = NotebookWebApplication(
self, self.kernel_manager, self.notebook_manager,
self.cluster_manager, self.session_manager,
self.log, self.base_url, self.webapp_settings,
self.jinja_environment_options
)
if self.certfile:
ssl_options = dict(certfile=self.certfile)
if self.keyfile:
ssl_options['keyfile'] = self.keyfile
else:
ssl_options = None
self.web_app.password = self.password
self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options,
xheaders=self.trust_xheaders)
if not self.ip:
warning = "WARNING: The notebook server is listening on all IP addresses"
if ssl_options is None:
self.log.critical(warning + " and not using encryption. This "
"is not recommended.")
if not self.password:
self.log.critical(warning + " and not using authentication. "
"This is highly insecure and not recommended.")
success = None
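        # try the requested port first, then the fallbacks suggested by
        # random_ports(); busy (EADDRINUSE) or forbidden (EACCES) ports are
        # skipped, any other socket error is fatal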
for port in random_ports(self.port, self.port_retries+1):
try:
self.http_server.listen(port, self.ip)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.info('The port %i is already in use, trying another random port.' % port)
continue
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warn("Permission to listen on port %i denied" % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
self.log.critical('ERROR: the notebook server could not be started because '
'no available port could be found.')
self.exit(1)
@property
def display_url(self):
ip = self.ip if self.ip else '[all ip addresses on your system]'
return self._url(ip)
@property
def connection_url(self):
ip = self.ip if self.ip else 'localhost'
return self._url(ip)
def _url(self, ip):
proto = 'https' if self.certfile else 'http'
return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)
def init_signal(self):
if not sys.platform.startswith('win'):
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._signal_stop)
if hasattr(signal, 'SIGUSR1'):
# Windows doesn't support SIGUSR1
signal.signal(signal.SIGUSR1, self._signal_info)
if hasattr(signal, 'SIGINFO'):
# only on BSD-based systems
signal.signal(signal.SIGINFO, self._signal_info)
def _handle_sigint(self, sig, frame):
"""SIGINT handler spawns confirmation dialog"""
# register more forceful signal handler for ^C^C case
signal.signal(signal.SIGINT, self._signal_stop)
# request confirmation dialog in bg thread, to avoid
# blocking the App
thread = threading.Thread(target=self._confirm_exit)
thread.daemon = True
thread.start()
def _restore_sigint_handler(self):
"""callback for restoring original SIGINT handler"""
signal.signal(signal.SIGINT, self._handle_sigint)
def _confirm_exit(self):
"""confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows.
"""
# FIXME: remove this delay when pyzmq dependency is >= 2.1.11
time.sleep(0.1)
info = self.log.info
info('interrupted')
print(self.notebook_info())
sys.stdout.write("Shutdown this notebook server (y/[n])? ")
sys.stdout.flush()
r,w,x = select.select([sys.stdin], [], [], 5)
if r:
line = sys.stdin.readline()
if line.lower().startswith('y') and 'n' not in line.lower():
self.log.critical("Shutdown confirmed")
ioloop.IOLoop.instance().stop()
return
else:
print("No answer for 5s:", end=' ')
print("resuming operation...")
# no answer, or answer is no:
# set it back to original SIGINT handler
# use IOLoop.add_callback because signal.signal must be called
# from main thread
ioloop.IOLoop.instance().add_callback(self._restore_sigint_handler)
def _signal_stop(self, sig, frame):
self.log.critical("received signal %s, stopping", sig)
ioloop.IOLoop.instance().stop()
def _signal_info(self, sig, frame):
print(self.notebook_info())
def init_components(self):
"""Check the components submodule, and warn if it's unclean"""
status = submodule.check_submodule_status()
if status == 'missing':
self.log.warn("components submodule missing, running `git submodule update`")
submodule.update_submodules(submodule.ipython_parent())
elif status == 'unclean':
self.log.warn("components submodule unclean, you may see 404s on static/components")
self.log.warn("run `setup.py submodule` or `git submodule update` to update")
@catch_config_error
def initialize(self, argv=None):
super(NotebookApp, self).initialize(argv)
self.init_logging()
self.init_kernel_argv()
self.init_configurables()
self.init_components()
self.init_webapp()
self.init_signal()
def cleanup_kernels(self):
"""Shutdown all kernels.
        The kernels will shut themselves down when this process no longer exists,
but explicit shutdown allows the KernelManagers to cleanup the connection files.
"""
self.log.info('Shutting down kernels')
self.kernel_manager.shutdown_all()
def notebook_info(self):
"Return the current working directory and the server url information"
info = self.notebook_manager.info_string() + "\n"
info += "%d active kernels \n" % len(self.kernel_manager._kernels)
return info + "The IPython Notebook is running at: %s" % self.display_url
def server_info(self):
"""Return a JSONable dict of information about this server."""
return {'url': self.connection_url,
'hostname': self.ip if self.ip else 'localhost',
'port': self.port,
'secure': bool(self.certfile),
'base_url': self.base_url,
'notebook_dir': os.path.abspath(self.notebook_dir),
}
def write_server_info_file(self):
"""Write the result of server_info() to the JSON file info_file."""
with open(self.info_file, 'w') as f:
json.dump(self.server_info(), f, indent=2)
def remove_server_info_file(self):
"""Remove the nbserver-<pid>.json file created for this server.
Ignores the error raised when the file has already been removed.
"""
try:
os.unlink(self.info_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def start(self):
""" Start the IPython Notebook server app, after initialization
This method takes no arguments so all configuration and initialization
must be done prior to calling this method."""
if self.subapp is not None:
return self.subapp.start()
info = self.log.info
for line in self.notebook_info().split("\n"):
info(line)
info("Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).")
self.write_server_info_file()
if self.open_browser or self.file_to_run:
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warn('No web browser found: %s.' % e)
browser = None
if self.file_to_run:
fullpath = os.path.join(self.notebook_dir, self.file_to_run)
if not os.path.exists(fullpath):
self.log.critical("%s does not exist" % fullpath)
self.exit(1)
uri = url_path_join('notebooks', self.file_to_run)
else:
uri = 'tree'
if browser:
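                # launch the browser from a background thread so a slow or
                # blocking webbrowser.open() call cannot delay IOLoop startup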
b = lambda : browser.open(url_path_join(self.connection_url, uri),
new=2)
threading.Thread(target=b).start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
info("Interrupted...")
finally:
self.cleanup_kernels()
self.remove_server_info_file()
def list_running_servers(profile='default'):
"""Iterate over the server info files of running notebook servers.
Given a profile name, find nbserver-* files in the security directory of
that profile, and yield dicts of their information, each one pertaining to
a currently running notebook server instance.
"""
pd = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), name=profile)
for file in os.listdir(pd.security_dir):
if file.startswith('nbserver-'):
with io.open(os.path.join(pd.security_dir, file), encoding='utf-8') as f:
yield json.load(f)
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
launch_new_instance = NotebookApp.launch_instance
| mit |
Eric89GXL/scikit-learn | doc/sphinxext/gen_rst.py | 1 | 39133 | """
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from time import time
import os
import re
import shutil
import traceback
import glob
import sys
import warnings
from StringIO import StringIO
import cPickle
import urllib2
import gzip
import posixpath
try:
from PIL import Image
except:
import Image
import matplotlib
matplotlib.use('Agg')
import token
import tokenize
import numpy as np
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
resp = urllib2.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
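        # scan forward from the opening tag, counting nested open/close tags,
        # until the depth returns to zero at the matching end_tag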
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
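        # walk `key:value` pairs left to right; a value is either a bracketed
        # list ("[...]") or a nested dict ("{...}") parsed recursively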
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://")')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[value.keys()[0]]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
for comb_name in comb_names:
if html.find(comb_name) >= 0:
url = link + '#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
            cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_1.png': (1, 600),
'plot_outlier_detection_1.png': (3, 372),
'plot_gp_regression_1.png': (2, 250),
'plot_adaboost_twoclass_1.png': (1, 372),
'plot_compare_methods_1.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
lines = file(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(iter(lines).next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery",
"Please check your example's layout",
" and make sure it's correct")
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(app.builder.srcdir + '/../' + 'examples')
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
# we create an index.rst with all examples
fhindex = file(os.path.join(root_dir, 'index.rst'), 'w')
#Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
display: none;
}
.figure {
float: left;
margin: 10px;
-webkit-border-radius: 10px; /* Saf3-4, iOS 1-3.2, Android <1.6 */
-moz-border-radius: 10px; /* FF1-3.6 */
border-radius: 10px; /* Opera 10.5, IE9, Saf5, Chrome, FF4, iOS 4, Android 2.1+ */
border: 2px solid #fff;
background-color: white;
/* --> Thumbnail image size */
width: 150px;
height: 100px;
-webkit-background-size: 150px 100px; /* Saf3-4 */
-moz-background-size: 150px 100px; /* FF3.6 */
}
.figure img {
display: inline;
}
div.docstringWrapper p.caption {
display: block;
-webkit-box-shadow: 0px 0px 20px rgba(0, 0, 0, 0.0);
-moz-box-shadow: 0px 0px 20px rgba(0, 0, 0, .0); /* FF3.5 - 3.6 */
box-shadow: 0px 0px 20px rgba(0, 0, 0, 0.0); /* Opera 10.5, IE9, FF4+, Chrome 10+ */
padding: 0px;
border: white;
}
div.docstringWrapper p {
display: none;
background-color: white;
-webkit-box-shadow: 0px 0px 20px rgba(0, 0, 0, 1.00);
-moz-box-shadow: 0px 0px 20px rgba(0, 0, 0, 1.00); /* FF3.5 - 3.6 */
box-shadow: 0px 0px 20px rgba(0, 0, 0, 1.00); /* Opera 10.5, IE9, FF4+, Chrome 10+ */
padding: 13px;
margin-top: 0px;
border-style: solid;
border-width: 1px;
}
</style>
.. raw:: html
<script type="text/javascript">
function animateClone(e){
var position;
position = $(this).position();
var clone = $(this).closest('.thumbnailContainer').find('.clonedItem');
var clone_fig = clone.find('.figure');
clone.css("left", position.left - 70).css("top", position.top - 70).css("position", "absolute").css("z-index", 1000).css("background-color", "white");
var cloneImg = clone_fig.find('img');
clone.show();
clone.animate({
height: "270px",
width: "320px"
}, 0
);
cloneImg.css({
'max-height': "200px",
'max-width': "280px"
});
cloneImg.animate({
height: "200px",
width: "280px"
}, 0
);
clone_fig.css({
'margin-top': '20px',
});
clone_fig.show();
clone.find('p').css("display", "block");
clone_fig.css({
height: "240",
width: "305px"
});
cloneP_height = clone.find('p.caption').height();
clone_fig.animate({
height: (200 + cloneP_height)
}, 0
);
clone.bind("mouseleave", function(e){
clone.animate({
height: "100px",
width: "150px"
}, 10, function(){$(this).hide();});
clone_fig.animate({
height: "100px",
width: "150px"
}, 10, function(){$(this).hide();});
});
} //end animateClone()
$(window).load(function () {
$(".figure").css("z-index", 1);
$(".docstringWrapper").each(function(i, obj){
var clone;
var $obj = $(obj);
clone = $obj.clone();
clone.addClass("clonedItem");
clone.appendTo($obj.closest(".thumbnailContainer"));
clone.hide();
$obj.bind("mouseenter", animateClone);
}); // end each
}); // end
</script>
Examples
========
.. _examples-index:
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery)
for dir in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, dir)):
generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
lines = file(example_file).readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
tokens = tokenize.generate_tokens(lines.__iter__().next)
check_docstring = True
erow_docstring = 0
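    # the first STRING token that is not a comment/newline is the module
    # docstring; remember the row where it ends, plus the last row overall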
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif ((tok_type == 'STRING') and check_docstring):
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
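    # rank each example by the number of code lines after its docstring and
    # break ties alphabetically (np.lexsort sorts by its last key first)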
new_list = filter(lambda x: x.endswith('.py'), file_list)
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery):
""" Generate the rst file for an example directory.
"""
if not dir == '.':
target_dir = os.path.join(root_dir, dir)
src_dir = os.path.join(example_dir, dir)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
print 80 * '_'
print ('Example directory %s does not have a README.txt file' %
src_dir)
print 'Skipping this directory'
print 80 * '_'
return
fhindex.write("""
%s
""" % file(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
for fname in sorted_listdir:
if fname.endswith('py'):
generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, fdocstring, _ = extract_docstring(new_fname, True)
thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(dir, fname).replace(os.path.sep, '_')
fhindex.write("""
.. raw:: html
<div class="thumbnailContainer">
<div class="docstringWrapper">
""")
fhindex.write('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if dir != '.':
fhindex.write(' :target: ./%s/%s.html\n\n' % (dir,
fname[:-3]))
else:
fhindex.write(' :target: ./%s.html\n\n' % link_name[:-3])
fhindex.write(""" :ref:`example_%s`
.. raw:: html
<p>%s
</p></div>
</div>
.. toctree::
:hidden:
%s/%s
""" % (link_name, fdocstring, dir, fname[:-3]))
fhindex.write("""
.. raw:: html
<div style="clear: both"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) / 2, (height - height_sc) / 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
os.system("optipng -quiet -o 9 '{0}'".format(out_fname))
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%s.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print 'plotting %s' % fname
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
# get variables so we can later add links to the documentation
example_code_obj = {}
for var_name, var in my_globals.iteritems():
if not hasattr(var, '__module__'):
continue
if not isinstance(var.__module__, basestring):
continue
if var.__module__.split('.')[0] not in DOCMODULES:
continue
# get the type as a string with other things stripped
tstr = str(type(var))
tstr = (tstr[tstr.find('\'')
+ 1:tstr.rfind('\'')].split('.')[-1])
# get shortened module name
module_short = get_short_module_name(var.__module__,
tstr)
cobj = {'name': tstr, 'module': var.__module__,
'module_short': module_short,
'obj_type': 'object'}
example_code_obj[var_name] = cobj
# find functions so we can later add links to the documentation
funregex = re.compile('[\w.]+\(')
with open(src_file, 'rt') as fid:
for line in fid.readlines():
if line.startswith('#'):
continue
for match in funregex.findall(line):
fun_name = match[:-1]
try:
exec('this_fun = %s' % fun_name, my_globals)
except Exception as err:
# Here, we were not able to execute the
# previous statement, either because the
# fun_name was not a function but a statement
# (print), or because the regexp didn't
# catch the whole function name :
# eg:
# X = something().blah()
# will work for something, but not blah.
continue
this_fun = my_globals['this_fun']
if not callable(this_fun):
continue
if not hasattr(this_fun, '__module__'):
continue
if not isinstance(this_fun.__module__, basestring):
continue
if (this_fun.__module__.split('.')[0]
not in DOCMODULES):
continue
# get shortened module name
fun_name_short = fun_name.split('.')[-1]
module_short = get_short_module_name(
this_fun.__module__, fun_name_short)
cobj = {'name': fun_name_short,
'module': this_fun.__module__,
'module_short': module_short,
'obj_type': 'function'}
example_code_obj[fun_name] = cobj
fid.close()
if len(example_code_obj) > 0:
# save the dictionary, so we can later add hyperlinks
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
cPickle.dump(example_code_obj, fid,
cPickle.HIGHEST_PROTOCOL)
fid.close()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
for fig_num in (fig_mngr.num for fig_mngr in
matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_num)
plt.savefig(image_path % fig_num)
figure_list.append(image_fname % fig_num)
except:
print 80 * '_'
print '%s is not compiling:' % fname
traceback.print_exc()
print 80 * '_'
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print " - time elapsed : %.2g sec" % time_elapsed
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path % '[1-9]')]
#for f in glob.glob(image_path % '*')]
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
    # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
    # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
    # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
try:
if exception is not None:
return
print 'Embedding documentation hyperlinks in examples..'
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
'http://matplotlib.org')
doc_resolvers['numpy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/numpy-1.6.0')
doc_resolvers['scipy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/scipy-0.11.0/reference')
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print '\tprocessing: %s' % fname
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = cPickle.load(fid)
fid.close()
str_repl = {}
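                    # map the pygments-highlighted HTML of each name (built
                    # from orig_pattern/period spans) to the same markup
                    # wrapped in an <a href> pointing at its documentation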
# generate replacement strings with the links
for name, cobj in example_code_obj.iteritems():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
if link is not None:
parts = name.split('.')
name_html = orig_pattern % parts[0]
for part in parts[1:]:
name_html += period + orig_pattern % part
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
for name, link in str_repl.iteritems():
line = line.replace(name, link)
fid.write(line.encode('utf-8'))
except urllib2.HTTPError, e:
print ("The following HTTP Error has occurred:\n")
print e.code
except urllib2.URLError, e:
print ("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding due to a URL Error: \n")
print e.args
print '[done]'
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
| bsd-3-clause |
yunfeilu/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 57 | 8062 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed.size, 0)
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| bsd-3-clause |
davidgbe/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
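# Hedged aside (illustrative only, not used by the plot below): on a toy
# input, pairwise_distances_argmin returns, for each row of its first
# argument, the index of the closest row in its second argument.  The name
# _pairing_sketch is invented here.
def _pairing_sketch():
    a = np.array([[0., 0.], [10., 10.]])
    b = np.array([[9., 9.], [1., 1.]])
    return pairwise_distances_argmin(a, b)   # -> array([1, 0])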
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
minhlongdo/scipy | scipy/integrate/odepack.py | 62 | 9420 | # Author: Travis Oliphant
from __future__ import division, print_function, absolute_import
__all__ = ['odeint']
from . import _odepack
from copy import copy
import warnings
class ODEintWarning(Warning):
pass
_msgs = {2: "Integration successful.",
1: "Nothing was done; the integration time was 0.",
-1: "Excess work done on this call (perhaps wrong Dfun type).",
-2: "Excess accuracy requested (tolerances too small).",
-3: "Illegal input detected (internal error).",
-4: "Repeated error test failures (internal error).",
-5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
-6: "Error weight became zero during problem.",
-7: "Internal workspace insufficient to finish (internal error)."
}
def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
mxords=5, printmessg=0):
"""
Integrate a system of ordinary differential equations.
Solve a system of ordinary differential equations using lsoda from the
FORTRAN library odepack.
Solves the initial value problem for stiff or non-stiff systems
of first order ode-s::
dy/dt = func(y, t0, ...)
where y can be a vector.
*Note*: The first two arguments of ``func(y, t0, ...)`` are in the
opposite order of the arguments in the system definition function used
by the `scipy.integrate.ode` class.
Parameters
----------
func : callable(y, t0, ...)
Computes the derivative of y at t0.
y0 : array
Initial condition on y (can be a vector).
t : array
A sequence of time points for which to solve for y. The initial
value point should be the first element of this sequence.
args : tuple, optional
Extra arguments to pass to function.
Dfun : callable(y, t0, ...)
Gradient (Jacobian) of `func`.
col_deriv : bool, optional
True if `Dfun` defines derivatives down columns (faster),
otherwise `Dfun` should define derivatives across rows.
full_output : bool, optional
        True if a dictionary of optional outputs should be returned as the second output
printmessg : bool, optional
Whether to print the convergence message
Returns
-------
y : array, shape (len(t), len(y0))
Array containing the value of y for each desired time in t,
with the initial value `y0` in the first row.
infodict : dict, only returned if full_output == True
Dictionary containing additional output information
======= ============================================================
key meaning
======= ============================================================
'hu' vector of step sizes successfully used for each time step.
'tcur' vector with the value of t reached for each time step.
(will always be at least as large as the input times).
'tolsf' vector of tolerance scale factors, greater than 1.0,
computed when a request for too much accuracy was detected.
'tsw' value of t at the time of the last method switch
(given for each time step)
'nst' cumulative number of time steps
'nfe' cumulative number of function evaluations for each time step
'nje' cumulative number of jacobian evaluations for each time step
'nqu' a vector of method orders for each successful step.
'imxer' index of the component of largest magnitude in the
weighted local error vector (e / ewt) on an error return, -1
otherwise.
'lenrw' the length of the double work array required.
'leniw' the length of integer work array required.
'mused' a vector of method indicators for each successful time step:
1: adams (nonstiff), 2: bdf (stiff)
======= ============================================================
Other Parameters
----------------
ml, mu : int, optional
If either of these are not None or non-negative, then the
Jacobian is assumed to be banded. These give the number of
lower and upper non-zero diagonals in this banded matrix.
For the banded case, `Dfun` should return a matrix whose
rows contain the non-zero bands (starting with the lowest diagonal).
Thus, the return matrix `jac` from `Dfun` should have shape
``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
The data in `jac` must be stored such that ``jac[i - j + mu, j]``
holds the derivative of the `i`th equation with respect to the `j`th
state variable. If `col_deriv` is True, the transpose of this
`jac` must be returned.
rtol, atol : float, optional
The input parameters `rtol` and `atol` determine the error
control performed by the solver. The solver will control the
vector, e, of estimated local errors in y, according to an
inequality of the form ``max-norm of (e / ewt) <= 1``,
where ewt is a vector of positive error weights computed as
``ewt = rtol * abs(y) + atol``.
rtol and atol can be either vectors the same length as y or scalars.
Defaults to 1.49012e-8.
tcrit : ndarray, optional
Vector of critical points (e.g. singularities) where integration
care should be taken.
h0 : float, (0: solver-determined), optional
The step size to be attempted on the first step.
hmax : float, (0: solver-determined), optional
The maximum absolute step size allowed.
hmin : float, (0: solver-determined), optional
The minimum absolute step size allowed.
ixpr : bool, optional
Whether to generate extra printing at method switches.
mxstep : int, (0: solver-determined), optional
Maximum number of (internally defined) steps allowed for each
integration point in t.
mxhnil : int, (0: solver-determined), optional
Maximum number of messages printed.
mxordn : int, (0: solver-determined), optional
Maximum order to be allowed for the non-stiff (Adams) method.
mxords : int, (0: solver-determined), optional
Maximum order to be allowed for the stiff (BDF) method.
See Also
--------
ode : a more object-oriented integrator based on VODE.
quad : for finding the area under a curve.
Examples
--------
The second order differential equation for the angle `theta` of a
pendulum acted on by gravity with friction can be written::
theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0
where `b` and `c` are positive constants, and a prime (') denotes a
derivative. To solve this equation with `odeint`, we must first convert
it to a system of first order equations. By defining the angular
velocity ``omega(t) = theta'(t)``, we obtain the system::
theta'(t) = omega(t)
omega'(t) = -b*omega(t) - c*sin(theta(t))
Let `y` be the vector [`theta`, `omega`]. We implement this system
in python as:
>>> def pend(y, t, b, c):
... theta, omega = y
... dydt = [omega, -b*omega - c*np.sin(theta)]
... return dydt
...
We assume the constants are `b` = 0.25 and `c` = 5.0:
>>> b = 0.25
>>> c = 5.0
For initial conditions, we assume the pendulum is nearly vertical
    with `theta(0)` = `pi` - 0.1, and it is initially at rest, so
`omega(0)` = 0. Then the vector of initial conditions is
>>> y0 = [np.pi - 0.1, 0.0]
    We generate a solution at 101 evenly spaced samples in the interval
0 <= `t` <= 10. So our array of times is:
>>> t = np.linspace(0, 10, 101)
Call `odeint` to generate the solution. To pass the parameters
`b` and `c` to `pend`, we give them to `odeint` using the `args`
argument.
>>> from scipy.integrate import odeint
>>> sol = odeint(pend, y0, t, args=(b, c))
The solution is an array with shape (101, 2). The first column
is `theta(t)`, and the second is `omega(t)`. The following code
plots both components.
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')
>>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')
>>> plt.legend(loc='best')
>>> plt.xlabel('t')
>>> plt.grid()
>>> plt.show()
"""
if ml is None:
ml = -1 # changed to zero inside function call
if mu is None:
mu = -1 # changed to zero inside function call
t = copy(t)
y0 = copy(y0)
output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
full_output, rtol, atol, tcrit, h0, hmax, hmin,
ixpr, mxstep, mxhnil, mxordn, mxords)
if output[-1] < 0:
warning_msg = _msgs[output[-1]] + " Run with full_output = 1 to get quantitative information."
warnings.warn(warning_msg, ODEintWarning)
elif printmessg:
warning_msg = _msgs[output[-1]]
warnings.warn(warning_msg, ODEintWarning)
if full_output:
output[1]['message'] = _msgs[output[-1]]
output = output[:-1]
if len(output) == 1:
return output[0]
else:
return output
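# Hedged usage aside (appended for exposition, not part of odepack itself):
# a minimal sketch of the ``full_output`` dictionary documented above.  The
# names _odeint_full_output_sketch, decay and info are invented here.
def _odeint_full_output_sketch():
    import numpy as np
    def decay(y, t, k):
        return -k * y                      # dy/dt = -k * y
    t = np.linspace(0, 5, 51)
    y, info = odeint(decay, 1.0, t, args=(0.7,), full_output=True)
    # 'hu' holds the step sizes actually used, 'nst' the cumulative number
    # of internal steps taken up to each output time.
    return y, info['hu'], info['nst']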
| bsd-3-clause |
lcy-seso/Paddle | python/paddle/dataset/uci_housing.py | 1 | 3748 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
UCI Housing dataset.
This module will download dataset from
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and
parse training set and test set into paddle reader creators.
"""
import numpy as np
import os
import paddle.dataset.common
__all__ = ['train', 'test']
URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data'
MD5 = 'd4accdce7a25600298819f8e28e8d593'
feature_names = [
'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',
    'PTRATIO', 'B', 'LSTAT', 'MEDV'
]
UCI_TRAIN_DATA = None
UCI_TEST_DATA = None
URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/fit_a_line.tar'
MD5_MODEL = '52fc3da8ef3937822fcdd87ee05c0c9b'
def feature_range(maximums, minimums):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
feature_num = len(maximums)
ax.bar(range(feature_num), maximums - minimums, color='r', align='center')
ax.set_title('feature scale')
plt.xticks(range(feature_num), feature_names)
plt.xlim([-1, feature_num])
fig.set_figheight(6)
fig.set_figwidth(10)
if not os.path.exists('./image'):
os.makedirs('./image')
fig.savefig('image/ranges.png', dpi=48)
plt.close(fig)
def load_data(filename, feature_num=14, ratio=0.8):
global UCI_TRAIN_DATA, UCI_TEST_DATA
if UCI_TRAIN_DATA is not None and UCI_TEST_DATA is not None:
return
data = np.fromfile(filename, sep=' ')
data = data.reshape(data.shape[0] / feature_num, feature_num)
maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum(
axis=0) / data.shape[0]
feature_range(maximums[:-1], minimums[:-1])
for i in xrange(feature_num - 1):
data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])
offset = int(data.shape[0] * ratio)
UCI_TRAIN_DATA = data[:offset]
UCI_TEST_DATA = data[offset:]
def train():
"""
UCI_HOUSING training set creator.
It returns a reader creator, each sample in the reader is features after
normalization and price number.
:return: Training reader creator
:rtype: callable
"""
global UCI_TRAIN_DATA
load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))
def reader():
for d in UCI_TRAIN_DATA:
yield d[:-1], d[-1:]
return reader
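# Hedged usage aside (illustrative only, not part of the public dataset
# module): how the reader creator returned by ``train()`` is typically
# consumed.  The name _first_n_train_samples is invented here.
def _first_n_train_samples(n=5):
    reader = train()          # downloads and caches the data on first call
    samples = []
    for i, (features, price) in enumerate(reader()):
        if i >= n:
            break
        samples.append((features, price))
    return samples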
def test():
"""
UCI_HOUSING test set creator.
It returns a reader creator, each sample in the reader is features after
normalization and price number.
:return: Test reader creator
:rtype: callable
"""
global UCI_TEST_DATA
load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))
def reader():
for d in UCI_TEST_DATA:
yield d[:-1], d[-1:]
return reader
def fetch():
paddle.dataset.common.download(URL, 'uci_housing', MD5)
def convert(path):
"""
Converts dataset to recordio format
"""
paddle.dataset.common.convert(path, train(), 1000, "uci_housing_train")
    paddle.dataset.common.convert(path, test(), 1000, "uci_housing_test")
| apache-2.0 |
mindriot101/bokeh | bokeh/document/events.py | 3 | 28467 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide events that represent various changes to Bokeh Documents.
These events are used internally to signal changes to Documents. For
information about user-facing (e.g. UI or tool) events, see the reference
for :ref:`bokeh.events`.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from ..util.dependencies import import_optional
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
pd = import_optional('pandas')
__all__ = (
'ColumnDataChangedEvent',
'ColumnsStreamedEvent',
'ColumnsPatchedEvent',
'DocumentChangedEvent',
'DocumentPatchedEvent',
'ModelChangedEvent',
'RootAddedEvent',
'RootRemovedEvent',
'SessionCallbackAdded',
'SessionCallbackRemoved',
    'TitleChangedEvent',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class DocumentChangedEvent(object):
''' Base class for all internal events representing a change to a
Bokeh Document.
'''
def __init__(self, document, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
In the context of a Bokeh server application, incoming updates
to properties will be annotated with the session that is
doing the updating. This value is propagated through any
subsequent change notifications that the update triggers.
The session can compare the event setter to itself, and
suppress any updates that originate from itself.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
self.document = document
self.setter = setter
self.callback_invoker = callback_invoker
def combine(self, event):
'''
'''
return False
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._document_changed`` if it exists.
'''
if hasattr(receiver, '_document_changed'):
receiver._document_changed(self)
class DocumentPatchedEvent(DocumentChangedEvent):
''' A Base class for events that represent updating Bokeh Models and
their properties.
'''
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._document_patched`` if it exists.
'''
super(DocumentPatchedEvent, self).dispatch(receiver)
if hasattr(receiver, '_document_patched'):
receiver._document_patched(self)
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
*Sub-classes must implement this method.*
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
raise NotImplementedError()
class ModelChangedEvent(DocumentPatchedEvent):
''' A concrete event representing updating an attribute and value of a
specific Bokeh Model.
This is the "standard" way of updating most Bokeh model attributes. For
special casing situations that can optimized (e.g. streaming, etc.), a
``hint`` may be supplied that overrides normal mechanisms.
'''
def __init__(self, document, model, attr, old, new, serializable_new, hint=None, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
model (Model) :
A Model to update
attr (str) :
The name of the attribute to update on the model.
old (object) :
The old value of the attribute
new (object) :
The new value of the attribute
serializable_new (object) :
A serialized (JSON) version of the new value. It may be
``None`` if a hint is supplied.
hint (DocumentPatchedEvent, optional) :
When appropriate, a secondary event may be supplied that
modifies the normal update process. For example, in order
to stream or patch data more efficiently than the standard
update mechanism.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
if setter is None and isinstance(hint, (ColumnsStreamedEvent, ColumnsPatchedEvent)):
setter = hint.setter
super(ModelChangedEvent, self).__init__(document, setter, callback_invoker)
self.model = model
self.attr = attr
self.old = old
self.new = new
self.serializable_new = serializable_new
self.hint = hint
def combine(self, event):
'''
'''
if not isinstance(event, ModelChangedEvent): return False
# If these are not true something weird is going on, maybe updates from
# Python bokeh.client, don't try to combine
if self.setter != event.setter: return False
if self.document != event.document: return False
if self.hint:
return self.hint.combine(event.hint)
if (self.model == event.model) and (self.attr == event.attr):
self.new = event.new
self.serializable_new = event.serializable_new
self.callback_invoker = event.callback_invoker
return True
return False
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
        This method will invoke ``receiver._document_model_changed`` if it
exists.
'''
super(ModelChangedEvent, self).dispatch(receiver)
if hasattr(receiver, '_document_model_changed'):
receiver._document_model_changed(self)
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
from ..model import collect_models
if self.hint is not None:
return self.hint.generate(references, buffers)
value = self.serializable_new
# the new value is an object that may have
# not-yet-in-the-remote-doc references, and may also
# itself not be in the remote doc yet. the remote may
# already have some of the references, but
# unfortunately we don't have an easy way to know
# unless we were to check BEFORE the attr gets changed
# (we need the old _all_models before setting the
# property). So we have to send all the references the
# remote could need, even though it could be inefficient.
# If it turns out we need to fix this we could probably
# do it by adding some complexity.
value_refs = set(collect_models(value))
# we know we don't want a whole new copy of the obj we're patching
# unless it's also the new value
if self.model != value:
value_refs.discard(self.model)
references.update(value_refs)
return { 'kind' : 'ModelChanged',
'model' : self.model.ref,
'attr' : self.attr,
'new' : value }
class ColumnDataChangedEvent(DocumentPatchedEvent):
''' A concrete event representing efficiently replacing *all*
existing data for a :class:`~bokeh.models.sources.ColumnDataSource`
'''
def __init__(self, document, column_source, cols=None, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
column_source (ColumnDataSource) :
cols (list[str]) :
optional explicit list of column names to update. If None, all
columns will be updated (default: None)
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super(ColumnDataChangedEvent, self).__init__(document, setter, callback_invoker)
self.column_source = column_source
self.cols = cols
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._column_data_changed`` if it exists.
'''
super(ColumnDataChangedEvent, self).dispatch(receiver)
if hasattr(receiver, '_column_data_changed'):
receiver._column_data_changed(self)
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'ColumnDataChanged'
'column_source' : <reference to a CDS>
            'new' : <new data to send to column_source>
'cols' : <specific columns to update>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
from ..util.serialization import transform_column_source_data
data_dict = transform_column_source_data(self.column_source.data, buffers=buffers, cols=self.cols)
return { 'kind' : 'ColumnDataChanged',
'column_source' : self.column_source.ref,
'new' : data_dict,
'cols' : self.cols}
class ColumnsStreamedEvent(DocumentPatchedEvent):
''' A concrete event representing efficiently streaming new data
to a :class:`~bokeh.models.sources.ColumnDataSource`
'''
def __init__(self, document, column_source, data, rollover, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
column_source (ColumnDataSource) :
The data source to stream new data to.
data (dict or DataFrame) :
New data to stream.
If a DataFrame, will be stored as ``{c: df[c] for c in df.columns}``
rollover (int) :
A rollover limit. If the data source columns exceed this
limit, earlier values will be discarded to maintain the
column length under the limit.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super(ColumnsStreamedEvent, self).__init__(document, setter, callback_invoker)
self.column_source = column_source
if pd and isinstance(data, pd.DataFrame):
data = {c: data[c] for c in data.columns}
self.data = data
self.rollover = rollover
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._columns_streamed`` if it exists.
'''
super(ColumnsStreamedEvent, self).dispatch(receiver)
if hasattr(receiver, '_columns_streamed'):
receiver._columns_streamed(self)
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'ColumnsStreamed'
'column_source' : <reference to a CDS>
            'data' : <new data to stream to column_source>
'rollover' : <rollover limit>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
return { 'kind' : 'ColumnsStreamed',
'column_source' : self.column_source.ref,
'data' : self.data,
'rollover' : self.rollover }
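# Hedged aside (added for exposition, not part of the Bokeh reference):
# a minimal sketch of producing the JSON payload documented above.  The
# names _columns_streamed_sketch, doc and source are invented here.
def _columns_streamed_sketch():
    from bokeh.document import Document
    from bokeh.models import ColumnDataSource
    doc = Document()
    source = ColumnDataSource(data=dict(x=[1, 2], y=[3, 4]))
    doc.add_root(source)
    event = ColumnsStreamedEvent(doc, source, data=dict(x=[5], y=[6]),
                                 rollover=100)
    references, buffers = {}, set()
    return event.generate(references, buffers)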
class ColumnsPatchedEvent(DocumentPatchedEvent):
''' A concrete event representing efficiently applying data patches
to a :class:`~bokeh.models.sources.ColumnDataSource`
'''
def __init__(self, document, column_source, patches, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
column_source (ColumnDataSource) :
The data source to apply patches to.
patches (list) :
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super(ColumnsPatchedEvent, self).__init__(document, setter, callback_invoker)
self.column_source = column_source
self.patches = patches
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._columns_patched`` if it exists.
'''
super(ColumnsPatchedEvent, self).dispatch(receiver)
if hasattr(receiver, '_columns_patched'):
receiver._columns_patched(self)
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'ColumnsPatched'
'column_source' : <reference to a CDS>
'patches' : <patches to apply to column_source>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
return { 'kind' : 'ColumnsPatched',
'column_source' : self.column_source.ref,
'patches' : self.patches }
class TitleChangedEvent(DocumentPatchedEvent):
''' A concrete event representing a change to the title of a Bokeh
Document.
'''
def __init__(self, document, title, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
title (str) :
The new title to set on the Document
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super(TitleChangedEvent, self).__init__(document, setter, callback_invoker)
self.title = title
def combine(self, event):
'''
'''
if not isinstance(event, TitleChangedEvent): return False
# If these are not true something weird is going on, maybe updates from
# Python bokeh.client, don't try to combine
if self.setter != event.setter: return False
if self.document != event.document: return False
self.title = event.title
self.callback_invoker = event.callback_invoker
return True
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'TitleChanged'
'title' : <new title to set>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
return { 'kind' : 'TitleChanged',
'title' : self.title }
class RootAddedEvent(DocumentPatchedEvent):
''' A concrete event representing a change to add a new Model to a
Document's collection of "root" models.
'''
def __init__(self, document, model, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
model (Model) :
The Bokeh Model to add as a Document root.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super(RootAddedEvent, self).__init__(document, setter, callback_invoker)
self.model = model
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'RootAdded'
                'model' : <reference to a Model>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
references.update(self.model.references())
return { 'kind' : 'RootAdded',
'model' : self.model.ref }
class RootRemovedEvent(DocumentPatchedEvent):
''' A concrete event representing a change to remove an existing Model
from a Document's collection of "root" models.
'''
def __init__(self, document, model, setter=None, callback_invoker=None):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
model (Model) :
The Bokeh Model to remove as a Document root.
setter (ClientSession or ServerSession or None, optional) :
This is used to prevent "boomerang" updates to Bokeh apps.
(default: None)
See :class:`~bokeh.document.events.DocumentChangedEvent`
for more details.
callback_invoker (callable, optional) :
A callable that will invoke any Model callbacks that should
be executed in response to the change that triggered this
event. (default: None)
'''
super(RootRemovedEvent, self).__init__(document, setter, callback_invoker)
self.model = model
def generate(self, references, buffers):
''' Create a JSON representation of this event suitable for sending
to clients.
.. code-block:: python
{
'kind' : 'RootRemoved'
                'model' : <reference to a Model>
}
Args:
references (dict[str, Model]) :
If the event requires references to certain models in order to
function, they may be collected here.
**This is an "out" parameter**. The values it contains will be
modified in-place.
buffers (set) :
If the event needs to supply any additional Bokeh protocol
buffers, they may be added to this set.
**This is an "out" parameter**. The values it contains will be
modified in-place.
'''
return { 'kind' : 'RootRemoved',
'model' : self.model.ref }
class SessionCallbackAdded(DocumentChangedEvent):
''' A concrete event representing a change to add a new callback (e.g.
periodic, timeout, or "next tick") to a Document.
'''
def __init__(self, document, callback):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
callback (SessionCallback) :
The callback to add
'''
super(SessionCallbackAdded, self).__init__(document)
self.callback = callback
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._session_callback_added`` if
it exists.
'''
super(SessionCallbackAdded, self).dispatch(receiver)
if hasattr(receiver, '_session_callback_added'):
receiver._session_callback_added(self)
class SessionCallbackRemoved(DocumentChangedEvent):
''' A concrete event representing a change to remove an existing callback
(e.g. periodic, timeout, or "next tick") from a Document.
'''
def __init__(self, document, callback):
'''
Args:
document (Document) :
A Bokeh document that is to be updated.
callback (SessionCallback) :
The callback to remove
'''
super(SessionCallbackRemoved, self).__init__(document)
self.callback = callback
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._session_callback_removed`` if
it exists.
'''
super(SessionCallbackRemoved, self).dispatch(receiver)
if hasattr(receiver, '_session_callback_removed'):
receiver._session_callback_removed(self)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
nkhuyu/blaze | blaze/server/server.py | 10 | 11382 | from __future__ import absolute_import, division, print_function
import socket
import functools
import re
import flask
from flask import Blueprint, Flask, request, Response
try:
from bokeh.server.crossdomain import crossdomain
except ImportError:
def crossdomain(*args, **kwargs):
def wrapper(f):
@functools.wraps(f)
def wrapped(*a, **k):
return f(*a, **k)
return wrapped
return wrapper
from toolz import assoc
from datashape import Mono, discover
from datashape.predicates import iscollection, isscalar
from odo import odo
import blaze
from blaze import compute
from blaze.expr import utils as expr_utils
from blaze.compute import compute_up
from .serialization import json, all_formats
from ..interactive import InteractiveSymbol, coerce_scalar
from ..expr import Expr, symbol
__all__ = 'Server', 'to_tree', 'from_tree'
# http://www.speedguide.net/port.php?port=6363
# http://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers
DEFAULT_PORT = 6363
api = Blueprint('api', __name__)
pickle_extension_api = Blueprint('pickle_extension_api', __name__)
_no_default = object() # sentinel
def _get_option(option, options, default=_no_default):
try:
return options[option]
except KeyError:
if default is not _no_default:
return default
# Provides a more informative error message.
raise TypeError(
'The blaze api must be registered with {option}'.format(
option=option,
),
)
def _register_api(app, options, first_registration=False):
"""
Register the data with the blueprint.
"""
_get_data.cache[app] = _get_option('data', options)
_get_format.cache[app] = dict(
(f.name, f) for f in _get_option('formats', options)
)
_get_auth.cache[app] = (
_get_option('authorization', options, None) or (lambda a: True)
)
# Call the original register function.
Blueprint.register(api, app, options, first_registration)
api.register = _register_api
def _get_data():
"""
Retrieve the current application's data for use in the blaze server
endpoints.
"""
return _get_data.cache[flask.current_app]
_get_data.cache = {}
def _get_format(name):
return _get_format.cache[flask.current_app][name]
_get_format.cache = {}
def _get_auth():
return _get_auth.cache[flask.current_app]
_get_auth.cache = {}
def authorization(f):
@functools.wraps(f)
def authorized(*args, **kwargs):
if not _get_auth()(request.authorization):
return Response(
'bad auth token',
401,
{'WWW-Authenticate': 'Basic realm="Login Required"'},
)
return f(*args, **kwargs)
return authorized
class Server(object):
""" Blaze Data Server
Host local data through a web API
Parameters
----------
data : dict, optional
A dictionary mapping dataset name to any data format that blaze
understands.
formats : iterable, optional
An iterable of supported serialization formats. By default, the
server will support JSON.
A serialization format is an object that supports:
name, loads, and dumps.
authorization : callable, optional
A callable to be used to check the auth header from the client.
This callable should accept a single argument that will either be
None indicating that no header was passed, or an object
containing a username and password attribute. By default, all requests
are allowed.
Examples
--------
>>> from pandas import DataFrame
>>> df = DataFrame([[1, 'Alice', 100],
... [2, 'Bob', -200],
... [3, 'Alice', 300],
... [4, 'Dennis', 400],
... [5, 'Bob', -500]],
... columns=['id', 'name', 'amount'])
>>> server = Server({'accounts': df})
>>> server.run() # doctest: +SKIP
"""
__slots__ = 'app', 'data', 'port'
def __init__(self, data=None, formats=None, authorization=None):
app = self.app = Flask('blaze.server.server')
if data is None:
data = dict()
app.register_blueprint(
api,
data=data,
formats=formats if formats is not None else (json,),
authorization=authorization,
)
self.data = data
def run(self, *args, **kwargs):
"""Run the server"""
port = kwargs.pop('port', DEFAULT_PORT)
self.port = port
try:
self.app.run(*args, port=port, **kwargs)
except socket.error:
print("\tOops, couldn't connect on port %d. Is it busy?" % port)
if kwargs.get('retry', True):
# Attempt to start the server on a new port.
self.run(*args, **assoc(kwargs, 'port', port + 1))
@api.route('/datashape', methods=['GET'])
@crossdomain(origin='*', methods=['GET'])
@authorization
def shape():
return str(discover(_get_data()))
def to_tree(expr, names=None):
""" Represent Blaze expression with core data structures
Transform a Blaze expression into a form using only strings, dicts, lists
and base types (int, float, datetime, ....) This form can be useful for
serialization.
Parameters
----------
expr : Expr
A Blaze expression
Examples
--------
>>> t = symbol('t', 'var * {x: int32, y: int32}')
>>> to_tree(t) # doctest: +SKIP
{'op': 'Symbol',
'args': ['t', 'var * { x : int32, y : int32 }', False]}
>>> to_tree(t.x.sum()) # doctest: +SKIP
{'op': 'sum',
'args': [
{'op': 'Column',
'args': [
{
'op': 'Symbol'
'args': ['t', 'var * { x : int32, y : int32 }', False]
}
'x']
}]
}
    Simplify expression using explicit ``names`` dictionary. In the example
below we replace the ``Symbol`` node with the string ``'t'``.
>>> tree = to_tree(t.x, names={t: 't'})
>>> tree # doctest: +SKIP
{'op': 'Column', 'args': ['t', 'x']}
>>> from_tree(tree, namespace={'t': t})
t.x
See Also
--------
from_tree
"""
if names and expr in names:
return names[expr]
if isinstance(expr, tuple):
return [to_tree(arg, names=names) for arg in expr]
if isinstance(expr, expr_utils._slice):
return to_tree(expr.as_slice(), names=names)
if isinstance(expr, slice):
return {'op': 'slice',
'args': [to_tree(arg, names=names) for arg in
[expr.start, expr.stop, expr.step]]}
elif isinstance(expr, Mono):
return str(expr)
elif isinstance(expr, InteractiveSymbol):
return to_tree(symbol(expr._name, expr.dshape), names)
elif isinstance(expr, Expr):
return {'op': type(expr).__name__,
'args': [to_tree(arg, names) for arg in expr._args]}
else:
return expr
def expression_from_name(name):
"""
>>> expression_from_name('By')
<class 'blaze.expr.split_apply_combine.By'>
>>> expression_from_name('And')
<class 'blaze.expr.arithmetic.And'>
"""
import blaze
if hasattr(blaze, name):
return getattr(blaze, name)
if hasattr(blaze.expr, name):
return getattr(blaze.expr, name)
for signature, func in compute_up.funcs.items():
try:
if signature[0].__name__ == name:
return signature[0]
except TypeError:
pass
raise ValueError('%s not found in compute_up' % name)
def from_tree(expr, namespace=None):
""" Convert core data structures to Blaze expression
Core data structure representations created by ``to_tree`` are converted
back into Blaze expressions.
Parameters
----------
expr : dict
Examples
--------
>>> t = symbol('t', 'var * {x: int32, y: int32}')
>>> tree = to_tree(t)
>>> tree # doctest: +SKIP
{'op': 'Symbol',
'args': ['t', 'var * { x : int32, y : int32 }', False]}
>>> from_tree(tree)
t
>>> tree = to_tree(t.x.sum())
>>> tree # doctest: +SKIP
{'op': 'sum',
'args': [
{'op': 'Field',
'args': [
{
'op': 'Symbol'
'args': ['t', 'var * { x : int32, y : int32 }', False]
}
'x']
}]
}
>>> from_tree(tree)
sum(t.x)
    Simplify expression using explicit ``names`` dictionary. In the example
below we replace the ``Symbol`` node with the string ``'t'``.
>>> tree = to_tree(t.x, names={t: 't'})
>>> tree # doctest: +SKIP
{'op': 'Field', 'args': ['t', 'x']}
>>> from_tree(tree, namespace={'t': t})
t.x
See Also
--------
to_tree
"""
if isinstance(expr, dict):
op, args = expr['op'], expr['args']
if 'slice' == op:
return expr_utils._slice(*[from_tree(arg, namespace)
for arg in args])
if hasattr(blaze.expr, op):
cls = getattr(blaze.expr, op)
else:
cls = expression_from_name(op)
if 'Symbol' in op:
children = [from_tree(arg) for arg in args]
else:
children = [from_tree(arg, namespace) for arg in args]
return cls(*children)
elif isinstance(expr, list):
return tuple(from_tree(arg, namespace) for arg in expr)
if namespace and expr in namespace:
return namespace[expr]
else:
return expr
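# Hedged round-trip aside (illustrative only, mirroring the doctests above):
# serialize an expression to the plain-data tree form and rebuild it.  The
# name _tree_roundtrip_sketch is invented here.
def _tree_roundtrip_sketch():
    t = symbol('t', 'var * {x: int32, y: int32}')
    tree = to_tree(t.x.sum(), names={t: 't'})     # only dicts/lists/strings
    rebuilt = from_tree(tree, namespace={'t': t})
    assert rebuilt.isidentical(t.x.sum())
    return tree, rebuilt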
mimetype_regex = re.compile(r'^application/vnd\.blaze\+(%s)$' %
'|'.join(x.name for x in all_formats))
@api.route('/compute', methods=['POST', 'HEAD', 'OPTIONS'])
@crossdomain(origin='*', methods=['POST', 'HEAD', 'OPTIONS'])
@authorization
def compserver():
content_type = request.headers['content-type']
matched = mimetype_regex.match(content_type)
if matched is None:
return 'Unsupported serialization format %s' % content_type, 415
try:
serial = _get_format(matched.groups()[0])
except KeyError:
return (
"Unsupported serialization format '%s'" % matched.groups()[0],
415,
)
try:
payload = serial.loads(request.data)
except ValueError:
return ("Bad data. Got %s " % request.data, 400) # 400: Bad Request
ns = payload.get('namespace', dict())
dataset = _get_data()
ns[':leaf'] = symbol('leaf', discover(dataset))
expr = from_tree(payload['expr'], namespace=ns)
assert len(expr._leaves()) == 1
leaf = expr._leaves()[0]
try:
result = compute(expr, {leaf: dataset})
if iscollection(expr.dshape):
result = odo(result, list)
elif isscalar(expr.dshape):
result = coerce_scalar(result, str(expr.dshape))
except NotImplementedError as e:
# 501: Not Implemented
return ("Computation not supported:\n%s" % e, 501)
except Exception as e:
# 500: Internal Server Error
return ("Computation failed with message:\n%s" % e, 500)
return serial.dumps({
'datashape': str(expr.dshape),
'data': result,
'names': expr.fields
})
| bsd-3-clause |
massmutual/scikit-learn | sklearn/feature_selection/tests/test_feature_select.py | 103 | 22297 | """
Todo: cross-check the F-value with statsmodels
"""
from __future__ import division
import itertools
import warnings
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
# Test that our f_oneway gives the same result as scipy.stats
rng = np.random.RandomState(0)
X1 = rng.randn(10, 3)
X2 = 1 + rng.randn(10, 3)
f, pv = stats.f_oneway(X1, X2)
f2, pv2 = f_oneway(X1, X2)
assert_true(np.allclose(f, f2))
assert_true(np.allclose(pv, pv2))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise casting
    # errors with recent numpys
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 10))
y = np.arange(10)
fint, pint = f_oneway(X, y)
    # test that it gives the same result as with float
f, p = f_oneway(X.astype(np.float), y)
assert_array_almost_equal(f, fint, decimal=4)
assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression():
# Test whether the F test yields meaningful results
# on a simple simulated regression problem
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0)
F, pv = f_regression(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
# again without centering, compare with sparse
F, pv = f_regression(X, y, center=False)
F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
assert_array_almost_equal(F_sparse, F)
assert_array_almost_equal(pv_sparse, pv)
def test_f_regression_input_dtype():
# Test whether f_regression returns the same value
# for any numeric data_type
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
y = np.arange(10).astype(np.int)
F1, pv1 = f_regression(X, y)
F2, pv2 = f_regression(X, y.astype(np.float))
assert_array_almost_equal(F1, F2, 5)
assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
# Test whether f_regression preserves dof according to 'center' argument
# We use two centered variates so we have a simple relationship between
# F-score with variates centering and F-score without variates centering.
# Create toy example
X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean
n_samples = X.size
Y = np.ones(n_samples)
Y[::2] *= -1.
Y[0] = 0. # have Y mean being null
F1, _ = f_regression(X, Y, center=True)
F2, _ = f_regression(X, Y, center=False)
assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS
def test_f_classif_multi_class():
# Test whether the F test yields meaningful results
# on a simple simulated classification problem
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
F, pv = f_classif(X, y)
assert_true((F > 0).all())
assert_true((pv > 0).all())
assert_true((pv < 1).all())
assert_true((pv[:5] < 0.05).all())
assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_percentile_classif_sparse():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the percentile heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
X = sparse.csr_matrix(X)
univariate_filter = SelectPercentile(f_classif, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
param=25).fit(X, y).transform(X)
assert_array_equal(X_r.toarray(), X_r2.toarray())
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_r2inv = univariate_filter.inverse_transform(X_r2)
assert_true(sparse.issparse(X_r2inv))
support_mask = safe_mask(X_r2inv, support)
assert_equal(X_r2inv.shape, X.shape)
assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
# Check other columns are empty
assert_equal(X_r2inv.getnnz(), X_r.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the k best heuristic
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_classif, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_kbest_all():
# Test whether k="all" correctly returns all features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k='all')
X_r = univariate_filter.fit(X, y).transform(X)
assert_array_equal(X, X_r)
def test_select_kbest_zero():
# Test whether k=0 correctly returns no features.
X, y = make_classification(n_samples=20, n_features=10,
shuffle=False, random_state=0)
univariate_filter = SelectKBest(f_classif, k=0)
univariate_filter.fit(X, y)
support = univariate_filter.get_support()
gtruth = np.zeros(10, dtype=bool)
assert_array_equal(support, gtruth)
X_selected = assert_warns_message(UserWarning, 'No features were selected',
univariate_filter.transform, X)
assert_equal(X_selected.shape, (20, 0))
def test_select_heuristics_classif():
# Test whether the relative univariate feature selection
# gets the correct items in a simple classification problem
# with the fdr, fwe and fpr heuristics
X, y = make_classification(n_samples=200, n_features=20,
n_informative=3, n_redundant=2,
n_repeated=0, n_classes=8,
n_clusters_per_class=1, flip_y=0.0,
class_sep=10, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_classif, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_almost_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
scores = score_filter.scores_
support = score_filter.get_support()
assert_array_equal(np.sort(scores[support]),
np.sort(scores)[-support.sum():])
def test_select_percentile_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the percentile heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=25)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=25).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
X_2 = X.copy()
X_2[:, np.logical_not(support)] = 0
assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
# Check inverse_transform respects dtype
assert_array_equal(X_2.astype(bool),
univariate_filter.inverse_transform(X_r.astype(bool)))
def test_select_percentile_regression_full():
# Test whether the relative univariate feature selection
# selects all features when '100%' is asked.
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectPercentile(f_regression, percentile=100)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='percentile', param=100).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.ones(20)
assert_array_equal(support, gtruth)
def test_invalid_percentile():
X, y = make_regression(n_samples=10, n_features=20,
n_informative=2, shuffle=False, random_state=0)
assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y)
assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=-1).fit, X, y)
assert_raises(ValueError, GenericUnivariateSelect(mode='percentile',
param=101).fit, X, y)
def test_select_kbest_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the k best heuristic
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectKBest(f_regression, k=5)
X_r = univariate_filter.fit(X, y).transform(X)
assert_best_scores_kept(univariate_filter)
X_r2 = GenericUnivariateSelect(
f_regression, mode='k_best', param=5).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support, gtruth)
def test_select_heuristics_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fpr, fdr or fwe heuristics
X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
shuffle=False, random_state=0, noise=10)
univariate_filter = SelectFpr(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
gtruth = np.zeros(20)
gtruth[:5] = 1
for mode in ['fdr', 'fpr', 'fwe']:
X_r2 = GenericUnivariateSelect(
f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
# Test that fdr heuristic actually has low FDR.
def single_fdr(alpha, n_informative, random_state):
X, y = make_regression(n_samples=150, n_features=20,
n_informative=n_informative, shuffle=False,
random_state=random_state, noise=10)
with warnings.catch_warnings(record=True):
# Warnings can be raised when no features are selected
# (low alpha or very noisy data)
univariate_filter = SelectFdr(f_regression, alpha=alpha)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
num_false_positives = np.sum(support[n_informative:] == 1)
num_true_positives = np.sum(support[:n_informative] == 1)
if num_false_positives == 0:
return 0.
false_discovery_rate = (num_false_positives /
(num_true_positives + num_false_positives))
return false_discovery_rate
for alpha in [0.001, 0.01, 0.1]:
for n_informative in [1, 5, 10]:
# As per Benjamini-Hochberg, the expected false discovery rate
# should be lower than alpha:
# FDR = E(FP / (TP + FP)) <= alpha (a worked illustration follows this test)
false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
random_state) for
random_state in range(30)])
assert_greater_equal(alpha, false_discovery_rate)
# Make sure that the empirical false discovery rate increases
# with alpha:
if false_discovery_rate != 0:
assert_greater(false_discovery_rate, alpha / 10)
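# A small worked illustration of the Benjamini-Hochberg step-up rule behind the
# 'fdr' mode, using hypothetical p-values: with sorted p-values
# p = [0.001, 0.008, 0.039, 0.041, 0.042, 0.060], m = 6 and alpha = 0.05, the
# per-rank thresholds k / m * alpha are
# [0.0083, 0.0167, 0.025, 0.0333, 0.0417, 0.05]. The largest k with
# p_(k) <= k / m * alpha is k = 2, so only the two smallest p-values are kept,
# which is what bounds the expected FDR by alpha.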
def test_select_fwe_regression():
# Test whether the relative univariate feature selection
# gets the correct items in a simple regression problem
# with the fwe heuristic
X, y = make_regression(n_samples=200, n_features=20,
n_informative=5, shuffle=False, random_state=0)
univariate_filter = SelectFwe(f_regression, alpha=0.01)
X_r = univariate_filter.fit(X, y).transform(X)
X_r2 = GenericUnivariateSelect(
f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
assert_array_equal(X_r, X_r2)
support = univariate_filter.get_support()
gtruth = np.zeros(20)
gtruth[:5] = 1
assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool))
assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
# Test whether SelectKBest actually selects k features in case of ties.
# Prior to 0.11, SelectKBest would return more features than requested.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectKBest(dummy_score, k=1)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectKBest(dummy_score, k=2)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
# Test if SelectPercentile selects the right n_features in case of ties.
Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
y = [1]
dummy_score = lambda X, y: (X[0], X[0])
for X in Xs:
sel = SelectPercentile(dummy_score, percentile=34)
X1 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X1.shape[1], 1)
assert_best_scores_kept(sel)
sel = SelectPercentile(dummy_score, percentile=67)
X2 = ignore_warnings(sel.fit_transform)([X], y)
assert_equal(X2.shape[1], 2)
assert_best_scores_kept(sel)
def test_tied_pvalues():
# Test whether k-best and percentiles work with tied pvalues from chi2.
# chi2 will return the same p-values for the following features, but it
# will return different scores.
X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
y = [0, 1]
for perm in itertools.permutations((0, 1, 2)):
X = X0[:, perm]
Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
assert_equal(Xt.shape, (2, 2))
assert_not_in(9998, Xt)
def test_tied_scores():
# Test for stable sorting in k-best with tied scores.
X_train = np.array([[0, 0, 0], [1, 1, 1]])
y_train = [0, 1]
for n_features in [1, 2, 3]:
sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
X_test = sel.transform([[0, 1, 2]])
assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
# Assert that SelectKBest and SelectPercentile can handle NaNs.
# First feature has zero variance to confuse f_classif (ANOVA) and
# make it return a NaN.
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for select in (SelectKBest(f_classif, 2),
SelectPercentile(f_classif, percentile=67)):
ignore_warnings(select.fit)(X, y)
assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
SelectFdr, SelectFpr, GenericUnivariateSelect]:
assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y)
def test_invalid_k():
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
y = [1, 0, 1]
assert_raises(ValueError, SelectKBest(k=-1).fit, X, y)
assert_raises(ValueError, SelectKBest(k=4).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y)
assert_raises(ValueError,
GenericUnivariateSelect(mode='k_best', param=4).fit, X, y)
def test_f_classif_constant_feature():
# Test that f_classif warns if a feature is constant throughout.
X, y = make_classification(n_samples=10, n_features=5)
X[:, 0] = 2.0
assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
rng = np.random.RandomState(0)
# Generate random uncorrelated data: a strict univariate test should
# reject all the features
X = rng.rand(40, 10)
y = rng.randint(0, 4, size=40)
strict_selectors = [
SelectFwe(alpha=0.01).fit(X, y),
SelectFdr(alpha=0.01).fit(X, y),
SelectFpr(alpha=0.01).fit(X, y),
SelectPercentile(percentile=0).fit(X, y),
SelectKBest(k=0).fit(X, y),
]
for selector in strict_selectors:
assert_array_equal(selector.get_support(), np.zeros(10))
X_selected = assert_warns_message(
UserWarning, 'No features were selected', selector.transform, X)
assert_equal(X_selected.shape, (40, 0))
| bsd-3-clause |
maximus009/kaggle-galaxies | try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_dup3.py | 7 | 17439 | import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3
LEARNING_RATE_SCHEDULE = {
0: 0.04,
1800: 0.004,
2300: 0.0004,
}
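# The schedule is keyed by chunk index: the training loop below resets the
# shared learning_rate variable whenever the current chunk index appears as a
# key (0.04 at the start, 0.004 from chunk 1800, 0.0004 from chunk 2300).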
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every VALIDATE_EVERY chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.
GEN_BUFFER_SIZE = 1
# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_dup3.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_dup3.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"
print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]
num_input_representations = len(ds_transforms)
augmentation_params = {
'zoom_range': (1.0 / 1.3, 1.3),
'rotation_range': (0, 360),
'shear_range': (0, 0),
'translation_range': (-4, 4),
'do_flip': True,
}
augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
augmentation_params=augmentation_params, ds_transforms=ds_transforms,
target_sizes=input_sizes)
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)
train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)
y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids
# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)
num_valid = num_train // 10 # integer division
num_train -= num_valid
y_valid = y_train[num_train:]
y_train = y_train[:num_train]
valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
"""
this generates the training data in order, for postprocessing. Do not use this for actual training.
"""
data_gen_train = ra.realtime_fixed_augmented_data_gen(train_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_train, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_valid, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test',
ds_transforms=ds_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
return load_data.buffered_gen_mp(data_gen_test, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
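# givens maps each symbolic input to a slice of the GPU-resident shared arrays,
# so each call to a compiled function only transfers the integer batch index
# idx instead of the batch data itself.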
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
# train without normalisation for the first # chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
layers.dump_params(l6, e=e)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# make predictions for the test set, don't forget to cut off the zeros at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for the set, don't forget to cut off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
| bsd-3-clause |
shusenl/scikit-learn | sklearn/utils/extmath.py | 70 | 21951 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
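# A minimal sketch of row_norms, assuming numpy is imported as np (as above):
# >>> X = np.array([[3., 4.], [0., 1.]])
# >>> row_norms(X) # array([ 5., 1.])
# >>> np.allclose(row_norms(X, squared=True), (X * X).sum(axis=1)) # True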
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
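# A minimal sketch of fast_logdet on small symmetric matrices:
# >>> A = np.diag([2., 3.])
# >>> np.allclose(fast_logdet(A), np.log(np.linalg.det(A))) # True, log(6)
# >>> fast_logdet(np.diag([1., -1.])) # -inf, the determinant is negative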
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while ensuring Fortran contiguity.
This helps avoid the extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
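# A minimal sketch of safe_sparse_dot, assuming scipy.sparse is available:
# >>> from scipy import sparse
# >>> a = sparse.csr_matrix(np.eye(3))
# >>> b = sparse.csr_matrix(np.arange(9.).reshape(3, 3))
# >>> safe_sparse_dot(a, b) # sparse result
# >>> safe_sparse_dot(a, b, dense_output=True) # dense ndarray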
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (A.shape[0] x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
# sampling the range of A by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tends to be a little faster in that
case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
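# A minimal usage sketch with illustrative values: for a low-rank matrix the
# truncated factors reconstruct it closely.
# >>> rng = np.random.RandomState(0)
# >>> M = np.dot(rng.randn(100, 5), rng.randn(5, 50)) # rank-5 matrix
# >>> U, s, V = randomized_svd(M, n_components=5, random_state=0)
# >>> np.allclose(M, np.dot(U * s, V)) # True up to numerical error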
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.5.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
Real symmetric or complex hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Eigenvalues smaller than cond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
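# A minimal sketch of svd_flip: the sign convention becomes deterministic while
# the reconstruction is unchanged.
# >>> rng = np.random.RandomState(0)
# >>> A = rng.randn(4, 3)
# >>> U, s, V = linalg.svd(A, full_matrices=False)
# >>> U2, V2 = svd_flip(U.copy(), V.copy())
# >>> np.allclose(np.dot(U2 * s, V2), A) # True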
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N) or (M, )
Argument to the logistic function
out: array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
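# A minimal sketch of log_logistic: the split formulation agrees with the naive
# one wherever the latter does not overflow.
# >>> x = np.array([[-3., 0., 3.]])
# >>> np.allclose(log_logistic(x), -np.log1p(np.exp(-x))) # True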
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
copy: bool, optional
Copy X or not.
Returns
-------
out: array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
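# A minimal sketch of softmax: rows sum to one and large inputs do not overflow
# thanks to the row-wise max subtraction.
# >>> X = np.array([[1000., 1001.], [0., 0.]])
# >>> p = softmax(X)
# >>> np.allclose(p.sum(axis=1), 1.) # True
# >>> np.allclose(p[1], [0.5, 0.5]) # True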
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
analysis and recommendations, The American Statistician, Vol. 37, No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
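# A minimal sketch of the incremental update: it matches the batch statistics
# of the concatenated data.
# >>> rng = np.random.RandomState(0)
# >>> X1, X2 = rng.randn(20, 3), rng.randn(30, 3)
# >>> mean, var, n = _batch_mean_variance_update(
# ... X2, X1.mean(axis=0), X1.var(axis=0), X1.shape[0])
# >>> X = np.vstack([X1, X2])
# >>> np.allclose(mean, X.mean(axis=0)) and np.allclose(var, X.var(axis=0)) # True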
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
aubreyli/hmmlearn | hmmlearn/base.py | 1 | 23398 | from __future__ import print_function
import string
import sys
from collections import deque
import numpy as np
from scipy.misc import logsumexp
from sklearn.base import BaseEstimator, _pprint
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import check_is_fitted
from . import _hmmc
from .utils import normalize, log_normalize, iter_from_X_lengths, log_mask_zero
#: Supported decoder algorithms.
DECODER_ALGORITHMS = frozenset(("viterbi", "map"))
class ConvergenceMonitor(object):
"""Monitors and reports convergence to :data:`sys.stderr`.
Parameters
----------
tol : double
Convergence threshold. EM has converged either if the maximum
number of iterations is reached or the log probability
improvement between the two consecutive iterations is less
than threshold.
n_iter : int
Maximum number of iterations to perform.
verbose : bool
If ``True`` then per-iteration convergence reports are printed,
otherwise the monitor is mute.
Attributes
----------
history : deque
The log probability of the data for the last two training
iterations. If the values are not strictly increasing, the
model did not converge.
iter : int
Number of iterations performed while training the model.
"""
_template = "{iter:>10d} {logprob:>16.4f} {delta:>+16.4f}"
def __init__(self, tol, n_iter, verbose):
self.tol = tol
self.n_iter = n_iter
self.verbose = verbose
self.history = deque(maxlen=2)
self.iter = 0
def __repr__(self):
class_name = self.__class__.__name__
params = dict(vars(self), history=list(self.history))
return "{0}({1})".format(
class_name, _pprint(params, offset=len(class_name)))
def report(self, logprob):
"""Reports convergence to :data:`sys.stderr`.
The output consists of three columns: iteration number, log
probability of the data at the current iteration and convergence
rate. At the first iteration convergence rate is unknown and
is thus denoted by NaN.
Parameters
----------
logprob : float
The log probability of the data as computed by the EM algorithm
in the current iteration.
"""
if self.verbose:
delta = logprob - self.history[-1] if self.history else np.nan
message = self._template.format(
iter=self.iter + 1, logprob=logprob, delta=delta)
print(message, file=sys.stderr)
self.history.append(logprob)
self.iter += 1
@property
def converged(self):
"""``True`` if the EM algorithm converged and ``False`` otherwise."""
# XXX we might want to check that ``logprob`` is non-decreasing.
return (self.iter == self.n_iter or
(len(self.history) == 2 and
self.history[1] - self.history[0] < self.tol))
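# A minimal usage sketch of ConvergenceMonitor with illustrative log likelihoods:
# monitor = ConvergenceMonitor(tol=1e-2, n_iter=10, verbose=False)
# for logprob in (-120.0, -100.0, -99.995):
#     monitor.report(logprob)
#     if monitor.converged:
#         break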
class _BaseHMM(BaseEstimator):
"""Base class for Hidden Markov Models.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
Parameters
----------
n_components : int
Number of states in the model.
startprob_prior : array, shape (n_components, )
Initial state occupation prior distribution.
transmat_prior : array, shape (n_components, n_components)
Matrix of prior transition probabilities between states.
algorithm : string
Decoder algorithm. Must be one of "viterbi" or "map".
Defaults to "viterbi".
random_state: RandomState or an int seed
A random number generator instance.
n_iter : int, optional
Maximum number of iterations to perform.
tol : float, optional
Convergence threshold. EM will stop if the gain in log-likelihood
is below this value.
verbose : bool, optional
When ``True`` per-iteration convergence reports are printed
to :data:`sys.stderr`. You can diagnose convergence via the
:attr:`monitor_` attribute.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
emission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
subclass-specific emission parameters. Defaults to all
parameters.
Attributes
----------
monitor\_ : ConvergenceMonitor
Monitor object used to check the convergence of EM.
startprob\_ : array, shape (n_components, )
Initial state occupation distribution.
transmat\_ : array, shape (n_components, n_components)
Matrix of transition probabilities between states.
"""
def __init__(self, n_components=1,
startprob_prior=1.0, transmat_prior=1.0,
algorithm="viterbi", random_state=None,
n_iter=10, tol=1e-2, verbose=False,
params=string.ascii_letters,
init_params=string.ascii_letters):
self.n_components = n_components
self.params = params
self.init_params = init_params
self.startprob_prior = startprob_prior
self.transmat_prior = transmat_prior
self.algorithm = algorithm
self.random_state = random_state
self.n_iter = n_iter
self.tol = tol
self.verbose = verbose
def score_samples(self, X, lengths=None):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
logprob : float
Log likelihood of ``X``.
posteriors : array, shape (n_samples, n_components)
State-membership probabilities for each sample in ``X``.
See Also
--------
score : Compute the log probability under the model.
decode : Find most likely state sequence corresponding to ``X``.
"""
check_is_fitted(self, "startprob_")
self._check()
X = check_array(X)
n_samples = X.shape[0]
logprob = 0
posteriors = np.zeros((n_samples, self.n_components))
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprobij, fwdlattice = self._do_forward_pass(framelogprob)
logprob += logprobij
bwdlattice = self._do_backward_pass(framelogprob)
posteriors[i:j] = self._compute_posteriors(fwdlattice, bwdlattice)
return logprob, posteriors
def score(self, X, lengths=None):
"""Compute the log probability under the model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
logprob : float
Log likelihood of ``X``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
decode : Find most likely state sequence corresponding to ``X``.
"""
check_is_fitted(self, "startprob_")
self._check()
X = check_array(X)
# XXX we can unroll forward pass for speed and memory efficiency.
logprob = 0
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprobij, _fwdlattice = self._do_forward_pass(framelogprob)
logprob += logprobij
return logprob
def _decode_viterbi(self, X):
framelogprob = self._compute_log_likelihood(X)
return self._do_viterbi_pass(framelogprob)
def _decode_map(self, X):
_, posteriors = self.score_samples(X)
logprob = np.max(posteriors, axis=1).sum()
state_sequence = np.argmax(posteriors, axis=1)
return logprob, state_sequence
def decode(self, X, lengths=None, algorithm=None):
"""Find most likely state sequence corresponding to ``X``.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
algorithm : string
Decoder algorithm. Must be one of "viterbi" or "map".
If not given, :attr:`algorithm` is used.
Returns
-------
logprob : float
Log probability of the produced state sequence.
state_sequence : array, shape (n_samples, )
Labels for each sample from ``X`` obtained via a given
decoder ``algorithm``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
check_is_fitted(self, "startprob_")
self._check()
algorithm = algorithm or self.algorithm
if algorithm not in DECODER_ALGORITHMS:
raise ValueError("Unknown decoder {0!r}".format(algorithm))
decoder = {
"viterbi": self._decode_viterbi,
"map": self._decode_map
}[algorithm]
X = check_array(X)
n_samples = X.shape[0]
logprob = 0
state_sequence = np.empty(n_samples, dtype=int)
for i, j in iter_from_X_lengths(X, lengths):
# XXX decoder works on a single sample at a time!
logprobij, state_sequenceij = decoder(X[i:j])
logprob += logprobij
state_sequence[i:j] = state_sequenceij
return logprob, state_sequence
def predict(self, X, lengths=None):
"""Find most likely state sequence corresponding to ``X``.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
state_sequence : array, shape (n_samples, )
Labels for each sample from ``X``.
"""
_, state_sequence = self.decode(X, lengths)
return state_sequence
def predict_proba(self, X, lengths=None):
"""Compute the posterior probability for each state in the model.
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
posteriors : array, shape (n_samples, n_components)
State-membership probabilities for each sample from ``X``.
"""
_, posteriors = self.score_samples(X, lengths)
return posteriors
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int
Number of samples to generate.
random_state : RandomState or an int seed
A random number generator instance. If ``None``, the object's
``random_state`` is used.
Returns
-------
X : array, shape (n_samples, n_features)
Feature matrix.
state_sequence : array, shape (n_samples, )
State sequence produced by the model.
"""
check_is_fitted(self, "startprob_")
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_cdf = np.cumsum(self.startprob_)
transmat_cdf = np.cumsum(self.transmat_, axis=1)
currstate = (startprob_cdf > random_state.rand()).argmax()
state_sequence = [currstate]
X = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for t in range(n_samples - 1):
currstate = (transmat_cdf[currstate] > random_state.rand()) \
.argmax()
state_sequence.append(currstate)
X.append(self._generate_sample_from_state(
currstate, random_state=random_state))
return np.atleast_2d(X), np.array(state_sequence, dtype=int)
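    # Hedged sketch: sample() first draws a state path from startprob_ and
    # transmat_ via inverse-CDF sampling (the cumsum arrays above), then
    # draws one observation per visited state. ``model`` is an assumed
    # fitted instance.
    #
    #     X_gen, Z_gen = model.sample(n_samples=50, random_state=0)
    #     # X_gen: (50, n_features) observations, Z_gen: (50,) hidden states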
def fit(self, X, lengths=None):
"""Estimate model parameters.
An initialization step is performed before entering the
EM algorithm. If you want to avoid this step for a subset of
        the parameters, pass a proper ``init_params`` keyword argument
        to the estimator's constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, )
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
self._init(X, lengths=lengths)
self._check()
self.monitor_ = ConvergenceMonitor(self.tol, self.n_iter, self.verbose)
for iter in range(self.n_iter):
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprob, fwdlattice = self._do_forward_pass(framelogprob)
curr_logprob += logprob
bwdlattice = self._do_backward_pass(framelogprob)
posteriors = self._compute_posteriors(fwdlattice, bwdlattice)
self._accumulate_sufficient_statistics(
stats, X[i:j], framelogprob, posteriors, fwdlattice,
bwdlattice)
# XXX must be before convergence check, because otherwise
# there won't be any updates for the case ``n_iter=1``.
self._do_mstep(stats)
self.monitor_.report(curr_logprob)
if self.monitor_.converged:
break
return self
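    # Hedged sketch of a typical EM fit over multiple concatenated sequences;
    # after fitting, the convergence history is available on ``monitor_``.
    # ``sequences`` is an illustrative list of 2-D arrays.
    #
    #     model.fit(np.vstack(sequences), lengths=[len(s) for s in sequences])
    #     print(model.monitor_.converged)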
def _do_viterbi_pass(self, framelogprob):
n_samples, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_samples, n_components, log_mask_zero(self.startprob_),
log_mask_zero(self.transmat_), framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
n_samples, n_components = framelogprob.shape
fwdlattice = np.zeros((n_samples, n_components))
_hmmc._forward(n_samples, n_components,
log_mask_zero(self.startprob_),
log_mask_zero(self.transmat_),
framelogprob, fwdlattice)
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_samples, n_components = framelogprob.shape
bwdlattice = np.zeros((n_samples, n_components))
_hmmc._backward(n_samples, n_components,
log_mask_zero(self.startprob_),
log_mask_zero(self.transmat_),
framelogprob, bwdlattice)
return bwdlattice
def _compute_posteriors(self, fwdlattice, bwdlattice):
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
log_gamma = fwdlattice + bwdlattice
log_normalize(log_gamma, axis=1)
return np.exp(log_gamma)
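    # In log space the smoothed posterior (gamma) combines the forward and
    # backward lattices:
    #
    #     log gamma_t(i) = log alpha_t(i) + log beta_t(i) - log P(X)
    #
    # The call to log_normalize above handles the "- log P(X)" term by
    # renormalizing each row, which also guards against aggressive pruning.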
def _init(self, X, lengths):
"""Initializes model parameters prior to fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, )
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
"""
init = 1. / self.n_components
if 's' in self.init_params or not hasattr(self, "startprob_"):
self.startprob_ = np.full(self.n_components, init)
if 't' in self.init_params or not hasattr(self, "transmat_"):
self.transmat_ = np.full((self.n_components, self.n_components),
init)
def _check(self):
"""Validates model parameters prior to fitting.
Raises
------
ValueError
If any of the parameters are invalid, e.g. if :attr:`startprob_`
            does not sum to 1.
"""
self.startprob_ = np.asarray(self.startprob_)
if len(self.startprob_) != self.n_components:
raise ValueError("startprob_ must have length n_components")
if not np.allclose(self.startprob_.sum(), 1.0):
raise ValueError("startprob_ must sum to 1.0 (got {0:.4f})"
.format(self.startprob_.sum()))
self.transmat_ = np.asarray(self.transmat_)
if self.transmat_.shape != (self.n_components, self.n_components):
raise ValueError(
"transmat_ must have shape (n_components, n_components)")
if not np.allclose(self.transmat_.sum(axis=1), 1.0):
raise ValueError("rows of transmat_ must sum to 1.0 (got {0})"
.format(self.transmat_.sum(axis=1)))
def _compute_log_likelihood(self, X):
"""Computes per-component log probability under the model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
Returns
-------
logprob : array, shape (n_samples, n_components)
Log probability of each sample in ``X`` for each of the
model states.
"""
def _generate_sample_from_state(self, state, random_state=None):
"""Generates a random sample from a given component.
Parameters
----------
state : int
Index of the component to condition on.
random_state: RandomState or an int seed
A random number generator instance. If ``None``, the object's
``random_state`` is used.
Returns
-------
X : array, shape (n_features, )
A random sample from the emission distribution corresponding
to a given component.
"""
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
"""Initializes sufficient statistics required for M-step.
The method is *pure*, meaning that it doesn't change the state of
        the instance. For extensibility, computed statistics are stored
in a dictionary.
Returns
-------
nobs : int
Number of samples in the data.
start : array, shape (n_components, )
An array where the i-th element corresponds to the posterior
probability of the first sample being generated by the i-th
state.
trans : array, shape (n_components, n_components)
An array where the (i, j)-th element corresponds to the
posterior probability of transitioning between the i-th to j-th
states.
"""
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(self, stats, X, framelogprob,
posteriors, fwdlattice, bwdlattice):
"""Updates sufficient statistics from a given sample.
Parameters
----------
stats : dict
Sufficient statistics as returned by
:meth:`~base._BaseHMM._initialize_sufficient_statistics`.
X : array, shape (n_samples, n_features)
Sample sequence.
framelogprob : array, shape (n_samples, n_components)
Log-probabilities of each sample under each of the model states.
posteriors : array, shape (n_samples, n_components)
Posterior probabilities of each sample being generated by each
of the model states.
fwdlattice, bwdlattice : array, shape (n_samples, n_components)
Log-forward and log-backward probabilities.
"""
stats['nobs'] += 1
if 's' in self.params:
stats['start'] += posteriors[0]
if 't' in self.params:
n_samples, n_components = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_samples <= 1:
return
lneta = np.zeros((n_samples - 1, n_components, n_components))
_hmmc._compute_lneta(n_samples, n_components, fwdlattice,
log_mask_zero(self.transmat_),
bwdlattice, framelogprob, lneta)
stats['trans'] += np.exp(logsumexp(lneta, axis=0))
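    # The quantity accumulated into stats['trans'] is the expected transition
    # count: the sum over time of the pairwise posteriors
    #
    #     xi_t(i, j) = alpha_t(i) * a_ij * b_j(x_{t+1}) * beta_{t+1}(j) / P(X)
    #
    # which _hmmc._compute_lneta produces in log space (lneta); the logsumexp
    # over the time axis followed by exp yields sum_t xi_t(i, j).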
def _do_mstep(self, stats):
"""Performs the M-step of EM algorithm.
Parameters
----------
stats : dict
Sufficient statistics updated from all available samples.
"""
# The ``np.where`` calls guard against updating forbidden states
# or transitions in e.g. a left-right HMM.
if 's' in self.params:
startprob_ = self.startprob_prior - 1.0 + stats['start']
self.startprob_ = np.where(self.startprob_ == 0.0,
self.startprob_, startprob_)
normalize(self.startprob_)
if 't' in self.params:
transmat_ = self.transmat_prior - 1.0 + stats['trans']
self.transmat_ = np.where(self.transmat_ == 0.0,
self.transmat_, transmat_)
normalize(self.transmat_, axis=1)
| bsd-3-clause |
kyleam/seaborn | seaborn/miscplot.py | 34 | 1498 | from __future__ import division
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
def palplot(pal, size=1):
"""Plot the values in a color palette as a horizontal array.
Parameters
----------
pal : sequence of matplotlib colors
colors, i.e. as returned by seaborn.color_palette()
size :
scaling factor for size of plot
"""
n = len(pal)
f, ax = plt.subplots(1, 1, figsize=(n * size, size))
ax.imshow(np.arange(n).reshape(1, n),
cmap=mpl.colors.ListedColormap(list(pal)),
interpolation="nearest", aspect="auto")
ax.set_xticks(np.arange(n) - .5)
ax.set_yticks([-.5, .5])
ax.set_xticklabels([])
ax.set_yticklabels([])
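# Hypothetical usage sketch (not part of seaborn): preview a small palette
# with palplot. The hex colors below are arbitrary examples; any sequence of
# matplotlib colors works.
def _demo_palplot():
    palplot(["#4C72B0", "#DD8452", "#55A868"], size=1.5)
    plt.show()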
def puppyplot(grown_up=False):
"""Plot today's daily puppy. Only works in the IPython notebook."""
from .external.six.moves.urllib.request import urlopen
from IPython.display import HTML
try:
from bs4 import BeautifulSoup
url = "http://www.dailypuppy.com"
if grown_up:
url += "/dogs"
html_doc = urlopen(url)
soup = BeautifulSoup(html_doc)
puppy = soup.find("div", {"class": "daily_puppy"})
return HTML(str(puppy.img))
except ImportError:
html = ('<img src="http://cdn-www.dailypuppy.com/dog-images/'
'decker-the-nova-scotia-duck-tolling-retriever_'
'72926_2013-11-04_w450.jpg" style="width:450px;"/>')
return HTML(html)
| bsd-3-clause |
jas02/easybuild-easyblocks | easybuild/easyblocks/x/xmipp.py | 10 | 8904 | ##
# Copyright 2015-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Xmipp, implemented as an easyblock
@author: Jens Timmerman (Ghent University)
@author: Pablo Escobar (sciCORE, SIB, University of Basel)
@author: Kenneth Hoste (Ghent University)
"""
import fileinput
import os
import re
import stat
import sys
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.pythonpackage import det_pylibdir
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import adjust_permissions, mkdir, write_file
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
class EB_Xmipp(EasyBlock):
"""
easyblock to install Xmipp
"""
def __init__(self, *args, **kwargs):
"""Easyblock constructor, enable building in installation directory."""
super(EB_Xmipp, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.xmipp_pythonpaths = []
def extract_step(self):
"""Extract Xmipp sources."""
# strip off 'xmipp' part to avoid having everything in a 'xmipp' subdirectory
self.cfg.update('unpack_options', '--strip-components=1')
super(EB_Xmipp, self).extract_step()
def configure_step(self):
"""Set configure options."""
if self.toolchain.mpi_family() == toolchain.INTELMPI:
mpi_bindir = os.path.join(get_software_root('impi'), 'intel64', 'bin')
else:
mpi_bindir = os.path.join(get_software_root(self.toolchain.MPI_MODULE_NAME[0]), 'bin')
root_java = get_software_root("Java")
        if not root_java:
raise EasyBuildError("Module for dependency Java not loaded.")
configure_args = ' '.join([
'profile=no fast=yes warn=no release=yes gtest=yes static=no cuda=no debug=no matlab=no',
'LINKERFORPROGRAMS=%s' % os.getenv('CXX'),
'MPI_BINDIR=%s' % mpi_bindir,
'MPI_LIB=mpi',
'JAVA_HOME=%s' % os.getenv('JAVA_HOME'),
'JAVAC=javac',
'CC=%s' % os.getenv('CC'),
# pass $CXXFLAGS in Python list syntax and avoid spaces, e.g.: ['-O2','-march=native']
'CXXFLAGS=%s' % str(os.getenv('CXXFLAGS').split(' ')).replace(' ', ''),
'CXX=%s' % os.getenv('CXX'),
'MPI_CC=%s' % os.getenv('MPICC'),
# pass $CFLAGS in Python list syntax and avoid spaces, e.g.: ['-O2','-march=native']
'CCFLAGS=%s' % str(os.getenv('CFLAGS').split(' ')).replace(' ', ''),
'MPI_CXX=%s' % os.getenv('MPICXX'),
'MPI_INCLUDE=%s' % os.getenv('MPI_INC_DIR'),
'MPI_LIBDIR=%s' % os.getenv('MPI_LIB_DIR'),
'MPI_LINKERFORPROGRAMS=%s' % os.getenv('MPICXX'),
'LIBPATH=%s' % os.getenv('LD_LIBRARY_PATH'),
])
# define list of configure options, which will be passed to Xmipp's install.sh script via --configure-args
self.cfg['configopts'] = configure_args
self.log.info("Configure arguments for Xmipp install.sh script: %s", self.cfg['configopts'])
def build_step(self):
"""No custom build step (see install step)."""
pass
def install_step(self):
"""Build/install Xmipp using provided install.sh script."""
pylibdir = det_pylibdir()
self.xmipp_pythonpaths = [
# location where Python packages will be installed by Xmipp installer
pylibdir,
'protocols',
os.path.join('libraries', 'bindings', 'python'),
]
python_root = get_software_root('Python')
if python_root:
# extend $PYTHONPATH
all_pythonpaths = [os.path.join(self.installdir, p) for p in self.xmipp_pythonpaths]
            # required so packages installed as extensions in Python dep are picked up
all_pythonpaths.append(os.path.join(python_root, pylibdir))
all_pythonpaths.append(os.environ.get('PYTHONPATH', ''))
env.setvar('PYTHONPATH', os.pathsep.join(all_pythonpaths))
# location where Python packages will be installed by Xmipp installer must exist already (setuptools)
mkdir(os.path.join(self.installdir, pylibdir), parents=True)
# put dummy xmipp_python script in place if Python is used as a dependency
bindir = os.path.join(self.installdir, 'bin')
mkdir(bindir)
xmipp_python = os.path.join(bindir, 'xmipp_python')
xmipp_python_script_body = '\n'.join([
'#!/bin/sh',
'%s/bin/python "$@"' % python_root,
])
write_file(xmipp_python, xmipp_python_script_body)
adjust_permissions(xmipp_python, stat.S_IXUSR|stat.S_IXGRP|stat.S_IXOTH)
pyshortver = '.'.join(get_software_version('Python').split('.')[:2])
# make sure Python.h and numpy header are found
env.setvar('CPATH', os.pathsep.join([
os.path.join(python_root, 'include', 'python%s' % pyshortver),
os.path.join(python_root, pylibdir, 'numpy', 'core', 'include'),
os.environ.get('CPATH', ''),
]))
cmd_opts = []
# disable (re)building of supplied dependencies
dep_names = [dep['name'] for dep in self.cfg['dependencies']]
for dep in ['FFTW', 'HDF5', ('libjpeg-turbo', 'jpeg'), ('LibTIFF', 'tiff'), 'matplotlib', 'Python', 'SQLite',
'Tcl', 'Tk']:
if isinstance(dep, tuple):
dep, opt = dep
else:
opt = dep.lower()
# don't check via get_software_root, check listed dependencies directly (relevant for FFTW)
if dep in dep_names:
cmd_opts.append('--%s=false' % opt)
# Python should also provide numpy/mpi4py
if dep == 'Python':
cmd_opts.extend(['--numpy=false', '--mpi4py=false'])
if '--tcl=false' in cmd_opts and '--tk=false' in cmd_opts:
cmd_opts.append('--tcl-tk=false')
# patch install.sh script to inject configure options
# setting $CONFIGURE_ARGS or using --configure-args doesn't work...
for line in fileinput.input('install.sh', inplace=1, backup='.orig.eb'):
line = re.sub(r"^CONFIGURE_ARGS.*$", 'CONFIGURE_ARGS="%s"' % self.cfg['configopts'], line)
sys.stdout.write(line)
cmd = './install.sh -j %s --unattended=true %s' % (self.cfg['parallel'], ' '.join(cmd_opts))
out, _ = run_cmd(cmd, log_all=True, simple=False)
if not re.search("Xmipp has been successfully compiled", out):
raise EasyBuildError("Xmipp installation did not complete successfully?")
def sanity_check_step(self):
"""Custom sanity check for Xmipp."""
custom_paths = {
# incomplete list, random picks, cfr. http://xmipp.cnb.csic.es/twiki/bin/view/Xmipp/ListOfProgramsv3
'files': ['bin/xmipp_%s' % x for x in ['compile', 'imagej', 'mpi_run', 'phantom_create',
'transform_filter', 'tomo_project', 'volume_align']],
'dirs': ['lib'],
}
super(EB_Xmipp, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Define Xmipp specific variables in generated module file, i.e. XMIPP_HOME."""
txt = super(EB_Xmipp, self).make_module_extra()
txt += self.module_generator.set_environment('XMIPP_HOME', self.installdir)
txt += self.module_generator.prepend_paths('PYTHONPATH', self.xmipp_pythonpaths)
return txt
| gpl-2.0 |
CDSFinance/zipline | zipline/utils/cli.py | 10 | 8343 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import argparse
from copy import copy
from six import print_
from six.moves import configparser
import pandas as pd
try:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
PYGMENTS = True
except ImportError:
PYGMENTS = False
import zipline
from zipline.errors import NoSourceError, PipelineDateError
DEFAULTS = {
'data_frequency': 'daily',
'capital_base': '10e6',
'source': 'yahoo',
'symbols': 'AAPL',
'metadata_index': 'symbol',
'source_time_column': 'Date',
}
def parse_args(argv, ipython_mode=False):
"""Parse list of arguments.
If a config file is provided (via -c), it will read in the
supplied options and overwrite any global defaults.
All other directly supplied arguments will overwrite the config
file settings.
Arguments:
* argv : list of strings
List of arguments, e.g. ['-c', 'my.conf']
* ipython_mode : bool <default=True>
Whether to parse IPython specific arguments
like --local_namespace
Notes:
Default settings can be found in zipline.utils.cli.DEFAULTS.
"""
# Parse any conf_file specification
# We make this parser with add_help=False so that
# it doesn't parse -h and print help.
conf_parser = argparse.ArgumentParser(
# Don't mess with format of description
formatter_class=argparse.RawDescriptionHelpFormatter,
# Turn off help, so we print all options in response to -h
add_help=False
)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file",
metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(argv)
defaults = copy(DEFAULTS)
if args.conf_file:
config = configparser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("Defaults")))
# Parse rest of arguments
# Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
description="Zipline version %s." % zipline.__version__,
parents=[conf_parser]
)
parser.set_defaults(**defaults)
parser.add_argument('--algofile', '-f')
parser.add_argument('--data-frequency',
choices=('minute', 'daily'))
parser.add_argument('--start', '-s')
parser.add_argument('--end', '-e')
parser.add_argument('--capital_base')
parser.add_argument('--source', '-d', choices=('yahoo',))
parser.add_argument('--source_time_column', '-t')
parser.add_argument('--symbols')
parser.add_argument('--output', '-o')
parser.add_argument('--metadata_path', '-m')
parser.add_argument('--metadata_index', '-x')
parser.add_argument('--print-algo', '-p', dest='print_algo',
action='store_true')
parser.add_argument('--no-print-algo', '-q', dest='print_algo',
action='store_false')
if ipython_mode:
parser.add_argument('--local_namespace', action='store_true')
args = parser.parse_args(remaining_argv)
    return vars(args)
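# Hedged example (not part of zipline): parse_args accepts an argv-style list
# and returns a plain dict; the algofile path below is a placeholder.
def _demo_parse_args():
    parsed = parse_args(['-f', 'my_algo.py', '--symbols', 'AAPL,MSFT',
                         '--start', '2014-01-01', '--end', '2014-12-31'])
    return parsed['symbols'], parsed['capital_base']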
def parse_cell_magic(line, cell):
"""Parse IPython magic
"""
args_list = line.split(' ')
args = parse_args(args_list, ipython_mode=True)
# Remove print_algo kwarg to overwrite below.
args.pop('print_algo')
local_namespace = args.pop('local_namespace', False)
# By default, execute inside IPython namespace
if not local_namespace:
args['namespace'] = get_ipython().user_ns # flake8: noqa
# If we are running inside NB, do not output to file but create a
# variable instead
output_var_name = args.pop('output', None)
perf = run_pipeline(print_algo=False, algo_text=cell, **args)
if output_var_name is not None:
get_ipython().user_ns[output_var_name] = perf # flake8: noqa
def run_pipeline(print_algo=True, **kwargs):
"""Runs a full zipline pipeline given configuration keyword
arguments.
    1. Load data (start and end dates can be provided as strings, as
    well as the source and symbols).
2. Instantiate algorithm (supply either algo_text or algofile
kwargs containing initialize() and handle_data() functions). If
algofile is supplied, will try to look for algofile_analyze.py and
append it.
3. Run algorithm (supply capital_base as float).
4. Return performance dataframe.
:Arguments:
* print_algo : bool <default=True>
Whether to print the algorithm to command line. Will use
pygments syntax coloring if pygments is found.
"""
start = kwargs['start']
end = kwargs['end']
# Compare against None because strings/timestamps may have been given
if start is not None:
start = pd.Timestamp(start, tz='UTC')
if end is not None:
end = pd.Timestamp(end, tz='UTC')
# Fail out if only one bound is provided
if ((start is None) or (end is None)) and (start != end):
raise PipelineDateError(start=start, end=end)
# Check if start and end are provided, and if the sim_params need to read
# a start and end from the DataSource
if start is None:
overwrite_sim_params = True
else:
overwrite_sim_params = False
symbols = kwargs['symbols'].split(',')
asset_identifier = kwargs['metadata_index']
# Pull asset metadata
asset_metadata = kwargs.get('asset_metadata', None)
asset_metadata_path = kwargs['metadata_path']
# Read in a CSV file, if applicable
if asset_metadata_path is not None:
if os.path.isfile(asset_metadata_path):
asset_metadata = pd.read_csv(asset_metadata_path,
index_col=asset_identifier)
source_arg = kwargs['source']
source_time_column = kwargs['source_time_column']
if source_arg is None:
raise NoSourceError()
elif source_arg == 'yahoo':
source = zipline.data.load_bars_from_yahoo(
stocks=symbols, start=start, end=end)
elif os.path.isfile(source_arg):
source = zipline.data.load_prices_from_csv(
filepath=source_arg,
identifier_col=source_time_column
)
elif os.path.isdir(source_arg):
source = zipline.data.load_prices_from_csv_folder(
folderpath=source_arg,
identifier_col=source_time_column
)
else:
raise NotImplementedError(
'Source %s not implemented.' % kwargs['source'])
algo_text = kwargs.get('algo_text', None)
if algo_text is None:
# Expect algofile to be set
algo_fname = kwargs['algofile']
with open(algo_fname, 'r') as fd:
algo_text = fd.read()
if print_algo:
if PYGMENTS:
highlight(algo_text, PythonLexer(), TerminalFormatter(),
outfile=sys.stdout)
else:
print_(algo_text)
algo = zipline.TradingAlgorithm(script=algo_text,
namespace=kwargs.get('namespace', {}),
capital_base=float(kwargs['capital_base']),
algo_filename=kwargs.get('algofile'),
equities_metadata=asset_metadata,
identifiers=symbols,
start=start,
end=end)
perf = algo.run(source, overwrite_sim_params=overwrite_sim_params)
output_fname = kwargs.get('output', None)
if output_fname is not None:
perf.to_pickle(output_fname)
return perf
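# Hedged example (not part of zipline): a minimal run_pipeline invocation that
# reuses the module-level DEFAULTS; 'my_algo.py' is a placeholder path and the
# call will download Yahoo data for the default symbol.
def _demo_run_pipeline():
    kwargs = dict(DEFAULTS)
    kwargs.update({'algofile': 'my_algo.py', 'start': '2014-01-01',
                   'end': '2014-12-31', 'metadata_path': None})
    return run_pipeline(print_algo=False, **kwargs)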
| apache-2.0 |
liyu1990/sklearn | sklearn/svm/classes.py | 7 | 40216 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
from ..utils.multiclass import check_classification_targets
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
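    # Hedged usage sketch (not part of the original docstring): LinearSVC
    # follows the standard scikit-learn estimator API; the toy data is
    # illustrative only.
    #
    #     from sklearn.svm import LinearSVC
    #     clf = LinearSVC(C=0.5).fit([[0., 0.], [1., 1.]], [0, 1])
    #     clf.predict([[2., 2.]])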
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the
        epsilon-insensitive loss (standard SVR) while
        'squared_epsilon_insensitive' is the squared epsilon-insensitive loss.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
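    # Hedged usage sketch: LinearSVR is the liblinear-based regression
    # counterpart of LinearSVC; the synthetic data is illustrative only.
    #
    #     import numpy as np
    #     rng = np.random.RandomState(0)
    #     reg = LinearSVR(C=1.0, epsilon=0.1).fit(rng.randn(20, 3),
    #                                             rng.randn(20))
    #     reg.predict(rng.randn(2, 3))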
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
with points predicted within a distance epsilon from the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
        `coef_` is a readonly property derived from `dual_coef_` and
        `support_vectors_`.
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
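    Examples
    --------
    A minimal usage sketch (the toy data and the indicated labels are
    assumptions for illustration only; the output check is skipped):

    >>> import numpy as np
    >>> from sklearn.svm import OneClassSVM
    >>> X = np.array([[0.], [0.2], [0.3], [0.4], [10.]])
    >>> clf = OneClassSVM(nu=0.2, gamma='auto').fit(X)
    >>> clf.predict(X)  # doctest: +SKIP
    array([ 1,  1,  1,  1, -1])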
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
        dec : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
| bsd-3-clause |
alexeyum/scikit-learn | sklearn/neighbors/tests/test_neighbors.py | 3 | 46375 | from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_greater
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
from sklearn.exceptions import DataConversionWarning
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid. """
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
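# Illustrative behaviour of _weight_func (a sketch, not exercised directly by
# the tests): with the divide-by-zero silenced, a zero distance maps to ``inf``
# while a distance of 2 maps to 0.25, e.g.
#
#     _weight_func(np.array([0., 2.]))  # -> array([inf, 0.25])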
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
# Test unsupervised neighbors methods
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
# test the types of valid input into NearestNeighbors
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_precomputed(random_state=42):
"""Tests unsupervised NearestNeighbors with a distance matrix."""
# Note: smaller samples may result in spurious test success
rng = np.random.RandomState(random_state)
X = rng.random_sample((10, 4))
Y = rng.random_sample((3, 4))
DXX = metrics.pairwise_distances(X, metric='euclidean')
DYX = metrics.pairwise_distances(Y, X, metric='euclidean')
for method in ['kneighbors']:
# TODO: also test radius_neighbors, but requires different assertion
# As a feature matrix (n_samples by n_features)
nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
nbrs_X.fit(X)
dist_X, ind_X = getattr(nbrs_X, method)(Y)
# As a dense distance matrix (n_samples by n_samples)
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check auto works too
nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto',
metric='precomputed')
nbrs_D.fit(DXX)
dist_D, ind_D = getattr(nbrs_D, method)(DYX)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Check X=None in prediction
dist_X, ind_X = getattr(nbrs_X, method)(None)
dist_D, ind_D = getattr(nbrs_D, method)(None)
assert_array_almost_equal(dist_X, dist_D)
assert_array_almost_equal(ind_X, ind_D)
# Must raise a ValueError if the matrix is not of correct shape
assert_raises(ValueError, getattr(nbrs_D, method), X)
target = np.arange(X.shape[0])
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
print(Est)
est = Est(metric='euclidean')
est.radius = est.n_neighbors = 1
pred_X = est.fit(X, target).predict(Y)
est.metric = 'precomputed'
pred_D = est.fit(DXX, target).predict(DYX)
assert_array_almost_equal(pred_X, pred_D)
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(), X, y)
precomp_score = cross_val_score(Est(metric='precomputed'), D, y)
assert_array_equal(metric_score, precomp_score)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
# Test unsupervised radius-based query
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
# Test k-neighbors classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
# Test KNeighborsClassifier.predict_proba() method
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based classification
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
# Test radius-based classifier when no neighbors found.
    # In this case it should raise an informative exception
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
                elif False:
                    # NOTE: this branch is currently disabled (``elif False``);
                    # when enabled it would check that outlier points are
                    # assigned ``outlier_label``.
                    assert_array_equal(np.array([1, outlier_label]),
                                       clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
# Test radius-based classifier when no neighbors found and outliers
# are labeled.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
# Test radius-based classifier, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
        # we don't test weights=_weight_func here since the user is expected
        # to handle zero distances themselves in the weight function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([[0.0]], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test k-NN classifier on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
# Test k-NN classifier on multioutput data
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
# Test k-neighbors in multi-output regression with uniform weight
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
# Test radius-based neighbors regression
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
# Test radius neighbors in multi-output regression (uniform weight)
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
# Test k-neighbors in multi-output regression with various weight
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
# Test radius-based regression on sparse matrices
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
# Sanity checks on the iris dataset
    # Fit k-nearest-neighbor classifiers and a regressor on the full dataset
    # and check that the training-set predictions are (nearly) perfect.
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_greater(np.mean(rgs.predict(iris.data).round() == iris.target),
0.95)
def test_neighbors_digits():
# Sanity check on the digits dataset
# the 'brute' algorithm has been observed to fail if the input
# dtype is uint8 due to overflow in distance calculations.
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
# Test kneighbors_graph to build the k-Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity',
include_self=True)
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity',
include_self=True)
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
# Test kneighbors_graph to build the k-Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
# Test radius_neighbors_graph to build the Nearest Neighbor graph.
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity',
include_self=True)
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
# Test radius_neighbors_graph to build the Nearest Neighbor graph
# for sparse input.
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[[]])
        if issubclass(cls, (neighbors.KNeighborsClassifier,
                            neighbors.KNeighborsRegressor)):
nbrs = cls(n_neighbors=-1)
assert_raises(ValueError, nbrs.fit, X, y)
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah')
assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
# Test computing the neighbors for various metrics
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
def custom_metric(x1, x2):
return np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto',
metric=custom_metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute',
metric=custom_metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array.sort()
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric, mode='connectivity',
include_self=True).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A)
# Raise error when wrong parameters are supplied,
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et al. when the query is not the training data
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et al. when the query is None
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
# Test behavior of kneighbors when duplicates are present in query
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
# Test include_self parameter in neighbors_graph
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
def test_same_knn_parallel():
X, y = datasets.make_classification(n_samples=30, n_features=5,
n_redundant=0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y)
def check_same_knn_parallel(algorithm):
clf = neighbors.KNeighborsClassifier(n_neighbors=3,
algorithm=algorithm)
clf.fit(X_train, y_train)
y = clf.predict(X_test)
dist, ind = clf.kneighbors(X_test)
graph = clf.kneighbors_graph(X_test, mode='distance').toarray()
clf.set_params(n_jobs=3)
clf.fit(X_train, y_train)
y_parallel = clf.predict(X_test)
dist_parallel, ind_parallel = clf.kneighbors(X_test)
graph_parallel = \
clf.kneighbors_graph(X_test, mode='distance').toarray()
assert_array_equal(y, y_parallel)
assert_array_almost_equal(dist, dist_parallel)
assert_array_equal(ind, ind_parallel)
assert_array_almost_equal(graph, graph_parallel)
for algorithm in ALGORITHMS:
yield check_same_knn_parallel, algorithm
def test_dtype_convert():
classifier = neighbors.KNeighborsClassifier(n_neighbors=1)
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y)
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# Non-regression test for #4523
# 'brute': uses scipy.spatial.distance through pairwise_distances
# 'ball_tree': uses sklearn.neighbors.dist_metrics
rng = np.random.RandomState(0)
X = rng.uniform(size=(6, 5))
NN = neighbors.NearestNeighbors
nn1 = NN(metric="jaccard", algorithm='brute').fit(X)
nn2 = NN(metric="jaccard", algorithm='ball_tree').fit(X)
assert_array_equal(nn1.kneighbors(X)[0], nn2.kneighbors(X)[0])
| bsd-3-clause |
hsiaoyi0504/scikit-learn | doc/conf.py | 210 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except Exception:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2014, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
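# Illustratively (an assumption about the resolved form; the actual revision,
# path and line number are filled in at build time), a generated link looks
# roughly like:
#   https://github.com/scikit-learn/scikit-learn/blob/<revision>/sklearn/<path>#L<lineno>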
| bsd-3-clause |
jmschrei/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by pointing and clicking and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
        self.clf = None  # the fitted classifier, set by Controller.fit()
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
xiaoxiamii/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
00krishna-research/py_university_gender_dynamics_pkg | tests/test_pyugend.py | 1 | 8479 | import pytest
from models.Models import Base_model
from models.Basic_stochastic_model import Basic_stochastic_model
from models.Stochastic_model_with_promotion import Stochastic_model_with_promotion
from models.Replication_model import Replication_model
from models.Stochastic_model_with_promotion_and_first_hiring import Stochastic_model_with_promotion_and_first_hiring
from comparisons.Comparison import Comparison
from models.Basic_stochastic_model_fixed_promotion import Basic_stochastic_model_fixed_promotion
import numpy as np
import pandas as pd
@pytest.fixture
def mock_data():
return ({'number_of_females_1': 14,
'number_of_females_2': 3,
'number_of_females_3': 19,
'number_of_males_1': 37,
'number_of_males_2': 28,
'number_of_males_3': 239,
'number_of_initial_vacancies_1': 5.303,
'number_of_initial_vacancies_2': 5.9,
'number_of_initial_vacancies_3': 8.31,
'hiring_rate_women_1': 0.310,
'hiring_rate_women_2': 0.222,
'hiring_rate_women_3': 0,
'attrition_rate_women_1': 0,
'attrition_rate_women_2': 0,
'attrition_rate_women_3': 0.017,
'attrition_rate_men_1': 0.009,
'attrition_rate_men_2': 0.017,
'attrition_rate_men_3': 0.033,
'probablity_of_outside_hire_1': 1,
'probability_of_outside_hire_2': 0.158,
'probability_of_outside_hire_3': 0.339,
'female_promotion_probability_1': 0.122,
'female_promotion_probability_2': 0.188,
'male_promotion_probability_1': 0.19,
'male_promotion_probability_2': 0.19,
'max_threshold': 0.1,
'prob_random_growth': 0.1,
'duration': 40})
def test_Base_model(mock_data):
assert isinstance(Base_model(**mock_data), Base_model)
def test_base_model_run(mock_data):
t = Base_model(**mock_data)
t.run_model()
assert (isinstance(t.res, np.ndarray))
def test_base_model_persistence(mock_data):
t = Base_model(**mock_data)
assert (t.nf1 == 14)
def test_base_model_multiple_runs(mock_data):
t = Basic_stochastic_model(**mock_data)
assert (isinstance(t.run_multiple(10), int))
def test_base_model_multiple_runs_persistent_state(mock_data):
t = Basic_stochastic_model(**mock_data)
t.run_multiple(10)
assert (isinstance(t.mean_matrix, np.ndarray))
def test_base_model_parameter_sweep(mock_data):
t = Stochastic_model_with_promotion(**mock_data)
v = t.run_parameter_sweep(12, 'female_pp_1', 0.1, 0.3, 2)
assert (isinstance(v, int))
def test_base_model_plot_multiple_runs(mock_data):
t = Basic_stochastic_model(**mock_data)
t.run_multiple(10)
t.plot_multiple_runs_detail()
#
# def test_base_model_multiple_runs_gender_prop(mock_data):
# t = Basic_stochastic_model(**mock_data)
# t.run_multiple(10)
# t.plot_multiple_runs_gender_prop()
def test_basic_stochastic_model(mock_data):
assert (isinstance(Basic_stochastic_model(**mock_data),
Basic_stochastic_model))
def test_basic_stochastic_model_run(mock_data):
t = Basic_stochastic_model(**mock_data).run_model()
assert isinstance(t, np.recarray)
def test_basic_stochastic_model_run_with_saved_data(mock_data):
t = Basic_stochastic_model(**mock_data)
t.run_model()
assert isinstance(t.run, np.recarray)
def test_basic_stochastic_model_promotion_probability_recovery(mock_data):
t = Basic_stochastic_model(**mock_data)
assert (t.female_promotion_probability_2 == 0.188)
def test_replication_model(mock_data):
t = Replication_model(**mock_data)
t.run_model()
assert (isinstance(t.run, np.ndarray))
# def test_base_model_multiple_runs_gender_prop(mock_data):
# t = Replication_model(**mock_data)
# t.run_multiple(10)
# t.plot_multiple_runs_gender_prop()
# def test_excel_export(mock_data):
# t = Basic_stochastic_model(**mock_data)
# t.export_model_run()
# assert(isinstance(t,Basic_stochastic_model))
def test_stochastic_model_with_hiring_first(mock_data):
t = Stochastic_model_with_promotion_and_first_hiring(**mock_data)
t.run_model()
assert (isinstance(t.run, np.ndarray))
def test_stochastic_model_with_hiring_first_multiple(mock_data):
t = Stochastic_model_with_promotion_and_first_hiring(**mock_data)
t.run_multiple(10)
assert (isinstance(t.mean_matrix, np.ndarray))
def test_comparison_model_plot_detail(mock_data):
modlist = list([Replication_model(**mock_data),
Basic_stochastic_model(**mock_data),
Stochastic_model_with_promotion_and_first_hiring(**mock_data)])
c = Comparison(modlist)
c.plot_comparison_detail(10)
def test_comparison_model_param_sweep(mock_data):
modlist = list([Replication_model(**mock_data),
Basic_stochastic_model(**mock_data),
Stochastic_model_with_promotion_and_first_hiring(**mock_data)])
c = Comparison(modlist)
c.plot_parameter_sweep_gender_proportion(10, 'female_promotion_probability_2', 0.1, 0.5, 8)
def test_comparison_model_param_sweep_detail(mock_data):
modlist = list([Replication_model(**mock_data),
Basic_stochastic_model(**mock_data),
Stochastic_model_with_promotion_and_first_hiring(**mock_data)])
c = Comparison(modlist)
c.plot_parameter_sweep_detail(10, 'female_promotion_probability_2', 0.1, 0.5, 8)
def test_base_model_probability_calc_detail_array(mock_data):
t = Stochastic_model_with_promotion_and_first_hiring(**mock_data)
res = t.run_probability_analysis_parameter_sweep_gender_detail(10,
'female_promotion_probability_2',
'm2', 0.1, 0.8, 8, 150)
print(res)
assert (isinstance(res, pd.DataFrame))
# def test_base_model_probability_calc_plot(mock_data):
# t = Stochastic_model_with_promotion_and_first_hiring(**mock_data)
# t.plot_empirical_probability_group_detail(10,
# 'female_promotion_probability_2',
# 'm2', 0.1, 0.8, 8, 150)
#
# def test_comparison_empirical_probability_detail_plot(mock_data):
# modlist = list([Replication_model(**mock_data),
# Basic_stochastic_model(**mock_data),
# Stochastic_model_with_promotion_and_first_hiring(**mock_data)])
# c = Comparison(modlist)
# c.plot_comparison_empirical_probability_detail(10,
# 'female_promotion_probability_2',
# 'm2', 0.1, 0.8, 20, 150)
# def test_plot_dept_size_over_time(mock_data):
# t = Stochastic_model_with_promotion_and_first_hiring(**mock_data)
# t.plot_department_size_over_time_multiple_runs(10)
# def test_plot_comparision_department_size(mock_data):
# modlist = list([Basic_stochastic_model_fixed_promotion(**mock_data),
# Basic_stochastic_model(**mock_data),
# Stochastic_model_with_promotion_and_first_hiring(**mock_data)])
# c = Comparison(modlist)
# c.plot_comparison_department_size()
def test_multiple_runs_created_res_array(mock_data):
t = Stochastic_model_with_promotion_and_first_hiring(**mock_data)
t.run_multiple(10)
assert hasattr(t, 'probability_matrix')
#
# def test_plot_empirical_probability_gender_proportion(mock_data):
# t = Stochastic_model_with_promotion_and_first_hiring(**mock_data)
# t.plot_empirical_probability_gender_proportion(100, 0.19)
def test_plot_comparision_empirical_probability_gender_proportion(mock_data):
modlist = list([Basic_stochastic_model(**mock_data),
Basic_stochastic_model_fixed_promotion(**mock_data),
Stochastic_model_with_promotion_and_first_hiring(**mock_data)])
c = Comparison(modlist)
c.plot_comparison_empirical_probability_gender_proportion(100, 0.19)
def test_basic_stochastic_with_random_dept_growth(mock_data):
t = Basic_stochastic_model(**mock_data)
    assert t.max_threshold == 0.1
def test_basic_stochastic_with_random_dept_growth(mock_data):
t = Basic_stochastic_model(**mock_data)
    assert t.max_threshold == 0.1 | mit |
thomasaarholt/hyperspy | hyperspy/drawing/signal.py | 4 | 4534 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
# This file contains plotting code generic to the BaseSignal class.
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from traits.api import Undefined
from hyperspy.drawing.utils import set_axes_decor
def _plot_1D_component(factors, idx, axes_manager, ax=None,
calibrate=True, comp_label=None,
same_window=False):
if ax is None:
ax = plt.gca()
axis = axes_manager.signal_axes[0]
if calibrate:
x = axis.axis
plt.xlabel(axis.units)
else:
x = np.arange(axis.size)
plt.xlabel('Channel index')
ax.plot(x, factors[:, idx], label='%i' % idx)
if comp_label and not same_window:
plt.title('%s' % comp_label)
return ax
def _plot_2D_component(factors, idx, axes_manager,
calibrate=True, ax=None,
comp_label=None, cmap=plt.cm.gray,
axes_decor='all'
):
if ax is None:
ax = plt.gca()
axes = axes_manager.signal_axes[::-1]
shape = axes_manager._signal_shape_in_array
extent = None
if calibrate:
extent = (axes[1].low_value,
axes[1].high_value,
axes[0].high_value,
axes[0].low_value)
if comp_label:
plt.title('%s' % idx)
im = ax.imshow(factors[:, idx].reshape(shape),
cmap=cmap, interpolation='nearest',
extent=extent)
# Set axes decorations based on user input
set_axes_decor(ax, axes_decor)
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
return ax
def _plot_loading(loadings, idx, axes_manager, ax=None,
comp_label=None, no_nans=True,
calibrate=True, cmap=plt.cm.gray,
same_window=False, axes_decor='all'):
if ax is None:
ax = plt.gca()
if no_nans:
loadings = np.nan_to_num(loadings)
axes = axes_manager.navigation_axes
if axes_manager.navigation_dimension == 2:
extent = None
# get calibration from a passed axes_manager
shape = axes_manager._navigation_shape_in_array
if calibrate:
extent = (axes[0].low_value,
axes[0].high_value,
axes[1].high_value,
axes[1].low_value)
im = ax.imshow(loadings[idx].reshape(shape),
cmap=cmap, extent=extent,
interpolation='nearest')
if calibrate:
plt.xlabel(axes[0].units)
plt.ylabel(axes[1].units)
else:
plt.xlabel('pixels')
plt.ylabel('pixels')
if comp_label:
if same_window:
plt.title('%s' % idx)
else:
plt.title('%s #%s' % (comp_label, idx))
# Set axes decorations based on user input
set_axes_decor(ax, axes_decor)
div = make_axes_locatable(ax)
cax = div.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax)
elif axes_manager.navigation_dimension == 1:
if calibrate:
x = axes[0].axis
else:
x = np.arange(axes[0].size)
ax.step(x, loadings[idx],
label='%s' % idx)
if comp_label and not same_window:
plt.title('%s #%s' % (comp_label, idx))
plt.ylabel('Score (a. u.)')
if calibrate:
if axes[0].units is not Undefined:
plt.xlabel(axes[0].units)
else:
plt.xlabel('depth')
else:
plt.xlabel('depth')
else:
raise ValueError('View not supported')
| gpl-3.0 |
NunoEdgarGub1/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
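# Hedged sketch, not part of the original scikit-learn module: the dual
# closed-form solution mentioned in the class docstring, written out with
# plain numpy for a linear kernel. For kernel matrix K and penalty alpha,
#     dual_coef = (K + alpha * I)^{-1} y
# and predictions for new samples are K(X_new, X_fit) @ dual_coef, which is
# what fit() and predict() above delegate to _solve_cholesky_kernel and
# pairwise_kernels for.
def _closed_form_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 5)
    y = rng.randn(10)
    alpha = 1.0
    K = X.dot(X.T)  # linear kernel on the training data
    dual_coef = np.linalg.solve(K + alpha * np.eye(K.shape[0]), y)
    reference = KernelRidge(alpha=alpha, kernel="linear").fit(X, y)
    # The explicit solve reproduces the fitted dual coefficients and the
    # predictions on the training data.
    assert np.allclose(dual_coef, reference.dual_coef_)
    assert np.allclose(K.dot(dual_coef), reference.predict(X))
    return dual_coef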
| bsd-3-clause |
iemejia/incubator-beam | sdks/python/apache_beam/dataframe/convert_test.py | 1 | 2180 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest
import pandas as pd
import apache_beam as beam
from apache_beam.dataframe import convert
from apache_beam.testing.util import assert_that
class ConvertTest(unittest.TestCase):
def test_convert(self):
def equal_to_unordered_series(expected):
def check(actual):
actual = pd.concat(actual)
if sorted(expected) != sorted(actual):
raise AssertionError(
'Series not equal: \n%s\n%s\n' % (expected, actual))
return check
with beam.Pipeline() as p:
a = pd.Series([1, 2, 3])
b = pd.Series([100, 200, 300])
pc_a = p | 'A' >> beam.Create([a])
pc_b = p | 'B' >> beam.Create([b])
df_a = convert.to_dataframe(pc_a, proxy=a[:0])
df_b = convert.to_dataframe(pc_b, proxy=b[:0])
df_2a = 2 * df_a
df_3a = 3 * df_a
df_ab = df_a * df_b
# Converting multiple results at a time can be more efficient.
pc_2a, pc_ab = convert.to_pcollection(df_2a, df_ab)
# But separate conversions can be done as well.
pc_3a = convert.to_pcollection(df_3a)
assert_that(pc_2a, equal_to_unordered_series(2 * a), label='Check2a')
assert_that(pc_3a, equal_to_unordered_series(3 * a), label='Check3a')
assert_that(pc_ab, equal_to_unordered_series(a * b), label='Checkab')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
gotomypc/scikit-learn | sklearn/neighbors/base.py | 115 | 29783 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..utils.validation import NotFittedError
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
        Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
            # Corner case: when the number of duplicates is greater
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, last dimension same as that of fit data, optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
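# Worked example of the CSR layout built above (annotation added for
# clarity, not original code): for n_samples1 = 3 query points and
# n_neighbors = 2, A_indptr is [0, 2, 4, 6], A_ind holds the 6 neighbor
# indices row by row, and A_data is six ones in 'connectivity' mode (or the
# six corresponding distances in 'distance' mode).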
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([1., 1., 1.])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
| bsd-3-clause |
RegulatoryGenomicsUPF/pyicoteo | pyicoteolib/enrichment.py | 1 | 40209 | """
Pyicoteo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys, os
import math
import random
from core import Cluster, Region, InvalidLine, InsufficientData, ConversionNotSupported
from defaults import *
import utils
import bam
from regions import AnnotationGene, AnnotationTranscript, AnnotationExon, RegionWriter, read_gff_file, get_exons, get_introns, gene_slide
import warnings
try:
from shutil import move
except:
from os import rename as move
"""
Differential expression and MA plot visualization module.
"""
def _region_from_dual(self, line):
try:
self.cluster_aux.clear()
self.cluster_aux.read_line(line)
strand = None
if self.stranded_analysis:
strand = self.cluster_aux.strand
ret = Region(self.cluster_aux.name, self.cluster_aux.start, self.cluster_aux.end, name2=self.cluster_aux.name2, strand=strand)
self.cluster_aux.clear()
return ret
except ValueError:
pass #discarding header
def __calc_reg_write(self, region_file, count, calculated_region):
if count > self.region_mintags:
region_file.write(calculated_region.write())
def calculate_region(self):
"""
    Calculate a region file using the reads present in both of the main files to analyze.
"""
self.logger.info('Generating regions...')
self.sorted_region_path = '%s/calcregion_%s.bed'%(self._output_dir(), os.path.basename(self.current_output_path))
region_file = open(self.sorted_region_path, 'wb')
if self.region_magic:
regwriter = RegionWriter(self.gff_file, region_file, self.region_magic, no_sort=self.no_sort, logger=self.logger, write_as=BED, galaxy_workarounds=self.galaxy_workarounds)
regwriter.write_regions()
dual_reader = utils.DualSortedReader(self.current_experiment_path, self.current_control_path, self.experiment_format, self.logger)
if self.stranded_analysis:
calculate_region_stranded(self, dual_reader, region_file)
else:
calculate_region_notstranded(self, dual_reader, region_file)
region_file.flush()
def __cr_append(self, regions, region):
regions.append(region)
def calculate_region_notstranded(self, dual_reader, region_file):
calculated_region = Region()
readcount = 1
for line in dual_reader:
if not calculated_region: #first region only
calculated_region = _region_from_dual(self, line)
calculated_region.end += self.proximity
else:
new_region = _region_from_dual(self, line)
new_region.end += self.proximity
if calculated_region.overlap(new_region):
calculated_region.join(new_region)
readcount += 1
else:
calculated_region.end -= self.proximity
__calc_reg_write(self, region_file, readcount, calculated_region)
calculated_region = new_region.copy()
readcount = 1
if calculated_region:
calculated_region.end -= self.proximity
__calc_reg_write(self, region_file, readcount, calculated_region)
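def _merge_sketch(intervals, proximity):
    # Hedged sketch, not called by the pipeline: the merge rule used above.
    # Each read is extended by `proximity`, joined with the running region
    # while they overlap, and the extension is subtracted before reporting.
    # `intervals` are sorted (start, end) pairs from a single chromosome;
    # returns (start, end, read_count) tuples. __calc_reg_write additionally
    # drops regions whose read count is not above self.region_mintags.
    merged = []
    cur_start = cur_end = None
    count = 0
    for start, end in intervals:
        end += proximity
        if cur_start is None:
            cur_start, cur_end, count = start, end, 1
        elif start <= cur_end:
            cur_end = max(cur_end, end)
            count += 1
        else:
            merged.append((cur_start, cur_end - proximity, count))
            cur_start, cur_end, count = start, end, 1
    if cur_start is not None:
        merged.append((cur_start, cur_end - proximity, count))
    return merged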
def calculate_region_stranded(self, dual_reader, region_file):
temp_region_file = open(self.sorted_region_path, 'wb')
region_plus = Region()
region_minus = Region()
regions = []
numreads_plus = 1
numreads_minus = 1
dual_reader = utils.DualSortedReader(self.current_experiment_path, self.current_control_path, self.experiment_format, self.logger)
for line in dual_reader:
new_region = _region_from_dual(self, line)
new_region.end += self.proximity
        # Seed the running region for each strand from its first read.
        if not region_plus and new_region.strand == PLUS_STRAND:
            region_plus = _region_from_dual(self, line)
        elif not region_minus and new_region.strand != PLUS_STRAND:
            region_minus = _region_from_dual(self, line)
else:
if region_plus.overlap(new_region) and region_plus.strand == new_region.strand:
region_plus.join(new_region)
numreads_plus += 1
elif region_minus.overlap(new_region) and region_minus.strand == new_region.strand:
region_minus.join(new_region)
numreads_minus += 1
else:
if new_region.strand == region_plus.strand:
region_plus.end -= self.proximity
self.__calc_reg_write(region_file, numreads_plus, region_plus)
region_plus = new_region.copy()
numreads_plus = 1
else:
region_minus.end -= self.proximity
self.__calc_reg_write(region_file, numreads_minus, region_minus)
region_minus = new_region.copy()
numreads_minus = 1
if region_plus:
region_plus.end -= self.proximity
regions.append(region_plus)
if region_minus:
region_minus.end -= self.proximity
regions.append(region_minus)
regions.sort(key=lambda x:(x.name, x.start, x.end, x.strand))
for region in regions:
region_file.write(region.write())
def get_zscore(x, mean, sd):
if sd > 0:
return float(x-mean)/sd
else:
        return 0  # These points are weird anyway
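# Worked example (annotation added for clarity, not in the original source):
# get_zscore(3.0, mean=1.0, sd=1.0) returns 2.0, i.e. the region sits two
# standard deviations above the background mean; plot_enrichment later marks
# a region as significant when abs(zscore) >= self.zscore.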
def read_interesting_regions(self, file_path):
regs = []
try:
regs_file = open(file_path, 'r')
for line in regs_file:
regs.append(line.strip())
except IOError as ioerror:
self.logger.warning("Interesting regions file not found")
return regs # memory inefficient if there's a large number of interesting regions
def plot_enrichment(self, file_path):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
if self.postscript:
import matplotlib
matplotlib.use("PS")
from matplotlib.pyplot import *
from matplotlib import rcParams
rcParams.update({'font.size': 22})
rcParams['legend.fontsize'] = 14
#decide labels
if self.label1:
label_main = self.label1
else:
if self.real_control_path and self.real_experiment_path:
label_main = '%s VS %s'%(os.path.basename(self.real_experiment_path), os.path.basename(self.real_control_path))
else:
label_main = "A VS B"
if self.label2:
label_control = self.label2
else:
if self.replica_path:
label_control = '%s(A) VS %s(A)'%(os.path.basename(self.real_experiment_path), os.path.basename(self.replica_path))
else:
label_control = 'Background distribution'
#self.logger.info("Interesting regions path: %s" % (self.interesting_regions))
interesting_regs = []
if self.interesting_regions:
self.logger.info("Reading interesting regions...")
interesting_regs = read_interesting_regions(self, self.interesting_regions)
#self.logger.info("Interesting regions: %s" % (interesting_regs))
#self.logger.info("Plot path: %s" % (file_path))
interesting_A = []
interesting_M = []
#self.logger.info("disable_significant: %s" % (self.disable_significant_color))
A = []
A_prime = []
M = []
M_significant = []
A_significant = []
M_prime = []
A_medians = []
points = []
minus_points = []
all_points = []
figure(figsize=(14,22))
biggest_A = -sys.maxint #for drawing
smallest_A = sys.maxint #for drawing
biggest_M = 0 #for drawing
self.logger.info("Loading table...")
for line in open(file_path):
sline = line.split()
try:
enrich = dict(zip(enrichment_keys, sline))
# WARNING: for slide inter and slide intra: name2 = 'start:end' (no gene_id, FIXME?)
name2 = enrich['name2'].split(':')
gene_id = name2[0]
if len(name2) >= 2:
transcript_id = name2[1] # consider transcript_id? (exons)
else:
transcript_id = None
if gene_id in interesting_regs or transcript_id in interesting_regs:
interesting_M.append(float(enrich["M"]))
interesting_A.append(float(enrich["A"]))
biggest_A = max(biggest_A, float(enrich["A"]))
smallest_A = min(smallest_A, float(enrich["A"]))
biggest_M = max(biggest_M, abs(float(enrich["M"])))
biggest_A = max(biggest_A, float(enrich["A_prime"]))
smallest_A = min(smallest_A, float(enrich["A_prime"]))
biggest_M = max(biggest_M, abs(float(enrich["M_prime"])))
positive_point = self.zscore*float(enrich["sd"])+float(enrich["mean"])
negative_point = -self.zscore*float(enrich["sd"])+float(enrich["mean"])
A_median = float(enrich["A_median"])
all_points.append((A_median, positive_point, negative_point))
if abs(float(enrich["zscore"])) < self.zscore:
M.append(float(enrich["M"]))
A.append(float(enrich["A"]))
else:
M_significant.append(float(enrich["M"]))
A_significant.append(float(enrich["A"]))
M_prime.append(float(enrich["M_prime"]))
A_prime.append(float(enrich["A_prime"]))
except ValueError:
pass #to skip the header
all_points.sort(key= lambda x:x[0])
for t in all_points:
(A_medians.append(t[0]), points.append(t[1]), minus_points.append(t[2]))
if points:
margin = 1.1
A_medians.append(biggest_A*margin)
points.append(points[-1])
minus_points.append(minus_points[-1])
A_medians.insert(0, smallest_A)
points.insert(0, points[0])
minus_points.insert(0, minus_points[0])
self.logger.info("Plotting points...")
#Background plot
subplot(211, axisbg="lightyellow")
xlabel('Average', fontsize=30)
ylabel('Log2 ratio', fontsize=30)
axis([smallest_A*margin, biggest_A*margin, -biggest_M*margin, biggest_M*margin])
plot(A_prime, M_prime, '.', label=label_control, color = '#666666')
plot(A_medians, points, 'r--', label="Z-score (%s)"%self.zscore)
plot(A_medians, minus_points, 'r--')
axhline(0, linestyle='--', color="grey", alpha=0.75)
leg = legend(fancybox=True, scatterpoints=1, numpoints=1, loc=2, ncol=4, mode="expand")
leg.get_frame().set_alpha(0.5)
#Experiment plot
subplot(212, axisbg="lightyellow")
axis([smallest_A*margin, biggest_A*margin, -biggest_M*margin, biggest_M*margin])
plot(A, M, 'k.', label=label_main)
if self.disable_significant_color:
significant_marker = 'ko'
else:
significant_marker = 'ro'
plot(A_significant, M_significant, significant_marker, label="%s (significant)"%label_main)
plot(A_medians, points, 'r--', label="Z-score (%s)"%self.zscore)
plot(A_medians, minus_points, 'r--')
if self.interesting_regions:
interesting_label = label_main + ' (interesting)'
plot(interesting_A, interesting_M, 'H', label=interesting_label, color='#00EE00') # plotting "interesting" regions
axhline(0, linestyle='--', color="grey", alpha=0.75)
xlabel('Average', fontsize=30)
ylabel('Log2 ratio', fontsize=30)
leg2 = legend(fancybox=True, scatterpoints=1, numpoints=1, loc=2, ncol=4)
leg2.get_frame().set_alpha(0.7)
self._save_figure("enrichment_MA", width=500, height=2800)
else:
self.logger.warning("Nothing to plot.")
except ImportError:
if self.debug:
raise
__matplotlibwarn(self)
def __matplotlibwarn(self):
#FIXME move to utils.py or plotting module
self.logger.warning('Pyicos can not find an installation of matplotlib, so no plot will be drawn. If you want to get a plot with the correlation values, install the matplotlib library.')
def __calc_M(signal_a, signal_b):
return math.log(float(signal_a)/float(signal_b), 2)
def __calc_A(signal_a, signal_b):
return (math.log(float(signal_a), 2)+math.log(float(signal_b), 2))/2
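def _ma_sketch():
    # Hedged sketch, not called by the pipeline: the M/A coordinates of one
    # region with normalized signals 8 and 2 in the two conditions.
    m = __calc_M(8, 2)  # log2(8/2) == 2.0 -> four-fold higher in sample A
    a = __calc_A(8, 2)  # (log2 8 + log2 2)/2 == 2.0 -> average log2 signal
    # plot_enrichment draws A on the x axis ("Average") and M on the y axis
    # ("Log2 ratio").
    return a, m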
def _calculate_MA(self, region_path, read_counts, factor = 1, replica_factor = 1, file_a_reader=None, file_b_reader=None, replica_reader=None):
tags_a = []
tags_b = []
numreads_background_1 = 0
numreads_background_2 = 0
total_reads_background_1 = 0
total_reads_background_2 = 0
self.logger.debug("Inside _calculate_MA")
self.regions_analyzed_count = 0
enrichment_result = [] #This will hold the name, start and end of the region, plus the A, M, 'A and 'M
if NOWRITE not in self.operations:
out_file = open(self.current_output_path, 'wb')
for region_line in open(region_path):
sline = region_line.split()
region_of_interest = self._region_from_sline(sline)
if region_of_interest:
region_a = None
replica = None
replica_tags = None
signal_a = -1
signal_b = -1
signal_background_1 = -1
signal_background_2 = -1
swap1 = Region()
swap2 = Region()
if read_counts:
signal_a = float(sline[6])
signal_b = float(sline[7])*factor
signal_background_1 = float(sline[8])
signal_background_2 = float(sline[9])*replica_factor
if CHECK_REPLICAS in self.operations:
self.experiment_values.append(signal_background_1)
self.replica_values.append(signal_background_2)
else:
self.logger.debug("Reading tags for %s ..."%region_of_interest)
if self.experiment_format == BAM:
tags_a = len(file_a_reader.get_overlaping_clusters(region_of_interest, overlap=self.overlap))
tags_b = len(file_b_reader.get_overlaping_clusters(region_of_interest, overlap=self.overlap))
else:
tags_a = file_a_reader.get_overlaping_counts(region_of_interest, overlap=self.overlap)
tags_b = file_b_reader.get_overlaping_counts(region_of_interest, overlap=self.overlap)
if self.use_replica:
if self.experiment_format == BAM:
replica_tags = len(replica_reader.get_overlaping_clusters(region_of_interest, overlap=self.overlap))
else:
replica_tags = replica_reader.get_overlaping_counts(region_of_interest, overlap=self.overlap)
self.logger.debug("... done. tags_a: %s tags_b: %s"%(tags_a, tags_b))
                # if using pseudocounts, keep regions with reads in either sample (union); otherwise require reads in both samples (intersection)
if (self.pseudocount and (tags_a or tags_b)) or (not self.pseudocount and tags_a and tags_b):
signal_a = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount, factor, self.total_reads_a, tags_a)
signal_b = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount, factor, self.total_reads_b, tags_b)
self.already_norm = True
if not self.counts_file:
if (self.pseudocount and (tags_a or tags_b)) or (not self.pseudocount and tags_a and tags_b):
if self.use_replica:
replica = region_of_interest.copy()
#replica.add_tags(replica_tags)
numreads_background_1 = tags_a
numreads_background_2 = replica_tags
total_reads_background_1 = self.total_reads_a
total_reads_background_2 = self.total_reads_replica
signal_background_1 = signal_a
signal_background_2 = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount,
replica_factor, self.total_reads_replica, replica_tags)
else:
numreads_background_1 = 0
numreads_background_2 = 0
for i in range(0, tags_a+tags_b):
if random.uniform(0,2) > 1:
numreads_background_1 += 1
else:
numreads_background_2 += 1
total_reads_background_1 = total_reads_background_2 = self.average_total_reads
signal_background_1 = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount,
replica_factor, self.average_total_reads, numreads_background_1)
signal_background_2 = region_of_interest.normalized_counts(self.len_norm, self.n_norm, self.total_regions, self.pseudocount,
replica_factor, self.average_total_reads, numreads_background_2)
#if there is no data in the replica or in the swap and we are not using pseudocounts, dont write the data
if signal_a > 0 and signal_b > 0 and signal_background_1 > 0 and signal_background_2 > 0 or self.use_MA:
if self.use_MA and not self.already_norm:
A = float(sline[10])
M = float(sline[11])
A_prime = float(sline[16])
M_prime = float(sline[17])
else:
if not self.already_norm: #TODO refractor
if self.len_norm: #read per kilobase in region
signal_a = 1e3*(float(signal_a)/len(region_of_interest))
signal_b = 1e3*(float(signal_b)/len(region_of_interest))
signal_background_1 = 1e3*(float(signal_background_1)/len(region_of_interest))
signal_background_2 = 1e3*(float(signal_background_2)/len(region_of_interest))
if self.n_norm: #per million reads in the sample
signal_a = 1e6*(float(signal_a)/self.total_reads_a)
signal_b = 1e6*(float(signal_b)/self.total_reads_b)
if self.use_replica:
signal_background_1 = signal_a
signal_background_2 = 1e6*(float(signal_background_2)/self.total_reads_replica)
else:
signal_background_1 = 1e6*(float(signal_background_1)/self.average_total_reads)
signal_background_2 = 1e6*(float(signal_background_2)/self.average_total_reads)
A = __calc_A(signal_a, signal_b)
M = __calc_M(signal_a, signal_b)
A_prime = __calc_A(signal_background_1, signal_background_2)
M_prime = __calc_M(signal_background_1, signal_background_2)
if CHECK_REPLICAS in self.operations:
self.experiment_values.append(signal_background_1)
self.replica_values.append(signal_background_2)
if NOWRITE not in self.operations:
out_file.write("%s\n"%("\t".join([region_of_interest.write().rstrip("\n"), str(signal_a), str(signal_b), str(signal_background_1), str(signal_background_2), str(A), str(M), str(self.total_reads_a), str(self.total_reads_b), str(tags_a), str(tags_b), str(A_prime), str(M_prime), str(total_reads_background_1), str(total_reads_background_2), str(numreads_background_1), str(numreads_background_2)])))
self.regions_analyzed_count += 1
self.logger.debug("LEAVING _calculate_MA")
if NOWRITE in self.operations:
return ""
else:
out_file.flush()
out_file.close()
# Outputting to HTML (if specified)
if self.html_output is not None:
self.logger.info("Generating HTML")
try:
from jinja2 import Environment, PackageLoader, Markup
except ImportError:
self.logger.error("Could not find the jinja2 library")
return out_file.name
loadr = PackageLoader('pyicoteolib', 'templates')
env = Environment(loader=loadr)
template = env.get_template('enrich_html.html')
def jinja_read_file(filename):
f = open(filename, 'r')
#for line in f:
# print line
txt = ''.join(f.readlines())
f.close()
return txt
env.globals['jinja_read_file'] = jinja_read_file
if self.galaxy_workarounds: # Galaxy changes the working directory when outputting multiple files
parent_dir = "./"
else:
parent_dir = os.sep.join(out_file.name.split(os.sep)[0:-1]) + "/"
plot_path = parent_dir + "enrichment_MA_" + out_file.name.split(os.sep)[-1] + ".png"
bed_path = parent_dir + out_file.name.split(os.sep)[-1]
html_file = open(self.html_output, 'w')
html_file.write(template.render({'page_title': 'Enrichment results', 'results_output': jinja_read_file(out_file.name), 'plot_path': plot_path, 'bed_path': bed_path}))
html_file.flush()
html_file.close()
return out_file.name
def _calculate_total_lengths(self):
msg = "Calculating enrichment in regions"
if self.counts_file:
self.sorted_region_path = self.counts_file
if (not self.total_reads_a or not self.total_reads_b or (not self.total_reads_replica and self.use_replica)) and not self.use_MA:
self.logger.info("... counting from counts file...")
self.total_reads_a = 0
self.total_reads_b = 0
if self.total_reads_replica:
self.total_reads_replica = 0
else:
self.total_reads_replica = 1
for line in open(self.counts_file):
try:
enrich = dict(zip(enrichment_keys, line.split()))
self.total_reads_a += float(enrich["signal_a"])
self.total_reads_b += float(enrich["signal_b"])
if self.use_replica:
self.total_reads_replica += float(enrich["signal_prime_2"])
except ValueError:
self.logger.debug("(Counting) skip header...")
else:
self.logger.info("... counting number of lines in files...")
if not self.total_reads_a:
if self.experiment_format == BAM:
self.total_reads_a = bam.size(self.current_experiment_path)
else:
self.total_reads_a = sum(1 for line in utils.open_file(self.current_experiment_path, self.experiment_format, logger=self.logger))
if not self.total_reads_b:
if self.experiment_format == BAM:
self.total_reads_b = bam.size(self.current_control_path)
else:
self.total_reads_b = sum(1 for line in utils.open_file(self.current_control_path, self.control_format, logger=self.logger))
if self.use_replica and not self.total_reads_replica:
if self.experiment_format == BAM:
self.total_reads_replica = bam.size(self.replica_path)
else:
self.total_reads_replica = sum(1 for line in utils.open_file(self.replica_path, self.experiment_format, logger=self.logger))
self.logger.debug("Number lines in experiment A: %s Experiment B: %s"%(self.total_reads_a, self.total_reads_b))
if self.use_replica:
msg = "%s using replicas..."%msg
else:
msg = "%s using swap..."%msg
self.logger.info(msg)
self.average_total_reads = (self.total_reads_a+self.total_reads_b)/2
def enrichment(self):
file_a_reader = file_b_reader = replica_reader = None
self.use_replica = (bool(self.replica_path) or (bool(self.counts_file) and self.use_replica_flag))
self.logger.debug("Use replica: %s"%self.use_replica)
if not USE_MA in self.operations:
_calculate_total_lengths(self)
if not self.counts_file:
file_a_reader = utils.read_fetcher(self.current_experiment_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential, only_counts=True)
file_b_reader = utils.read_fetcher(self.current_control_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential, only_counts=True)
if self.use_replica:
replica_reader = utils.read_fetcher(self.current_replica_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential, only_counts=True)
if self.sorted_region_path:
self.logger.info('Using region file %s (%s)'%(self.region_path, self.region_format))
else:
calculate_region(self) #create region file semi automatically
self.total_regions = sum(1 for line in open(self.sorted_region_path))
self.logger.info("... analyzing regions, calculating normalized counts, A / M and replica or swap...")
self.already_norm = False
if self.use_MA:
ma_path = self.counts_file
else:
ma_path = self.sorted_region_path
out_path = _calculate_MA(self, ma_path, bool(self.counts_file), 1, 1, file_a_reader, file_b_reader, replica_reader)
self.already_norm = True
self.logger.debug("Already normalized: %s"%self.already_norm)
if self.tmm_norm:
if CHECK_REPLICAS in self.operations:
self.experiment_values = []
self.replica_values = []
self.logger.info("TMM Normalizing...")
tmm_factor = calc_tmm_factor(self, out_path, self.regions_analyzed_count, False)
replica_tmm_factor = 1
if self.use_replica:
replica_tmm_factor = calc_tmm_factor(self, out_path, self.regions_analyzed_count, True)
#move output file to old output
#use as input
old_output = '%s/notnormalized_%s'%(self._current_directory(), os.path.basename(self.current_output_path))
move(os.path.abspath(self.current_output_path), old_output)
out_path = _calculate_MA(self, old_output, True, tmm_factor, replica_tmm_factor, True) #recalculate with the new factor, using the counts again
if self.quant_norm:
self.logger.info("Full quantile normalization...")
signal_a = []
signal_prime_1 = []
enrich = []
for line in open(out_path):
sline = line.split()
enrich_line = dict(zip(enrichment_keys, sline))
enrich.append(enrich_line)
signal_a.append(float(enrich_line['signal_a']))
signal_prime_1.append(float(enrich_line['signal_prime_1']))
#full quantile normalization
signal_a.sort()
enrich.sort(key=lambda x:float(x['signal_b']))
quant_counts = open('%s/quantcounts_%s'%(self._current_directory(), os.path.basename(self.current_output_path)), 'w')
for i in range(len(enrich)):
enrich[i]['signal_b'] = signal_a[i]
self.logger.info("Full quantile normalization replica...")
#full quantile normalization of the replica
signal_prime_1.sort()
enrich.sort(key=lambda x:float(x['signal_prime_2']))
for i in range(len(enrich)):
enrich[i]['signal_prime_2'] = signal_prime_1[i]
quant_counts.write("%s\n"%"\t".join(str(enrich[i][key]) for key in enrichment_keys[:20])) #write the lines
quant_counts.flush()
out_path = _calculate_MA(self, quant_counts.name, True, 1, 1, True) #recalculate with the new factor, using the counts again
self._manage_temp_file(quant_counts.name)
self.logger.info("%s regions analyzed."%self.regions_analyzed_count)
if not NOWRITE in self.operations:
self.logger.info("Enrichment result saved to %s"%self.current_output_path)
if CHECK_REPLICAS in self.operations:
check_replica(self)
return out_path
def _sub_tmm(counts_a, counts_b, reads_a, reads_b):
return (counts_a-reads_a)/(counts_a*reads_a) + (counts_b-reads_b)/(counts_b*reads_b)
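# Note: each term above simplifies algebraically to 1/reads - 1/counts; calc_tmm_factor
# below uses these per-region values as the weights w for its trimmed weighted mean of M.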
def calc_tmm_factor(self, file_counts, total_regions, replica):
if replica:
signal_1 = "signal_prime_1"
signal_2 = "signal_prime_2"
M = "M_prime"
reads_2 = self.total_reads_replica
else:
signal_1 = "signal_a"
signal_2 = "signal_b"
M = "M"
reads_2 = self.total_reads_b
values_list = []
#read the file into values_list
for line in open(file_counts):
sline = line.split()
values_list.append(dict(zip(enrichment_keys, sline)))
a_trim_number = int(round(total_regions*self.a_trim))
#discard the bad A
self.logger.debug("Removing the worst A (%s regions, %s percent)"%(a_trim_number, self.a_trim*100))
values_list.sort(key=lambda x:float(x["A"])) #sort by A
for i in range (0, a_trim_number):
values_list.pop(0)
values_list.sort(key=lambda x:float(x[M])) #sort by M
m_trim_number = int(round(total_regions*(self.m_trim/2))) #this number is half the value of the flag, because we will trim half below, and half over
#remove on the left
for i in range(0, m_trim_number):
values_list.pop(0)
#remove on the right
for i in range(0, m_trim_number):
values_list.pop(-1)
#now calculate the normalization factor
arriba = 0
abajo = 0
for value in values_list:
w = _sub_tmm(float(value[signal_1]), float(value[signal_2]), self.total_reads_a, reads_2)
arriba += w*float(value[M])
abajo += w
try:
factor = 2**(arriba/abajo)
except ZeroDivisionError:
self.logger.warning("Division by zero, TMM factor could not be calculated.")
factor = 1
if replica:
self.logger.info("Replica TMM Normalization Factor: %s"%factor)
else:
self.logger.info("TMM Normalization Factor: %s"%factor)
return factor
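# Illustrative sketch (not called by the pipeline): the same trim-and-weight scheme as
# calc_tmm_factor above, applied to a plain list of toy region dicts instead of a counts
# file. The keys ("A", "M", "signal_1", "signal_2", "reads_1", "reads_2") and the default
# trim fractions are assumptions for the example, not the real enrichment_keys or flags.
def _example_tmm_factor(regions, a_trim=0.05, m_trim=0.25):
    regions = sorted(regions, key=lambda r: float(r["A"]))
    regions = regions[int(round(len(regions)*a_trim)):]  # discard the worst A values
    regions.sort(key=lambda r: float(r["M"]))
    m_cut = int(round(len(regions)*(m_trim/2)))  # trim half below and half above
    if m_cut:
        regions = regions[m_cut:-m_cut]
    numerator = denominator = 0.
    for r in regions:
        w = _sub_tmm(r["signal_1"], r["signal_2"], r["reads_1"], r["reads_2"])
        numerator += w*float(r["M"])
        denominator += w
    if not denominator:
        return 1.  # same fallback as calc_tmm_factor when the weights cancel out
    return 2**(numerator/denominator)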
def __load_enrichment_result(values_path):
ret = []
for line in open(values_path):
sline = line.split()
try:
float(sline[1])
ret.append(dict(zip(enrichment_keys, sline)))
except ValueError:
pass
return ret
def calculate_zscore(self, values_path):
num_regions = sum(1 for line in open(values_path))
bin_size = int(self.binsize*num_regions)
if bin_size < 50:
self.logger.warning("The bin size results in a sliding window smaller than 50, adjusting window to 50 in order to get statistically meaningful results.")
bin_size = 50
bin_step = max(1, int(round(self.bin_step*bin_size)))
self.logger.info("Enrichment window calculation using a sliding window size of %s, sliding with a step of %s"%(bin_size, bin_step))
self.logger.info("... calculating zscore...")
enrichment_result = __load_enrichment_result(values_path)
enrichment_result.sort(key= lambda x:(float(x["A_prime"])))
self.logger.debug("Number of loaded counts: %s"%len(enrichment_result))
self.points = []
#get the standard deviations
for i in range(0, num_regions-bin_size+bin_step, bin_step):
#get the slice
if i+bin_size < num_regions:
result_chunk = enrichment_result[i:i+bin_size]
else:
result_chunk = enrichment_result[i:] #last chunk
#retrieve the values
mean_acum = 0
a_acum = 0
Ms_replica = []
for entry in result_chunk:
mean_acum += float(entry["M_prime"])
a_acum += float(entry["A_prime"])
Ms_replica.append(float(entry["M_prime"]))
#add them to the points of mean and sd
mean = mean_acum/len(result_chunk)
sd = math.sqrt((sum((x - mean)**2 for x in Ms_replica))/len(Ms_replica))
#the A median
A_median = a_acum / len(result_chunk)
self.points.append([A_median, mean, sd]) #The A assigned to the window, the mean and the standard deviation
#self.logger.debug("Window of %s length, with A median: %s mean: %s sd: %s"%(len(result_chunk), self.points[-1][0], self.points[-1][1], self.points[-1][2], len(self.points)))
#update z scores
for entry in enrichment_result:
entry["A_median"] = 0
entry["mean"] = 0
entry["sd"] = 0
entry["zscore"] = 0
closest_A = sys.maxint
sd_position = 0
for i in range(0, len(self.points)):
new_A = self.points[i][0]
if new_A != closest_A: #skip repeated points
if abs(closest_A - float(entry["A"])) >= abs(new_A - float(entry["A"])):
closest_A = new_A
sd_position = i
else:
break #already found, no need to go further since the points are ordered
entry["A_median"] = closest_A
if self.points: #only calculate if there were windows...
__sub_zscore(self.sdfold, entry, self.points[sd_position])
if not self.points: # ... otherwise give a warning
self.logger.warning("Insufficient number of regions analyzed (%s), z-score values could not be calculated"%num_regions)
enrichment_result.sort(key=lambda x:(x["name"], int(x["start"]), int(x["end"])))
old_file_path = '%s/before_zscore_%s'%(self._current_directory(), os.path.basename(values_path)) #create path for the outdated file
move(os.path.abspath(values_path), old_file_path) #move the file
new_file = file(values_path, 'w') #open a new file in the now empty file space
if not self.skip_header:
new_file.write('\t'.join(enrichment_keys))
new_file.write('\n')
for entry in enrichment_result:
new_file.write("\t".join(str(entry[key]) for key in enrichment_keys)+"\n")
self._manage_temp_file(old_file_path)
return values_path
def __sub_zscore(sdfold, entry, point):
entry["mean"] = str(point[1])
entry["sd"] = str(point[2])
entry["zscore"] = str(get_zscore(float(entry["M"]), float(entry["mean"]), sdfold*float(entry["sd"])))
def check_replica(self):
#discard everything below the flag
new_experiment = []
new_replica = []
min_value = sys.maxint
max_value = -sys.maxint
for i in range(len(self.replica_values)):
if self.experiment_values[i] > self.count_filter and self.replica_values[i] > self.count_filter:
new_experiment.append(math.log(self.experiment_values[i], 2))
new_replica.append(math.log(self.replica_values[i], 2))
min_value = min(min_value, math.log(self.experiment_values[i], 2), math.log(self.replica_values[i], 2))
max_value = max(max_value, math.log(self.experiment_values[i], 2), math.log(self.replica_values[i], 2))
#print self.replica_values
self.experiment_values = new_experiment
self.replica_values = new_replica
try:
if self.postscript:
import matplotlib
matplotlib.use("PS")
from matplotlib.pyplot import plot, show, xlabel, ylabel, axhline, axis, clf, text, title, xlim, ylim
except ImportError:
__matplotlibwarn(self)
return 0
clf()
r_squared = utils.pearson(self.experiment_values, self.replica_values)**2
text(min_value+abs(max_value)*0.1, max_value-abs(max_value)*0.2, r'Pearson $R^2$= %s'%round(r_squared, 3), fontsize=18, bbox={'facecolor':'yellow', 'alpha':0.5, 'pad':10})
xlabel("log2(%s)"%self.experiment_label, fontsize=18)
ylabel("log2(%s)"%self.replica_label, fontsize=18)
xlim(min_value, max_value)
ylim(min_value, max_value)
title(self.title_label, fontsize=24)
plot(self.experiment_values, self.replica_values, '.')
self._save_figure("check_replica")
def check_replica_correlation(self):
"No usado, de momento"
min_tags = 20
experiment_reader = utils.read_fetcher(self.current_experiment_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential)
replica_reader = utils.read_fetcher(self.current_replica_path, self.experiment_format, cached=self.cached, logger=self.logger, use_samtools=self.use_samtools, access_sequential=self.access_sequential)
correlations_acum = 0
num_correlations = 0
for region_line in open(self.region_path):
sline = region_line.split()
region_experiment = self._region_from_sline(sline)
region_replica = region_experiment.copy()
tags_experiment = experiment_reader.get_overlaping_clusters(region_experiment, overlap=1)
tags_replica = replica_reader.get_overlaping_clusters(region_experiment, overlap=1)
count_experiment = len(tags_experiment)
count_replica = len(tags_replica)
correlations = []
if count_experiment+count_replica > min_tags:
region_experiment.add_tags(tags_experiment, clusterize=True)
region_replica.add_tags(tags_replica, clusterize=True)
num_correlations += 1
correlation = utils.pearson(region_experiment.get_array(), region_replica.get_array())
correlations_acum += max(0, correlation)
correlations.append(correlation)
print correlations_acum/num_correlations
try:
if self.postscript:
import matplotlib
matplotlib.use("PS")
from matplotlib.pyplot import plot, boxplot, show, legend, figure, xlabel, ylabel, subplot, axhline, axis
except ImportError:
__matplotlibwarn(self)
return 0
print correlations
boxplot(correlations)
self._save_figure("check_replica") | gpl-3.0 |
tardis-sn/tardis | tardis/plasma/properties/nlte.py | 1 | 10502 | import logging
import os
import numpy as np
import pandas as pd
from tardis.plasma.properties.base import (
PreviousIterationProperty,
ProcessingPlasmaProperty,
)
from tardis.plasma.properties.ion_population import PhiSahaNebular
__all__ = [
"PreviousElectronDensities",
"PreviousBetaSobolev",
"HeliumNLTE",
"HeliumNumericalNLTE",
]
logger = logging.getLogger(__name__)
class PreviousElectronDensities(PreviousIterationProperty):
"""
Attributes
----------
previous_electron_densities : The values for the electron densities converged upon in the previous iteration.
"""
outputs = ("previous_electron_densities",)
def set_initial_value(self, kwargs):
initial_value = pd.Series(
1000000.0,
index=kwargs["abundance"].columns,
)
self._set_initial_value(initial_value)
class PreviousBetaSobolev(PreviousIterationProperty):
"""
Attributes
----------
previous_beta_sobolev : The beta sobolev values converged upon in the previous iteration.
"""
outputs = ("previous_beta_sobolev",)
def set_initial_value(self, kwargs):
initial_value = pd.DataFrame(
1.0,
index=kwargs["atomic_data"].lines.index,
columns=kwargs["abundance"].columns,
)
self._set_initial_value(initial_value)
class HeliumNLTE(ProcessingPlasmaProperty):
outputs = ("helium_population",)
@staticmethod
def calculate(
level_boltzmann_factor,
ionization_data,
beta_rad,
g,
g_electron,
w,
t_rad,
t_electrons,
delta,
zeta_data,
number_density,
partition_function,
):
"""
Updates all of the helium level populations according to the helium NLTE recomb approximation.
"""
helium_population = level_boltzmann_factor.loc[2].copy()
# He I excited states
he_one_population = HeliumNLTE.calculate_helium_one(
g_electron, beta_rad, ionization_data, level_boltzmann_factor, g, w
)
helium_population.loc[0].update(he_one_population)
# He I ground state
helium_population.loc[0, 0] = 0.0
# He II excited states
he_two_population = level_boltzmann_factor.loc[2, 1].mul(
(g.loc[2, 1, 0] ** (-1.0))
)
helium_population.loc[1].update(he_two_population)
# He II ground state
helium_population.loc[1, 0] = 1.0
# He III states
helium_population.loc[2, 0] = HeliumNLTE.calculate_helium_three(
t_rad,
w,
zeta_data,
t_electrons,
delta,
g_electron,
beta_rad,
ionization_data,
g,
)
# unnormalised = helium_population.sum()
# normalised = helium_population.mul(number_density.ix[2] / unnormalised)
# helium_population.update(normalised)
return helium_population
@staticmethod
def calculate_helium_one(
g_electron, beta_rad, ionization_data, level_boltzmann_factor, g, w
):
"""
Calculates the He I level population values, in equilibrium with the He II ground state.
"""
return (
level_boltzmann_factor.loc[2, 0]
* (1.0 / (2 * g.loc[2, 1, 0]))
* (1 / g_electron)
* (1 / (w ** 2.0))
* np.exp(ionization_data.loc[2, 1] * beta_rad)
)
@staticmethod
def calculate_helium_three(
t_rad,
w,
zeta_data,
t_electrons,
delta,
g_electron,
beta_rad,
ionization_data,
g,
):
"""
Calculates the He III level population values.
"""
zeta = PhiSahaNebular.get_zeta_values(zeta_data, 2, t_rad)[1]
he_three_population = (
2
* (float(g.loc[2, 2, 0]) / g.loc[2, 1, 0])
* g_electron
* np.exp(-ionization_data.loc[2, 2] * beta_rad)
* w
* (delta.loc[2, 2] * zeta + w * (1.0 - zeta))
* (t_electrons / t_rad) ** 0.5
)
return he_three_population
class HeliumNumericalNLTE(ProcessingPlasmaProperty):
"""
IMPORTANT: This particular property requires a specific numerical NLTE
solver and a specific atomic dataset (neither of which are distributed
with Tardis) to work.
"""
outputs = ("helium_population",)
def __init__(self, plasma_parent, heating_rate_data_file):
super(HeliumNumericalNLTE, self).__init__(plasma_parent)
self._g_upper = None
self._g_lower = None
self.heating_rate_data = np.loadtxt(heating_rate_data_file, unpack=True)
def calculate(
self,
ion_number_density,
electron_densities,
t_electrons,
w,
lines,
j_blues,
levels,
level_boltzmann_factor,
t_rad,
zeta_data,
g_electron,
delta,
partition_function,
ionization_data,
beta_rad,
g,
time_explosion,
):
logger.info("Performing numerical NLTE He calculations.")
if len(j_blues) == 0:
return None
# Outputting data required by SH module
for zone, _ in enumerate(electron_densities):
with open(
f"He_NLTE_Files/shellconditions_{zone}.txt", "w"
) as output_file:
output_file.write(ion_number_density.loc[2].sum()[zone])
output_file.write(electron_densities[zone])
output_file.write(t_electrons[zone])
output_file.write(self.heating_rate_data[zone])
output_file.write(w[zone])
output_file.write(time_explosion)
output_file.write(t_rad[zone])
output_file.write(self.plasma_parent.v_inner[zone])
output_file.write(self.plasma_parent.v_outer[zone])
for zone, _ in enumerate(electron_densities):
with open(
f"He_NLTE_Files/abundances_{zone}.txt", "w"
) as output_file:
for element in range(1, 31):
try:
number_density = (
ion_number_density[zone].loc[element].sum()
)
except KeyError:
number_density = 0.0
output_file.write(number_density)
helium_lines = lines[lines["atomic_number"] == 2]
helium_lines = helium_lines[helium_lines["ion_number"] == 0]
for zone, _ in enumerate(electron_densities):
with open(
f"He_NLTE_Files/discradfield_{zone}.txt", "w"
) as output_file:
j_blues = pd.DataFrame(j_blues, index=lines.index)
helium_j_blues = j_blues[zone].loc[helium_lines.index]
for value in helium_lines.index:
if helium_lines.level_number_lower.loc[value] < 35:
output_file.write(
int(helium_lines.level_number_lower.loc[value] + 1),
int(helium_lines.level_number_upper.loc[value] + 1),
j_blues[zone].loc[value],
)
# Running numerical simulations
for zone, _ in enumerate(electron_densities):
os.rename(
f"He_NLTE_Files/abundances{zone}.txt",
"He_NLTE_Files/abundances_current.txt",
)
os.rename(
f"He_NLTE_Files/shellconditions{zone}.txt",
"He_NLTE_Files/shellconditions_current.txt",
)
os.rename(
f"He_NLTE_Files/discradfield{zone}.txt",
"He_NLTE_Files/discradfield_current.txt",
)
os.system("nlte-solver-module/bin/nlte_solvertest >/dev/null")
os.rename(
"He_NLTE_Files/abundances_current.txt",
f"He_NLTE_Files/abundances{zone}.txt",
)
os.rename(
"He_NLTE_Files/shellconditions_current.txt",
f"He_NLTE_Files/shellconditions{zone}.txt",
)
os.rename(
"He_NLTE_Files/discradfield_current.txt",
f"He_NLTE_Files/discradfield{zone}.txt",
)
os.rename("debug_occs.dat", f"He_NLTE_Files/occs{zone}.txt")
# Reading in populations from files
helium_population = level_boltzmann_factor.loc[2].copy()
for zone, _ in enumerate(electron_densities):
with open(
f"He_NLTE_Files/discradfield{zone}.txt", "r"
) as read_file:
for level in range(0, 35):
level_population = read_file.readline()
level_population = float(level_population)
helium_population[zone].loc[0, level] = level_population
helium_population[zone].loc[1, 0] = float(read_file.readline())
# Performing He LTE level populations (upper two energy levels,
# He II excited states, He III)
he_one_population = HeliumNLTE.calculate_helium_one(
g_electron, beta_rad, ionization_data, level_boltzmann_factor, g, w
)
helium_population.loc[0, 35].update(he_one_population.loc[35])
helium_population.loc[0, 36].update(he_one_population.loc[36])
he_two_population = level_boltzmann_factor.loc[2, 1, 1:].mul(
(g.loc[2, 1, 0] ** (-1)) * helium_population.loc[1, 0]
)
helium_population.loc[1, 1:].update(he_two_population)
helium_population.loc[2, 0] = HeliumNLTE.calculate_helium_three(
t_rad, w, zeta_data, t_electrons, delta, g_electron, beta_rad, ionization_data, g
)
unnormalised = helium_population.sum()
normalised = helium_population.mul(
ion_number_density.loc[2].sum() / unnormalised
)
helium_population.update(normalised)
return helium_population
| bsd-3-clause |
bionet/ted.python | demos/iaf_delay_demo.py | 1 | 3443 | #!/usr/bin/env python
"""
Demos of MIMO time encoding and decoding algorithms that use IAF
neurons with delays.
"""
# Copyright (c) 2009-2015, Lev Givon
# All rights reserved.
# Distributed under the terms of the BSD license:
# http://www.opensource.org/licenses/bsd-license
import numpy as np
# Set matplotlib backend so that plots can be generated without a
# display:
import matplotlib
matplotlib.use('AGG')
from bionet.utils.misc import func_timer
import bionet.utils.band_limited as bl
import bionet.utils.plotting as pl
import bionet.ted.iaf as iaf
# For determining output plot file names:
output_name = 'iaf_delay_demo_'
output_count = 0
output_ext = '.png'
# Define input signal generation parameters:
T = 0.05
dur = 2*T
dt = 1e-6
f = 100
bw = 2*np.pi*f
np.random.seed(0)
noise_power = None
comps = 8
if noise_power == None:
fig_title = 'IAF Input Signal with No Noise'
else:
fig_title = 'IAF Input Signal with %d dB of Noise' % noise_power
M = 3 # number of input signals
N = 9 # number of neurons
# Starting and ending points of interval that is encoded:
t_start = 0.02
t_end = t_start+T
if t_end > dur:
raise ValueError('t_start is too large')
k_start = int(np.round(t_start/dt))
k_end = int(np.round(t_end/dt))
t_enc = np.arange(k_start, k_end, dtype=np.float)*dt
u_list = []
for i in xrange(M):
fig_title_in = fig_title + ' (Signal #' + str(i+1) + ')'
print fig_title_in
u = func_timer(bl.gen_band_limited)(dur, dt, f, noise_power, comps)
u /= max(u)
u *= 1.5
pl.plot_signal(t_enc, u[k_start:k_end], fig_title_in,
output_name + str(output_count) + output_ext)
u_list.append(u)
output_count += 1
t = np.arange(len(u_list[0]), dtype=np.float)*dt
# Define neuron parameters:
def randu(a, b, *d):
"""Create an array of the given shape and propagate it with random
samples from a uniform distribution over ``[a, b)``."""
if a >= b:
raise ValueError('b must exceed a')
return a+(b-a)*np.random.rand(*d)
b_list = list(randu(2.3, 3.3, N))
d_list = list(randu(0.15, 0.25, N))
k_list = list(0.01*np.ones(N))
a_list = map(list, np.reshape(np.random.exponential(0.003, N*M), (N, M)))
w_list = map(list, np.reshape(randu(0.5, 1.0, N*M), (N, M)))
fig_title = 'Signal Encoded Using Delayed IAF Encoder'
print fig_title
s_list = func_timer(iaf.iaf_encode_delay)(u_list, t_start, dt, b_list, d_list,
k_list, a_list, w_list)
for i in xrange(M):
for j in xrange(N):
fig_title_out = fig_title + '\n(Signal #' + str(i+1) + \
', Neuron #' + str(j+1) + ')'
pl.plot_encoded(t_enc, u_list[i][k_start:k_end],
s_list[j][np.cumsum(s_list[j])<T],
fig_title_out,
output_name + str(output_count) + output_ext)
output_count += 1
fig_title = 'Signal Decoded Using Delayed IAF Decoder'
print fig_title
u_rec_list = func_timer(iaf.iaf_decode_delay)(s_list, T, dt,
b_list, d_list, k_list,
a_list, w_list)
for i in xrange(M):
fig_title_out = fig_title + ' (Signal #' + str(i+1) + ')'
pl.plot_compare(t_enc, u_list[i][k_start:k_end],
u_rec_list[i][0:k_end-k_start], fig_title_out,
output_name + str(output_count) + output_ext)
output_count += 1
| bsd-3-clause |
rahuldhote/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be iterable an (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
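# Illustrative usage sketch (not part of the module): the three input_type modes described
# in the class docstring. Feature names are hashed into a fixed number of columns, so no
# vocabulary has to be stored; the tiny n_features used here is for readability and would
# cause many collisions in practice.
if __name__ == "__main__":
    dicts = [{"cat": 2, "dog": 1}, {"bird": 5}]
    print(FeatureHasher(n_features=8).transform(dicts).toarray())
    pairs = [[("cat", 2), ("dog", 1)], [("bird", 5)]]
    print(FeatureHasher(n_features=8, input_type="pair").transform(pairs).toarray())
    tokens = [["cat", "dog", "cat"], ["bird"]]
    print(FeatureHasher(n_features=8, input_type="string").transform(tokens).toarray())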
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Static_Normal_Contact/Normal_Behviour/SoftContact_NonLinHardSoftShear/plot.py | 1 | 1585 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import sys
import numpy as np;
################ Node # 2 Displacement #############################
#######################################
## Analytical Solution
#######################################
finput = h5py.File('Analytical_Solution.feioutput')
plt.figure()
# Read the time and displacement
times = finput["time"][:]
normal_strain = finput["/Model/Elements/Element_Outputs"][6,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
# plt.plot(normal_strain,normal_stress,'-r',Linewidth=4,label='Analytical Solution')
plt.hold(True)
#######################################
## Current Solution
#######################################
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
# Read the time and displacement
times = finput["time"][:]
normal_strain = finput["/Model/Elements/Element_Outputs"][6,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
# Configure the figure filename, according to the input filename.
outfig=thefile.replace("_","-")
outfigname=outfig.replace("h5.feioutput","pdf")
# Plot the figure. Add labels and titles.
plt.plot(normal_strain, normal_stress, '-k', linewidth=4, label='Numerical Solution')
plt.xlabel(r"Normal Strain $\epsilon$")
plt.ylabel(r"Normal Stress $\sigma$")
plt.legend()
plt.savefig("Contact_Normal_Interface_Behavour.pdf", bbox_inches='tight')
# plt.show()
# #####################################################################
| cc0-1.0 |
CVML/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 205 | 10378 | # Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from sklearn.feature_extraction.image import (
img_to_graph, grid_to_graph, extract_patches_2d,
reconstruct_from_patches_2d, PatchExtractor, extract_patches)
from sklearn.utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
#Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
assert_true(A.dtype == np.float)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
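# The expected view counts follow (image_dim - patch_dim) // step + 1 per axis; e.g. a
# (10, 20) image with (2, 2) patches and (5, 5) steps gives ((10-2)//5+1, (20-2)//5+1)
# = (2, 4) views, and the last patch starts at ((2-1)*5, (4-1)*5) = (5, 15).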
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
def test_width_patch():
# width and height of the patch should be less than the image
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assert_raises(ValueError, extract_patches_2d, x, (4, 1))
assert_raises(ValueError, extract_patches_2d, x, (1, 4))
| bsd-3-clause |
hakujyo/chessplaying_robot | matchpicture.py | 1 | 1381 |
import cv2
import numpy as np
import time
from matplotlib import pyplot as plt
x = 0
y = 0
def makematchpicture(Grayurl):
img = cv2.imread(Grayurl, 0)
img2 = img.copy()
template = cv2.imread('test.png', 0)
w, h = template.shape[::-1]
methods = ['cv2.TM_SQDIFF']
for meth in methods:
img = img2.copy()
method = eval(meth)
# Apply template Matching
res = cv2.matchTemplate(img, template, method)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
# If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
top_left = min_loc
else:
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
print("模板匹配:",int(top_left[0] + w / 2), int(top_left[1] + h / 2))
cv2.rectangle(img, top_left, bottom_right, 255, 2)
plt.figure()
plt.subplot(121), plt.imshow(res, cmap='gray')
plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(img, cmap='gray')
plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
plt.suptitle(meth)
global x
global y
x = int(top_left[0] + w / 2)
y = int(top_left[1] + h / 2)
| gpl-3.0 |
GoogleCloudPlatform/ml-on-gcp | tutorials/sklearn/hpsearch/gke_parallel.py | 1 | 16248 | # Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
from helpers.gke_helper import get_cluster
from helpers.gcs_helper import pickle_and_upload, get_uri_blob, download_uri_and_unpickle
from helpers.kubernetes_helper import create_job, delete_jobs_pods
from copy import deepcopy
from itertools import product
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from skopt import BayesSearchCV
from skopt.space import Categorical, Integer, Real
class GKEParallel(object):
SUPPORTED_SEARCH = [
GridSearchCV,
RandomizedSearchCV,
BayesSearchCV
]
def __init__(self, search, project_id, zone, cluster_id, bucket_name, image_name, task_name=None):
"""Wraps around a SearchCV object and handles deploying `fit`
jobs to a GKE cluster.
"""
if type(search) not in self.SUPPORTED_SEARCH:
raise TypeError('Search type {} not supported. Only supporting {}.'.format(type(search), [s.__name__ for s in self.SUPPORTED_SEARCH]))
self.search = search
self.project_id = project_id
self.cluster_id = cluster_id
self.bucket_name = bucket_name
self.image_name = image_name
self.task_name = task_name
self.gcs_uri = None
self.cluster = get_cluster(project_id, zone, cluster_id)
self.n_nodes = self.cluster['currentNodeCount']
# For GridSearchCV
self.param_grids = {}
# For RandomizedSearchCV
self.param_distributions = None
self.n_iter = None
# For BayesSearchCV
self.search_spaces = {}
self.job_names = {}
self.output_uris = {}
self.output_without_estimator_uris = {}
self.dones = {}
self.results = {}
self.best_estimator_ = None
self.best_params_ = None
self.best_score_ = None
self.best_search_ = None
self._cancelled = False
self._done = False
def _make_job_name(self, worker_id):
return '{}.worker.{}'.format(self.task_name, worker_id)
def _make_job_body(self, worker_id, X_uri, y_uri):
body = {
'apiVersion': 'batch/v1',
'kind': 'Job',
'metadata': {
'name': self._make_job_name(worker_id)
},
'spec': {
'template': {
'spec': {
'containers': [
{
'image': 'gcr.io/{}/{}'.format(self.project_id, self.image_name),
'command': ['python'],
'args': ['worker.py', self.bucket_name, self.task_name, worker_id, X_uri, y_uri],
'name': 'worker'
}
],
'restartPolicy': 'OnFailure'}
}
}
}
return body
def _deploy_job(self, worker_id, X_uri, y_uri):
job_body = self._make_job_body(worker_id, X_uri, y_uri)
print('Deploying worker {}'.format(worker_id))
create_job(job_body)
def _partition_grid(self, param_grid_dict, partition_keys):
_param_grid_dict = deepcopy(param_grid_dict)
partition_lists = [_param_grid_dict.pop(key) for key in partition_keys]
partitioned = []
for prod in product(*partition_lists):
lists = [[element] for element in prod]
singleton = dict(zip(partition_keys, lists))
singleton.update(_param_grid_dict)
partitioned.append(singleton)
return partitioned
def _partition_param_grid(self, param_grid, target_n_partition=5):
"""Returns a list of param_grids whose union is the input
param_grid.
If param_grid is a dict:
The implemented strategy attempts to partition the param_grid
into at least target_n_partition smaller param_grids.
NOTE: The naive strategy implemented here does not distinguish
between different types of parameters nor their impact on the
running time. The user of this module is encouraged to
implement their own paritioning strategy based on their needs.
"""
if type(param_grid) == list:
# If the input is already a list of param_grids then just
# use it as is.
return param_grid
else:
# The strategy is to simply expand the grid fully with
# respect to a parameter:
# [1, 2, 3]x[4, 5] --> [1]x[4, 5], [2]x[4, 5], [3]x[4, 5]
# until the target number of partitions is reached.
partition_keys = []
n_partition = 1
for key, lst in param_grid.items():
partition_keys.append(key)
n_partition *= len(lst)
if n_partition >= target_n_partition:
break
partitioned = self._partition_grid(param_grid, partition_keys)
return partitioned
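# Illustrative sketch (standalone helper, not a method of the class): the expansion that
# _partition_grid performs above, on a toy grid. Expanding on 'C' turns one grid into
# three single-value grids whose union covers the original search space, so each sub-grid
# can be fitted by a different worker. The parameter names are assumptions for the example.
def _example_expand_param_grid(param_grid, key):
    rest = deepcopy(param_grid)
    values = rest.pop(key)
    sub_grids = []
    for value in values:
        sub_grid = {key: [value]}
        sub_grid.update(rest)
        sub_grids.append(sub_grid)
    return sub_grids
# _example_expand_param_grid({'C': [1, 2, 3], 'kernel': ['rbf', 'linear']}, 'C')
# -> [{'C': [1], 'kernel': ['rbf', 'linear']}, {'C': [2], ...}, {'C': [3], ...}]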
def _handle_grid_search(self, X_uri, y_uri):
param_grids = self._partition_param_grid(self.search.param_grid, self.n_nodes)
for i, param_grid in enumerate(param_grids):
worker_id = str(i)
self.param_grids[worker_id] = param_grid
self.job_names[worker_id] = self._make_job_name(worker_id)
self.output_uris[worker_id] = 'gs://{}/{}/{}/fitted_search.pkl'.format(self.bucket_name, self.task_name, worker_id)
self.output_without_estimator_uris[worker_id] = 'gs://{}/{}/{}/fitted_search_without_estimator.pkl'.format(self.bucket_name, self.task_name, worker_id)
self.dones[worker_id] = False
pickle_and_upload(param_grid, self.bucket_name, '{}/{}/param_grid.pkl'.format(self.task_name, worker_id))
self._deploy_job(worker_id, X_uri, y_uri)
def _handle_randomized_search(self, X_uri, y_uri):
self.param_distributions = self.search.param_distributions
self.n_iter = self.search.n_iter
n_iter = self.n_iter / self.n_nodes + 1
for i in xrange(self.n_nodes):
worker_id = str(i)
self.job_names[worker_id] = self._make_job_name(worker_id)
self.output_uris[worker_id] = 'gs://{}/{}/{}/fitted_search.pkl'.format(self.bucket_name, self.task_name, worker_id)
self.output_without_estimator_uris[worker_id] = 'gs://{}/{}/{}/fitted_search_without_estimator.pkl'.format(self.bucket_name, self.task_name, worker_id)
self.dones[worker_id] = False
pickle_and_upload(self.param_distributions, self.bucket_name, '{}/{}/param_distributions.pkl'.format(self.task_name, worker_id))
pickle_and_upload(n_iter, self.bucket_name, '{}/{}/n_iter.pkl'.format(self.task_name, worker_id))
self._deploy_job(worker_id, X_uri, y_uri)
def _partition_space(self, space):
"""Partitions the space into two subspaces. In the case of
Real and Integer, the subspaces are not disjoint, but
overlapping at an endpoint.
The argument `space` should be a dict whose values are
skopt.space's Categorical, Integer, or Real.
"""
partition_key = np.random.choice(space.keys())
dimension = space[partition_key]
if type(dimension) == Categorical:
categories = dimension.categories
prior = dimension.prior
transform = dimension.transform_
if len(categories) >= 2:
mid_index = len(categories) / 2
left_categories = categories[:mid_index]
right_categories = categories[mid_index:]
if prior is not None:
left_prior = prior[:mid_index]
left_weight = sum(left_prior)
left_prior = [p/left_weight for p in left_prior]
right_prior = prior[mid_index:]
right_weight = sum(right_prior)
right_prior = [p/right_weight for p in right_prior]
else:
left_prior = None
right_prior = None
left = Categorical(left_categories, prior=left_prior, transform=transform)
right = Categorical(right_categories, prior=right_prior, transform=transform)
else:
return [space]
elif type(dimension) == Integer:
low = dimension.low
high = dimension.high
transform = dimension.transform_
if low < high:
mid = low + int((high - low) / 2)
left = Integer(low, mid, transform=transform)
right = Integer(mid, high, transform=transform)
else:
return [space]
elif type(dimension) == Real:
low = dimension.low
high = dimension.high
prior = dimension.prior
transform = dimension.transform_
if low < high:
mid = low + (high - low) / 2.0
left = Real(low, mid, prior=prior, transform=transform)
right = Real(mid, high, prior=prior, transform=transform)
else:
return [space]
left_space = deepcopy(space)
left_space[partition_key] = left
right_space = deepcopy(space)
right_space[partition_key] = right
return [left_space, right_space]
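# Illustrative sketch (standalone helper, not a method of the class): splitting a toy
# skopt space dict on one dimension, mirroring the Integer branch above. The two halves
# share the midpoint, so their union still covers the original range. The dimension names
# are assumptions for the example.
def _example_split_integer_space():
    space = {'max_depth': Integer(2, 10), 'criterion': Categorical(['gini', 'entropy'])}
    low, high = space['max_depth'].low, space['max_depth'].high
    mid = low + (high - low) // 2
    left = dict(space, max_depth=Integer(low, mid))    # covers 2..6
    right = dict(space, max_depth=Integer(mid, high))  # covers 6..10
    return [left, right]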
def _partition_search_spaces(self, search_spaces, target_n_partition=5):
"""Returns a list of search_spaces whose union is the input
search_spaces.
If search_spaces is a dict:
The implemented strategy attempts to partition the search_spaces
into at least target_n_partition smaller search_spaces.
NOTE: The naive strategy implemented here does not distinguish
between different types of parameters nor their impact on the
running time. The user of this module is encouraged to
implement their own paritioning strategy based on their needs.
"""
if type(search_spaces[0]) == tuple:
# If the input is already a list of search_spaces then just
# use it as is.
return search_spaces.values()
else:
result = search_spaces.values()
while len(result) < target_n_partition:
space = result.pop()
partitioned = self._partition_space(space)
result.extend(partitioned)
return result
def _handle_bayes_search(self, X_uri, y_uri):
partitioned_search_spaces = self._partition_search_spaces(self.search.search_spaces_, self.n_nodes)
for i, search_spaces in enumerate(partitioned_search_spaces):
worker_id = str(i)
self.search_spaces[worker_id] = search_spaces
self.job_names[worker_id] = self._make_job_name(worker_id)
self.output_uris[worker_id] = 'gs://{}/{}/{}/fitted_search.pkl'.format(self.bucket_name, self.task_name, worker_id)
self.output_without_estimator_uris[worker_id] = 'gs://{}/{}/{}/fitted_search_without_estimator.pkl'.format(self.bucket_name, self.task_name, worker_id)
self.dones[worker_id] = False
pickle_and_upload(search_spaces, self.bucket_name, '{}/{}/search_spaces.pkl'.format(self.task_name, worker_id))
self._deploy_job(worker_id, X_uri, y_uri)
def _upload_data(self, X, y):
if type(X) == str and X.startswith('gs://'):
X_uri = X
else:
X_uri = pickle_and_upload(X, self.bucket_name, '{}/X.pkl'.format(self.task_name))
if type(y) == str and y.startswith('gs://'):
y_uri = y
else:
y_uri = pickle_and_upload(y, self.bucket_name, '{}/y.pkl'.format(self.task_name))
search_uri = pickle_and_upload(self.search, self.bucket_name, '{}/search.pkl'.format(self.task_name))
return X_uri, y_uri, search_uri
def fit(self, X, y):
"""Deploys `fit` jobs to each worker in the cluster.
"""
timestamp = str(int(time.time()))
self.task_name = self.task_name or '{}.{}.{}'.format(self.cluster_id, self.image_name, timestamp)
self._done = False
self._cancelled = False
X_uri, y_uri, _ = self._upload_data(X, y)
if type(self.search) == GridSearchCV:
handler = self._handle_grid_search
elif type(self.search) == RandomizedSearchCV:
handler = self._handle_randomized_search
elif type(self.search) == BayesSearchCV:
handler = self._handle_bayes_search
print('Fitting {}'.format(type(self.search)))
handler(X_uri, y_uri)
self.persist()
def persist(self):
"""Pickle and upload self to GCS, allowing recovering of parallel
search objects across experiments.
"""
self.gcs_uri = pickle_and_upload(self, self.bucket_name, '{}/gke_search.pkl'.format(self.task_name))
print('Persisted the GKEParallel instance: {}'.format(self.gcs_uri))
# Implement part of the concurrent.future.Future interface.
def done(self):
if not self._done:
for worker_id, output_uri in self.output_uris.items():
print('Checking if worker {} is done'.format(worker_id))
self.dones[worker_id] = get_uri_blob(output_uri).exists()
self._done = all(self.dones.values())
return self._done
def cancel(self):
"""Deletes the kubernetes jobs.
Persisted data and the cluster will not be deleted."""
if not self._cancelled:
delete_jobs_pods(self.job_names.values())
self._cancelled = True
def cancelled(self):
return self._cancelled
def result(self, download=False):
if not self.done():
n_done = sum(1 for d in self.dones.values() if d)
print('Not done: {} out of {} workers completed.'.format(n_done, len(self.dones)))
return None
if not self.results or download:
for worker_id, output_uri in self.output_without_estimator_uris.items():
print('Getting result from worker {}'.format(worker_id))
self.results[worker_id] = download_uri_and_unpickle(output_uri)
self._aggregate_results(download)
self.persist()
return self.results
def _aggregate_results(self, download):
best_id = None
for worker_id, result in self.results.items():
if self.best_score_ is None or result.best_score_ > self.best_score_ or download:
self.best_score_ = result.best_score_
self.best_params_ = result.best_params_
best_id = worker_id
if download and self.best_estimator_ is None:
# Download only the best estimator among the workers.
print('Downloading the best estimator (worker {}).'.format(best_id))
output_uri = self.output_uris[best_id]
self.best_search_ = download_uri_and_unpickle(output_uri)
self.best_estimator_ = self.best_search_.best_estimator_
# Implement part of SearchCV interface by delegation.
def predict(self, *args, **kwargs):
return self.best_estimator_.predict(*args, **kwargs)
def predict_proba(self, *args, **kwargs):
return self.best_estimator_.predict_proba(*args, **kwargs)
def predict_log_proba(self, *args, **kwargs):
return self.best_estimator_.predict_log_proba(*args, **kwargs)
| apache-2.0 |
jjx02230808/project0223 | examples/decomposition/plot_ica_vs_pca.py | 306 | 3329 | """
==========================
FastICA on 2D point clouds
==========================
This example illustrates visually in the feature space a comparison of the
results obtained using two different component analysis techniques:
:ref:`ICA` vs :ref:`PCA`.
Representing ICA in the feature space gives the view of 'geometric ICA':
ICA is an algorithm that finds directions in the feature space
corresponding to projections with high non-Gaussianity. These directions
need not be orthogonal in the original feature space, but they are
orthogonal in the whitened feature space, in which all directions
correspond to the same variance.
PCA, on the other hand, finds orthogonal directions in the raw feature
space that correspond to directions accounting for maximum variance.
Here we simulate independent sources using a highly non-Gaussian
process: two Student's t variables with a low number of degrees of freedom (top left
figure). We mix them to create observations (top right figure).
In this raw observation space, directions identified by PCA are
represented by orange vectors. We represent the signal in the PCA space,
after whitening by the variance corresponding to the PCA vectors (lower
left). Running ICA corresponds to finding a rotation in this space to
identify the directions of largest non-Gaussianity (lower right).
"""
print(__doc__)
# Authors: Alexandre Gramfort, Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
###############################################################################
# Generate sample data
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(random_state=rng)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
if axis_list is not None:
colors = ['orange', 'red']
for color, axis in zip(colors, axis_list):
axis /= axis.std()
x_axis, y_axis = axis
# Trick to get legend to work
plt.plot(0.1 * x_axis, 0.1 * y_axis, linewidth=2, color=color)
plt.quiver(0, 0, x_axis, y_axis, zorder=11, width=0.01, scale=6,
color=color)
plt.hlines(0, -3, 3)
plt.vlines(0, -3, 3)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xlabel('x')
plt.ylabel('y')
plt.figure()
plt.subplot(2, 2, 1)
plot_samples(S / S.std())
plt.title('True Independent Sources')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 2, 2)
plot_samples(X / np.std(X), axis_list=axis_list)
legend = plt.legend(['PCA', 'ICA'], loc='upper right')
legend.set_zorder(100)
plt.title('Observations')
plt.subplot(2, 2, 3)
plot_samples(S_pca_ / np.std(S_pca_, axis=0))
plt.title('PCA recovered signals')
plt.subplot(2, 2, 4)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
| bsd-3-clause |
ishank08/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
phobson/seaborn | seaborn/algorithms.py | 3 | 4233 | """Algorithms to support fitting routines in seaborn plotting functions."""
from __future__ import division
import numpy as np
from scipy import stats
import warnings
from .external.six import string_types
from .external.six.moves import range
def bootstrap(*args, **kwargs):
"""Resample one or more arrays with replacement and store aggregate values.
Positional arguments are a sequence of arrays to bootstrap along the first
axis and pass to a summary function.
Keyword arguments:
n_boot : int, default 10000
Number of iterations
axis : int, default None
Will pass axis to ``func`` as a keyword argument.
units : array, default None
Array of sampling unit IDs. When used the bootstrap resamples units
and then observations within units instead of individual
datapoints.
smooth : bool, default False
If True, performs a smoothed bootstrap (draws samples from a kernel
        density estimate); only works for one-dimensional inputs and cannot
        be used when `units` is present.
func : string or callable, default np.mean
Function to call on the args that are passed in. If string, tries
to use as named method on numpy array.
random_seed : int | None, default None
Seed for the random number generator; useful if you want
reproducible resamples.
Returns
-------
boot_dist: array
array of bootstrapped statistic values
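    Examples
    --------
    Illustrative sketch only (``data`` is an assumed 1d array, not defined
    in this module):
        boots = bootstrap(data, n_boot=1000, func=np.mean, random_seed=0)
        ci_low, ci_high = np.percentile(boots, [2.5, 97.5])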
"""
# Ensure list of arrays are same length
if len(np.unique(list(map(len, args)))) > 1:
raise ValueError("All input arrays must have the same length")
n = len(args[0])
# Default keyword arguments
n_boot = kwargs.get("n_boot", 10000)
func = kwargs.get("func", np.mean)
axis = kwargs.get("axis", None)
units = kwargs.get("units", None)
smooth = kwargs.get("smooth", False)
random_seed = kwargs.get("random_seed", None)
if axis is None:
func_kwargs = dict()
else:
func_kwargs = dict(axis=axis)
# Initialize the resampler
rs = np.random.RandomState(random_seed)
# Coerce to arrays
args = list(map(np.asarray, args))
if units is not None:
units = np.asarray(units)
# Allow for a function that is the name of a method on an array
if isinstance(func, string_types):
def f(x):
return getattr(x, func)()
else:
f = func
# Do the bootstrap
if smooth:
msg = "Smooth bootstraps are deprecated and will be removed."
warnings.warn(msg)
return _smooth_bootstrap(args, n_boot, f, func_kwargs)
if units is not None:
return _structured_bootstrap(args, n_boot, units, f,
func_kwargs, rs)
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n, n)
sample = [a.take(resampler, axis=0) for a in args]
boot_dist.append(f(*sample, **func_kwargs))
return np.array(boot_dist)
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, rs):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = rs.randint(0, n_units, n_units)
sample = [np.take(a, resampler, axis=0) for a in args]
lengths = map(len, sample[0])
resampler = [rs.randint(0, n, n) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)]
for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _smooth_bootstrap(args, n_boot, func, func_kwargs):
"""Bootstrap by resampling from a kernel density estimate."""
n = len(args[0])
boot_dist = []
kde = [stats.gaussian_kde(np.transpose(a)) for a in args]
for i in range(int(n_boot)):
sample = [a.resample(n).T for a in kde]
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
| bsd-3-clause |
emdodds/LCAversions | timing.py | 1 | 3230 | #This file will time various versions of LCA
from __future__ import division
import numpy as np
import sklearn.preprocessing as skp
from timeit import default_timer as timer
from LCAnumpy import lca as lcan
from LCAfortran import lca as lcaf
from LCAnumbaprog import lca as lcag
def main():
"""Profiles various versions of LCA."""
nshort = 6
tshort = 2
nmed = 3
tmed = 6
nlong = 1
#Setup variables for inference
numDict = int(2048)
numBatch = int(128)
dataSize = int(256)
dictsIn = np.random.randn(numDict,dataSize)
# LCA requires that dictionary be unit norm
dictsIn = skp.normalize(dictsIn, axis=1)
stimuli = np.random.randn(numBatch,dataSize)
batchCoeffs = np.random.randn(numBatch,numDict)
coeffs = np.zeros((numBatch, numDict))
eta = .01
lamb = .05
nIter = 300
adapt = .99
softThresh = 0
thresh = np.random.randn(numBatch)
#LCA
params = """Parameters:
numDict: """+str(numDict)+"""
numBatch: """+str(numBatch)+"""
dataSize: """+str(dataSize)+"""
nIter: """+str(nIter)+"""\n"""
print params
start = timer()
lcan.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
dt = timer()-start
if dt < tshort:
n_times = nshort
elif dt < tmed:
n_times = nmed
else:
n_times = nlong
for ii in xrange(n_times-1):
start = timer()
lcan.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
dt = dt+timer()-start
dt = dt/(n_times)
print '---------------Numpy based LCA----------------'
print 'Average time over '+str(n_times)+' trials:'
print '%f s' % dt
dictsIn = np.array(dictsIn,order='F')
stimuli = np.array(stimuli,order='F')
coeffs = np.array(coeffs,order='F')
batchCoeffs = np.array(batchCoeffs,order='F')
thresh = np.array(thresh,order='F')
start = timer()
lcaf.lca(dictsIn,stimuli,eta,lamb,nIter,softThresh,adapt,coeffs,batchCoeffs,thresh,numDict,numBatch,dataSize)
dt = timer()-start
if dt < tshort:
n_times = nshort
elif dt < tmed:
n_times = nmed
else:
n_times = nlong
for ii in xrange(n_times-1):
start = timer()
lcaf.lca(dictsIn,stimuli,eta,lamb,nIter,softThresh,adapt,coeffs,batchCoeffs,thresh,numDict,numBatch,dataSize)
dt = dt+timer()-start
dt = dt/(n_times)
print '---------------Fortran based LCA--------------'
print 'Average time over '+str(n_times)+' trials:'
print '%f s' % dt
dictsIn = np.array(dictsIn,dtype=np.float32,order='F')
stimuli = np.array(stimuli,dtype=np.float32,order='F')
start = timer()
lcag.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
dt = timer()-start
if dt < tshort:
n_times = nshort
elif dt < tmed:
n_times = nmed
else:
n_times = nlong
for ii in xrange(n_times-1):
start = timer()
lcag.infer(dictsIn,stimuli,eta,lamb,nIter,adapt)
dt = dt+timer()-start
dt = dt/(n_times)
print '----------------GPU based LCA-----------------'
print 'Average time over '+str(n_times)+' trials:'
print '%f s' % dt
if __name__ == '__main__':
main()
| mit |
simon-pepin/scikit-learn | sklearn/mixture/tests/test_gmm.py | 200 | 17427 | import unittest
import copy
import sys
from nose.tools import assert_true
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises)
from scipy import stats
from sklearn import mixture
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.externals.six.moves import cStringIO as StringIO
rng = np.random.RandomState(0)
def test_sample_gaussian():
# Test sample generation from mixture.sample_gaussian where covariance
# is diagonal, spherical and full
n_features, n_samples = 2, 300
axis = 1
mu = rng.randint(10) * rng.rand(n_features)
cv = (rng.rand(n_features) + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='diag', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(samples.var(axis), cv, atol=1.5))
# the same for spherical covariances
cv = (rng.rand() + 1.0) ** 2
samples = mixture.sample_gaussian(
mu, cv, covariance_type='spherical', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.5))
assert_true(np.allclose(
samples.var(axis), np.repeat(cv, n_features), atol=1.5))
# and for full covariances
A = rng.randn(n_features, n_features)
cv = np.dot(A.T, A) + np.eye(n_features)
samples = mixture.sample_gaussian(
mu, cv, covariance_type='full', n_samples=n_samples)
assert_true(np.allclose(samples.mean(axis), mu, atol=1.3))
assert_true(np.allclose(np.cov(samples), cv, atol=2.5))
# Numerical stability check: in SciPy 0.12.0 at least, eigh may return
# tiny negative values in its second return value.
from sklearn.mixture import sample_gaussian
x = sample_gaussian([0, 0], [[4, 3], [1, .1]],
covariance_type='full', random_state=42)
print(x)
assert_true(np.isfinite(x).all())
def _naive_lmvnpdf_diag(X, mu, cv):
# slow and naive implementation of lmvnpdf
ref = np.empty((len(X), len(mu)))
stds = np.sqrt(cv)
for i, (m, std) in enumerate(zip(mu, stds)):
ref[:, i] = np.log(stats.norm.pdf(X, m, std)).sum(axis=1)
return ref
def test_lmvnpdf_diag():
# test a slow and naive implementation of lmvnpdf and
# compare it to the vectorized version (mixture.lmvnpdf) to test
# for correctness
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
ref = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, cv, 'diag')
assert_array_almost_equal(lpr, ref)
def test_lmvnpdf_spherical():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
spherecv = rng.rand(n_components, 1) ** 2 + 1
X = rng.randint(10) * rng.rand(n_samples, n_features)
cv = np.tile(spherecv, (n_features, 1))
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, spherecv,
'spherical')
assert_array_almost_equal(lpr, reference)
def test_lmvnpdf_full():
n_features, n_components, n_samples = 2, 3, 10
mu = rng.randint(10) * rng.rand(n_components, n_features)
cv = (rng.rand(n_components, n_features) + 1.0) ** 2
X = rng.randint(10) * rng.rand(n_samples, n_features)
fullcv = np.array([np.diag(x) for x in cv])
reference = _naive_lmvnpdf_diag(X, mu, cv)
lpr = mixture.log_multivariate_normal_density(X, mu, fullcv, 'full')
assert_array_almost_equal(lpr, reference)
def test_lvmpdf_full_cv_non_positive_definite():
n_features, n_samples = 2, 10
rng = np.random.RandomState(0)
X = rng.randint(10) * rng.rand(n_samples, n_features)
mu = np.mean(X, 0)
cv = np.array([[[-1, 0], [0, 1]]])
expected_message = "'covars' must be symmetric, positive-definite"
assert_raise_message(ValueError, expected_message,
mixture.log_multivariate_normal_density,
X, mu, cv, 'full')
def test_GMM_attributes():
n_components, n_features = 10, 4
covariance_type = 'diag'
g = mixture.GMM(n_components, covariance_type, random_state=rng)
weights = rng.rand(n_components)
weights = weights / weights.sum()
means = rng.randint(-20, 20, (n_components, n_features))
assert_true(g.n_components == n_components)
assert_true(g.covariance_type == covariance_type)
g.weights_ = weights
assert_array_almost_equal(g.weights_, weights)
g.means_ = means
assert_array_almost_equal(g.means_, means)
covars = (0.1 + 2 * rng.rand(n_components, n_features)) ** 2
g.covars_ = covars
assert_array_almost_equal(g.covars_, covars)
assert_raises(ValueError, g._set_covars, [])
assert_raises(ValueError, g._set_covars,
np.zeros((n_components - 2, n_features)))
assert_raises(ValueError, mixture.GMM, n_components=20,
covariance_type='badcovariance_type')
class GMMTester():
do_test_eval = True
def _setUp(self):
self.n_components = 10
self.n_features = 4
self.weights = rng.rand(self.n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.randint(-20, 20, (self.n_components, self.n_features))
self.threshold = -0.5
self.I = np.eye(self.n_features)
self.covars = {
'spherical': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'tied': (make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I),
'diag': (0.1 + 2 * rng.rand(self.n_components,
self.n_features)) ** 2,
'full': np.array([make_spd_matrix(self.n_features, random_state=0)
+ 5 * self.I for x in range(self.n_components)])}
def test_eval(self):
if not self.do_test_eval:
return # DPGMM does not support setting the means and
        # covariances before fitting. There is no way of fixing this
# due to the variational parameters being more expressive than
# covariance matrices
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = self.covars[self.covariance_type]
g.weights_ = self.weights
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = rng.randn(n_samples, self.n_features) + g.means_[gaussidx]
ll, responsibilities = g.score_samples(X)
self.assertEqual(len(ll), n_samples)
self.assertEqual(responsibilities.shape,
(n_samples, self.n_components))
assert_array_almost_equal(responsibilities.sum(axis=1),
np.ones(n_samples))
assert_array_equal(responsibilities.argmax(axis=1), gaussidx)
def test_sample(self, n=100):
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type, random_state=rng)
# Make sure the means are far apart so responsibilities.argmax()
# picks the actual component used to generate the observations.
g.means_ = 20 * self.means
g.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
g.weights_ = self.weights
samples = g.sample(n)
self.assertEqual(samples.shape, (n, self.n_features))
def test_train(self, params='wmc'):
g = mixture.GMM(n_components=self.n_components,
covariance_type=self.covariance_type)
g.weights_ = self.weights
g.means_ = self.means
g.covars_ = 20 * self.covars[self.covariance_type]
# Create a training set by sampling from the predefined distribution.
X = g.sample(n_samples=100)
g = self.model(n_components=self.n_components,
covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-1,
n_iter=1, init_params=params)
g.fit(X)
# Do one training iteration at a time so we can keep track of
# the log likelihood to make sure that it increases after each
# iteration.
trainll = []
for _ in range(5):
g.params = params
g.init_params = ''
g.fit(X)
trainll.append(self.score(g, X))
g.n_iter = 10
g.init_params = ''
g.params = params
g.fit(X) # finish fitting
# Note that the log likelihood will sometimes decrease by a
# very small amount after it has more or less converged due to
# the addition of min_covar to the covariance (to prevent
# underflow). This is why the threshold is set to -0.5
# instead of 0.
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > self.threshold,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, self.threshold, self.covariance_type, trainll))
def test_train_degenerate(self, params='wmc'):
# Train on degenerate data with 0 in some dimensions
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, self.n_features)
X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-3, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
self.assertTrue(np.sum(np.abs(trainll / 100 / X.shape[1])) < 5)
def test_train_1d(self, params='wmc'):
# Train on 1-D data
# Create a training set by sampling from the predefined distribution.
X = rng.randn(100, 1)
# X.T[1:] = 0
g = self.model(n_components=2, covariance_type=self.covariance_type,
random_state=rng, min_covar=1e-7, n_iter=5,
init_params=params)
g.fit(X)
trainll = g.score(X)
if isinstance(g, mixture.DPGMM):
self.assertTrue(np.sum(np.abs(trainll / 100)) < 5)
else:
self.assertTrue(np.sum(np.abs(trainll / 100)) < 2)
def score(self, g, X):
return g.score(X).sum()
class TestGMMWithSphericalCovars(unittest.TestCase, GMMTester):
covariance_type = 'spherical'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithDiagonalCovars(unittest.TestCase, GMMTester):
covariance_type = 'diag'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithTiedCovars(unittest.TestCase, GMMTester):
covariance_type = 'tied'
model = mixture.GMM
setUp = GMMTester._setUp
class TestGMMWithFullCovars(unittest.TestCase, GMMTester):
covariance_type = 'full'
model = mixture.GMM
setUp = GMMTester._setUp
def test_multiple_init():
    # Test that multiple inits do not do much worse than a single one
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, covariance_type='spherical',
random_state=rng, min_covar=1e-7, n_iter=5)
train1 = g.fit(X).score(X).sum()
g.n_init = 5
train2 = g.fit(X).score(X).sum()
assert_true(train2 >= train1 - 1.e-2)
def test_n_parameters():
# Test that the right number of parameters is estimated
n_samples, n_dim, n_components = 7, 5, 2
X = rng.randn(n_samples, n_dim)
n_params = {'spherical': 13, 'diag': 21, 'tied': 26, 'full': 41}
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_true(g._n_parameters() == n_params[cv_type])
def test_1d_1component():
# Test all of the covariance_types return the same BIC score for
# 1-dimensional, 1 component fits.
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
g_full = mixture.GMM(n_components=n_components, covariance_type='full',
random_state=rng, min_covar=1e-7, n_iter=1)
g_full.fit(X)
g_full_bic = g_full.bic(X)
for cv_type in ['tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7, n_iter=1)
g.fit(X)
assert_array_almost_equal(g.bic(X), g_full_bic)
def assert_fit_predict_correct(model, X):
model2 = copy.deepcopy(model)
predictions_1 = model.fit(X).predict(X)
predictions_2 = model2.fit_predict(X)
assert adjusted_rand_score(predictions_1, predictions_2) == 1.0
def test_fit_predict():
"""
test that gmm.fit_predict is equivalent to gmm.fit + gmm.predict
"""
lrng = np.random.RandomState(101)
n_samples, n_dim, n_comps = 100, 2, 2
mu = np.array([[8, 8]])
component_0 = lrng.randn(n_samples, n_dim)
component_1 = lrng.randn(n_samples, n_dim) + mu
X = np.vstack((component_0, component_1))
for m_constructor in (mixture.GMM, mixture.VBGMM, mixture.DPGMM):
model = m_constructor(n_components=n_comps, covariance_type='full',
min_covar=1e-7, n_iter=5,
random_state=np.random.RandomState(0))
assert_fit_predict_correct(model, X)
model = mixture.GMM(n_components=n_comps, n_iter=0)
z = model.fit_predict(X)
assert np.all(z == 0), "Quick Initialization Failed!"
def test_aic():
# Test the aic and bic criteria
n_samples, n_dim, n_components = 50, 3, 2
X = rng.randn(n_samples, n_dim)
SGH = 0.5 * (X.var() + np.log(2 * np.pi)) # standard gaussian entropy
for cv_type in ['full', 'tied', 'diag', 'spherical']:
g = mixture.GMM(n_components=n_components, covariance_type=cv_type,
random_state=rng, min_covar=1e-7)
g.fit(X)
aic = 2 * n_samples * SGH * n_dim + 2 * g._n_parameters()
bic = (2 * n_samples * SGH * n_dim +
np.log(n_samples) * g._n_parameters())
bound = n_dim * 3. / np.sqrt(n_samples)
assert_true(np.abs(g.aic(X) - aic) / n_samples < bound)
assert_true(np.abs(g.bic(X) - bic) / n_samples < bound)
def check_positive_definite_covars(covariance_type):
r"""Test that covariance matrices do not become non positive definite
Due to the accumulation of round-off errors, the computation of the
covariance matrices during the learning phase could lead to non-positive
definite covariance matrices. Namely the use of the formula:
.. math:: C = (\sum_i w_i x_i x_i^T) - \mu \mu^T
instead of:
.. math:: C = \sum_i w_i (x_i - \mu)(x_i - \mu)^T
    while mathematically equivalent, was observed to raise a ``LinAlgError`` exception
when computing a ``GMM`` with full covariance matrices and fixed mean.
This function ensures that some later optimization will not introduce the
problem again.
"""
rng = np.random.RandomState(1)
# we build a dataset with 2 2d component. The components are unbalanced
# (respective weights 0.9 and 0.1)
X = rng.randn(100, 2)
X[-10:] += (3, 3) # Shift the 10 last points
gmm = mixture.GMM(2, params="wc", covariance_type=covariance_type,
min_covar=1e-3)
# This is a non-regression test for issue #2640. The following call used
# to trigger:
# numpy.linalg.linalg.LinAlgError: 2-th leading minor not positive definite
gmm.fit(X)
if covariance_type == "diag" or covariance_type == "spherical":
assert_greater(gmm.covars_.min(), 0)
else:
if covariance_type == "tied":
covs = [gmm.covars_]
else:
covs = gmm.covars_
for c in covs:
assert_greater(np.linalg.det(c), 0)
def test_positive_definite_covars():
# Check positive definiteness for all covariance types
for covariance_type in ["full", "tied", "diag", "spherical"]:
yield check_positive_definite_covars, covariance_type
def test_verbose_first_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# Create sample data
X = rng.randn(30, 5)
X[:10] += 2
g = mixture.GMM(n_components=2, n_init=2, verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
finally:
sys.stdout = old_stdout
| bsd-3-clause |
bikong2/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
wathen/PhD | MHD/FEniCS/MHD/Stabilised/Precond/MHDstabtest.py | 1 | 11477 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
from dolfin import *
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
#import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import time
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import ExactSol
import Solver as S
import MHDmatrixPrecondSetup as PrecondSetup
import NSprecondSetup
m = 7
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
NSave = np.zeros((m-1,1))
Mave = np.zeros((m-1,1))
TotalTime = np.zeros((m-1,1))
nn = 2
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0]= 1e0
for xx in xrange(1,m):
print xx
level[xx-1] = xx
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
# parameters["form_compiler"]["quadrature_degree"] = 6
# parameters = CP.ParameterSetup()
mesh = UnitSquareMesh(nn,nn)
order = 1
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "DG", order-1)
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
W = MixedFunctionSpace([Velocity,Pressure,Magnetic,Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()]
def boundary(x, on_boundary):
return on_boundary
u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(4,1)
bcu = DirichletBC(W.sub(0),u0, boundary)
bcb = DirichletBC(W.sub(2),b0, boundary)
bcr = DirichletBC(W.sub(3),r0, boundary)
# bc = [u0,p0,b0,r0]
bcs = [bcu,bcb,bcr]
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
(u, p, b, r) = TrialFunctions(W)
(v, q, c,s ) = TestFunctions(W)
kappa = 1.0
Mu_m =1e1
MU = 1.0
IterType = 'Full'
F_NS = -MU*Laplacian+Advection+gradPres-kappa*NS_Couple
if kappa == 0:
F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple
else:
F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple
params = [kappa,Mu_m,MU]
# MO.PrintStr("Preconditioning MHD setup",5,"+","\n\n","\n\n")
HiptmairMatrices = PrecondSetup.MagneticSetup(Magnetic, Lagrange, b0, r0, 1e-6)
MO.PrintStr("Setting up MHD initial guess",5,"+","\n\n","\n\n")
u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,HiptmairMatrices,1e-6,Neumann=Expression(("0","0")),options ="New", FS = "DG")
ones = Function(Pressure)
ones.vector()[:]=(0*ones.vector().array()+1)
# pConst = - assemble(p_k*dx)/assemble(ones*dx)
p_k.vector()[:] += - assemble(p_k*dx)/assemble(ones*dx)
x = Iter.u_prev(u_k,p_k,b_k,r_k)
KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(Pressure, MU)
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
# plot(b_k)
ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W,F_M,F_NS, u_k,b_k,params,IterType,"DG")
RHSform = forms.PicardRHS(mesh, W, u_k, p_k, b_k, r_k, params,"DG")
bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0")), boundary)
bcb = DirichletBC(W.sub(2),Expression(("0.0","0.0")), boundary)
bcr = DirichletBC(W.sub(3),Expression(("0.0")), boundary)
bcs = [bcu,bcb,bcr]
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-4 # tolerance
iter = 0 # iteration counter
maxiter = 40 # max no of iterations allowed
SolutionTime = 0
outer = 0
# parameters['linear_algebra_backend'] = 'uBLAS'
if IterType == "CD":
AA, bb = assemble_system(maxwell+ns, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
# u = b.duplicate()
# P = CP.Assemble(PP)
u_is = PETSc.IS().createGeneral(range(FSpaces[0].dim()))
b_is = PETSc.IS().createGeneral(range(FSpaces[0].dim()+FSpaces[1].dim(),FSpaces[0].dim()+FSpaces[1].dim()+FSpaces[2].dim()))
NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim()))
M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim()))
OuterTol = 1e-5
InnerTol = 1e-3
NSits =0
Mits =0
TotalStart =time.time()
SolutionTime = 0
while eps > tol and iter < maxiter:
iter += 1
MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n")
tic()
if IterType == "CD":
bb = assemble((Lmaxwell + Lns) - RHSform)
for bc in bcs:
bc.apply(bb)
A,b = CP.Assemble(AA,bb)
# if iter == 1
if iter == 1:
u = b.duplicate()
F = A.getSubMatrix(u_is,u_is)
kspF = NSprecondSetup.LSCKSPnonlinear(F)
else:
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
F = A.getSubMatrix(u_is,u_is)
kspF = NSprecondSetup.LSCKSPnonlinear(F)
# if iter == 1:
if iter == 1:
u = b.duplicate()
print ("{:40}").format("MHD assemble, time: "), " ==> ",("{:4f}").format(toc()), ("{:9}").format(" time: "), ("{:4}").format(time.strftime('%X %x %Z')[0:5])
kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k)
print "Inititial guess norm: ", u.norm()
stime = time.time()
# ksp.solve(b, u)
u,it1,it2 = S.solve(A,b,u,[NS_is,M_is],FSpaces,IterType,OuterTol,InnerTol,HiptmairMatrices,KSPlinearfluids,kspF,Fp,MatrixLinearFluids,kspFp)
Soltime = time.time()- stime
NSits += it1
Mits +=it2
SolutionTime = SolutionTime +Soltime
u1, p1, b1, r1, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter)
p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx)
u_k.assign(u1)
p_k.assign(p1)
b_k.assign(b1)
r_k.assign(r1)
# if eps > 100 and iter > 3:
# print 22222
# break
uOld= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
x = IO.arrayToVec(uOld)
# iter = 10000
# u_k,b_k,epsu,epsb=Iter.PicardTolerance(x,u_k,b_k,FSpaces,dim,"inf",iter)
SolTime[xx-1] = SolutionTime/iter
NSave[xx-1] = (float(NSits)/iter)
Mave[xx-1] = (float(Mits)/iter)
iterations[xx-1] = iter
TotalTime[xx-1] = time.time() - TotalStart
ue =u0
pe = p0
be = b0
re = r0
# ExactSolution = [ue,pe,be,re]
# errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(x,mesh,FSpaces,ExactSolution,order,dim, "DG")
# if xx > 1:
# l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
# H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1]))
# l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
# l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
# Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
# l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1]))
# H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1]))
import pandas as pd
# LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
# LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
# LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
# pd.set_option('precision',3)
# LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
# LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
# LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
# print LatexTable
# print "\n\n Magnetic convergence"
# MagneticTitles = ["l","B DoF","R DoF","B-L2","L2-order","B-Curl","HCurl-order"]
# MagneticValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2b,l2border,errCurlb,Curlborder),axis=1)
# MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
# pd.set_option('precision',3)
# MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
# MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f")
# MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f")
# print MagneticTable
# print "\n\n Lagrange convergence"
# LagrangeTitles = ["l","SolTime","B DoF","R DoF","R-L2","L2-order","R-H1","H1-order"]
# LagrangeValues = np.concatenate((level,SolTime,Magneticdim,Lagrangedim,errL2r,l2rorder,errH1r,H1rorder),axis=1)
# LagrangeTable= pd.DataFrame(LagrangeValues, columns = LagrangeTitles)
# pd.set_option('precision',3)
# LagrangeTable = MO.PandasFormat(LagrangeTable,"R-L2","%2.4e")
# LagrangeTable = MO.PandasFormat(LagrangeTable,'R-H1',"%2.4e")
# LagrangeTable = MO.PandasFormat(LagrangeTable,"H1-order","%1.2f")
# LagrangeTable = MO.PandasFormat(LagrangeTable,'L2-order',"%1.2f")
# print LagrangeTable
print "\n\n Iteration table"
if IterType == "Full":
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",]
else:
IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"]
IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,NSave,Mave),axis=1)
IterTable= pd.DataFrame(IterValues, columns = IterTitles)
if IterType == "Full":
IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f")
else:
IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f")
IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f")
print IterTable.to_latex()
print " \n Outer Tol: ",OuterTol, "Inner Tol: ", InnerTol
# # # if (ShowResultPlots == 'yes'):
# plot(u_k)
# plot(interpolate(ue,Velocity))
# plot(p_k)
# plot(interpolate(pe,Pressure))
# plot(b_k)
# plot(interpolate(be,Magnetic))
# plot(r_k)
# plot(interpolate(re,Lagrange))
# interactive()
interactive()
| mit |
anmolshkl/oppia-ml | decision_tree.py | 1 | 1354 | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import metrics
from sklearn import tree
import numpy as np
import os
import random
import timeit
import yaml
import load_data
import utilities
def main():
# Load data
X_train, y_train, X_test, y_test = load_data.load_data()
# Transform data into a vector of TF-IDF values
count_vect = CountVectorizer(ngram_range=(1, 2))
X_train_counts = count_vect.fit_transform(X_train)
tfidf_transformer = TfidfTransformer(use_idf=True)
X_train_dtm = tfidf_transformer.fit_transform(X_train_counts)
# Transform test data
X_test_counts = count_vect.transform(X_test)
    X_test_dtm = tfidf_transformer.transform(X_test_counts)
# using default params, criterion=gini, max_depth=None ...
clf = tree.DecisionTreeClassifier()
clf.fit(X_train_dtm, y_train)
y_pred_class = clf.predict(X_test_dtm)
# utilities.print_misclassified_samples(X_test, y_pred_class, y_test)
utilities.print_stats(y_pred_class, y_test)
if __name__ == '__main__':
    # probably not the best way to measure time, but we only want a ballpark figure
execution_time = timeit.timeit("main()", setup="from __main__ import main", number=1)
print "Execution time={0} sec".format(execution_time) | apache-2.0 |
jadhavhninad/-CSE_515_MWD_Analytics- | Phase 3/Phase3_code/matrix_factorization_recommender.py | 2 | 3728 | from mysqlConn import DbConnect
import argparse
from numpy import *
import operator
import pandas as pd
#DB connector and curosor
db = DbConnect()
db_conn = db.get_connection()
cur2 = db_conn.cursor();
#Argument parser
parser = argparse.ArgumentParser()
parser.add_argument("USER")
args = parser.parse_args()
#=====================================================================
#Task:7 - Generate a rating of all movies for a user and sort them
#=====================================================================
#After reconstruction, we lose the column and row header names, so we need to do
#some mapping.
with open("R_final.csv") as f:
ncols = len(f.readline().split('\t'))
R_final = pd.DataFrame(loadtxt('R_final.csv',delimiter='\t', skiprows=1, usecols=range(1,ncols)))
#Get user_ids and movie_ids
with open("user_ids.csv") as f:
ncols_u = len(f.readline().split('\t'))
with open("movie_ids.csv") as f:
ncols_m = len(f.readline().split('\t'))
user_list = list(loadtxt('user_ids.csv',delimiter='\t', skiprows=1, usecols=range(1,ncols_u)))
movie_list = list(loadtxt('movie_ids.csv',delimiter='\t', skiprows=1, usecols=range(1,ncols_m)))
#print user_list
#print movie_list
user_id = args.USER
user_index = user_list.index(float(user_id))
#print R_final.columns[user_index]
movie_itr = 0
movie_recommendations={}
for index, row in R_final.iterrows():
#print row[user_index]
#Map the generated rating values for a user to a given movie. Get a ceiling rating.
#print movie_list[movie_itr]
movie_recommendations[movie_list[movie_itr]] = abs(ceil(row[user_index]))
if movie_recommendations[movie_list[movie_itr]] <= 0:
movie_recommendations[movie_list[movie_itr]] = 0.5
# Such movies will be recommended below the 1 rating movie
#print movie_recommendations[movie_list[movie_itr]]
movie_itr+=1
#print "--------Generated movie recommendations---------"
#print movie_recommendations
#============================================================
#Get a list of user watched movies
#============================================================
userWatchedMovies = []
cur2.execute("SELECT movieid FROM `mlratings` where userid = %s",[args.USER])
result0 = cur2.fetchall()
for data in result0:
#print type(data[0])
userWatchedMovies.append(data[0])
cur2.execute("SELECT movieid FROM `mltags` where userid = %s",[args.USER])
result0 = cur2.fetchall()
for data in result0:
userWatchedMovies.append(data[0])
print "-----Watched movies-------"
for watched_ids in userWatchedMovies:
cur2.execute("SELECT moviename,genres FROM `mlmovies` where movieid = %s", {watched_ids, })
print cur2.fetchone()
#print "----Watched movies-------"
#print userWatchedMovies
#=======================================================
#Filter out ratings of watched movies
#=======================================================
userNotWatched={}
for mv_id in movie_recommendations:
mid = str(int(mv_id))
#print type(mid)
if mid in userWatchedMovies:
#print "watched found"
continue
else:
userNotWatched[mid] = movie_recommendations[mv_id]
#print "---- movies not watched---"
#print userNotWatched
#print "----Sorted movie recommendations-----"
movie_recommendations_sorted = sorted(userNotWatched.items(), key=operator.itemgetter(1), reverse=True)
#print movie_recommendations_sorted
#Return top 5 unwatched movies in the generated recommendations
print "-------Top 5 Recommended movies------"
for i in range(0,5,1):
print movie_recommendations_sorted[i]
cur2.execute("SELECT moviename,genres FROM `mlmovies` where movieid like %s", {movie_recommendations_sorted[i][0], })
print cur2.fetchone()
| gpl-3.0 |
ml31415/numpy-groupies | numpy_groupies/benchmarks/simple.py | 1 | 4248 | #!/usr/bin/python -B
# -*- coding: utf-8 -*-
from __future__ import print_function
import timeit
import numpy as np
from numpy_groupies.utils import aliasing
from numpy_groupies import aggregate_py, aggregate_np, aggregate_ufunc
from numpy_groupies.aggregate_pandas import aggregate as aggregate_pd
def aggregate_group_loop(*args, **kwargs):
"""wraps func in lambda which prevents aggregate_numpy from
recognising and optimising it. Instead it groups and loops."""
func = kwargs['func']
del kwargs['func']
return aggregate_np(*args, func=lambda x: func(x), **kwargs)
print("TODO: use more extensive tests")
print("")
print("-----simple examples----------")
test_a = np.array([12.0, 3.2, -15, 88, 12.9])
test_group_idx = np.array([1, 0, 1, 4, 1 ])
print("test_a: ", test_a)
print("test_group_idx: ", test_group_idx)
print("aggregate(test_group_idx, test_a):")
print(aggregate_np(test_group_idx, test_a)) # group vals by idx and sum
# array([3.2, 9.9, 0., 0., 88.])
print("aggregate(test_group_idx, test_a, sz=8, func='min', fill_value=np.nan):")
print(aggregate_np(test_group_idx, test_a, size=8, func='min', fill_value=np.nan))
# array([3.2, -15., nan, 88., nan, nan, nan, nan])
print("aggregate(test_group_idx, test_a, sz=5, func=lambda x: ' + '.join(str(xx) for xx in x),fill_value='')")
print(aggregate_np(test_group_idx, test_a, size=5, func=lambda x: ' + '.join(str(xx) for xx in x), fill_value=''))
print("")
print("---------testing--------------")
print("compare against group-and-loop with numpy")
testable_funcs = {aliasing[f]: f for f in (np.sum, np.prod, np.any, np.all, np.min, np.max, np.std, np.var, np.mean)}
test_group_idx = np.random.randint(0, int(1e3), int(1e5))
test_a = np.random.rand(int(1e5)) * 100 - 50
test_a[test_a > 25] = 0 # for use with bool functions
for name, f in testable_funcs.items():
numpy_loop_group = aggregate_group_loop(test_group_idx, test_a, func=f)
for acc_func, acc_name in [(aggregate_np, 'np-optimised'),
(aggregate_ufunc, 'np-ufunc-at'),
(aggregate_py, 'purepy'),
(aggregate_pd, 'pandas')]:
try:
test_out = acc_func(test_group_idx, test_a, func=name)
test_out = np.asarray(test_out)
if not np.allclose(test_out, numpy_loop_group.astype(test_out.dtype)):
print(name, acc_name, "FAILED test, output: [" + acc_name + "; correct]...")
print(np.vstack((test_out, numpy_loop_group)))
else:
print(name, acc_name, "PASSED test")
except NotImplementedError:
print(name, acc_name, "NOT IMPLEMENTED")
print("")
print("----------benchmarking-------------")
print("Note that the actual observed speedup depends on a variety of properties of the input.")
print("Here we are using 100,000 indices uniformly picked from [0, 1000).")
print("Specifically, about 25% of the values are 0 (for use with bool operations),")
print("the remainder are uniformly distribuited on [-50,25).")
print("Times are scaled to 10 repetitions (actual number of reps used may not be 10).")
print(''.join(['function'.rjust(8), 'pure-py'.rjust(14), 'np-grouploop'.rjust(14),
'np-ufuncat'.rjust(14), 'np-optimised'.rjust(14), 'pandas'.rjust(14),
'ratio'.rjust(15)]))
for name, f in testable_funcs.items():
print(name.rjust(8), end='')
times = [None] * 5
for ii, acc_func in enumerate([aggregate_py, aggregate_group_loop,
aggregate_ufunc, aggregate_np,
aggregate_pd]):
try:
func = f if acc_func is aggregate_group_loop else name
reps = 3 if acc_func is aggregate_py else 20
times[ii] = timeit.Timer(lambda: acc_func(test_group_idx, test_a, func=func)).timeit(number=reps) / reps * 10
print(("%.1fms" % ((times[ii] * 1000))).rjust(13), end='')
except NotImplementedError:
print("no-impl".rjust(13), end='')
denom = min(t for t in times if t is not None)
ratios = [("-".center(4) if t is None else str(round(t / denom, 1))).center(5) for t in times]
print(" ", (":".join(ratios)))
| bsd-2-clause |
NunoEdgarGub1/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/finance.py | 69 | 20558 | """
A collection of modules for collecting, analyzing and plotting
financial data. User contributions welcome!
"""
#from __future__ import division
import os, time, warnings
from urllib import urlopen
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
try: import datetime
except ImportError:
raise ImportError('The finance module requires datetime support (python2.3)')
import numpy as np
from matplotlib import verbose, get_configdir
from dates import date2num
from matplotlib.cbook import Bunch
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.colors import colorConverter
from lines import Line2D, TICKLEFT, TICKRIGHT
from patches import Rectangle
from matplotlib.transforms import Affine2D
configdir = get_configdir()
cachedir = os.path.join(configdir, 'finance.cache')
def parse_yahoo_historical(fh, asobject=False, adjusted=True):
"""
Parse the historical data in file handle fh from yahoo finance and return
results as a list of
d, open, close, high, low, volume
    where d is a floating point representation of date, as returned by date2num
    if adjusted=True, use adjusted prices
"""
results = []
lines = fh.readlines()
datefmt = None
for line in lines[1:]:
vals = line.split(',')
if len(vals)!=7: continue
datestr = vals[0]
if datefmt is None:
try:
datefmt = '%Y-%m-%d'
dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
except ValueError:
datefmt = '%d-%b-%y' # Old Yahoo--cached file?
dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
d = date2num(dt)
open, high, low, close = [float(val) for val in vals[1:5]]
volume = int(vals[5])
if adjusted:
aclose = float(vals[6])
m = aclose/close
open *= m
high *= m
low *= m
close = aclose
results.append((d, open, close, high, low, volume))
results.reverse()
if asobject:
if len(results)==0: return None
else:
date, open, close, high, low, volume = map(np.asarray, zip(*results))
return Bunch(date=date, open=open, close=close, high=high, low=low, volume=volume)
else:
return results
def fetch_historical_yahoo(ticker, date1, date2, cachename=None):
"""
Fetch historical data for ticker between date1 and date2. date1 and
date2 are datetime instances
Ex:
fh = fetch_historical_yahoo('^GSPC', d1, d2)
cachename is the name of the local file cache. If None, will
    default to the md5 hash of the url (which incorporates the ticker
and date range)
a file handle is returned
"""
ticker = ticker.upper()
d1 = (date1.month-1, date1.day, date1.year)
d2 = (date2.month-1, date2.day, date2.year)
urlFmt = 'http://table.finance.yahoo.com/table.csv?a=%d&b=%d&c=%d&d=%d&e=%d&f=%d&s=%s&y=0&g=d&ignore=.csv'
url = urlFmt % (d1[0], d1[1], d1[2],
d2[0], d2[1], d2[2], ticker)
if cachename is None:
cachename = os.path.join(cachedir, md5(url).hexdigest())
if os.path.exists(cachename):
fh = file(cachename)
verbose.report('Using cachefile %s for %s'%(cachename, ticker))
else:
if not os.path.isdir(cachedir): os.mkdir(cachedir)
fh = file(cachename, 'w')
fh.write(urlopen(url).read())
fh.close()
verbose.report('Saved %s data to cache file %s'%(ticker, cachename))
fh = file(cachename, 'r')
return fh
def quotes_historical_yahoo(ticker, date1, date2, asobject=False, adjusted=True, cachename=None):
"""
Get historical data for ticker between date1 and date2. date1 and
date2 are datetime instances
results are a list of tuples
(d, open, close, high, low, volume)
    where d is a floating point representation of date, as returned by date2num
if asobject is True, the return val is an object with attrs date,
open, close, high, low, volume, which are equal length arrays
    if adjusted=True, use adjusted prices
Ex:
sp = f.quotes_historical_yahoo('^GSPC', d1, d2, asobject=True, adjusted=True)
returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
[n,bins,patches] = hist(returns, 100)
mu = mean(returns)
sigma = std(returns)
x = normpdf(bins, mu, sigma)
plot(bins, x, color='red', lw=2)
cachename is the name of the local file cache. If None, will
    default to the md5 hash of the url (which incorporates the ticker
and date range)
"""
fh = fetch_historical_yahoo(ticker, date1, date2, cachename)
try: ret = parse_yahoo_historical(fh, asobject, adjusted)
except IOError, exc:
warnings.warn('urlopen() failure\n' + url + '\n' + exc.strerror[1])
return None
return ret
def plot_day_summary(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""
quotes is a list of (time, open, close, high, low, ...) tuples
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
time must be in float date format - see date2num
ax : an Axes instance to plot to
ticksize : open/close tick marker in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
lines = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open : color = colorup
else : color = colordown
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color=color,
antialiased=False, # no need to antialias vert lines
)
oline = Line2D(
xdata=(t, t), ydata=(open, open),
color=color,
antialiased=False,
marker=TICKLEFT,
markersize=ticksize,
)
cline = Line2D(
xdata=(t, t), ydata=(close, close),
color=color,
antialiased=False,
markersize=ticksize,
marker=TICKRIGHT)
lines.extend((vline, oline, cline))
ax.add_line(vline)
ax.add_line(oline)
ax.add_line(cline)
ax.autoscale_view()
return lines
def candlestick(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
quotes is a list of (time, open, close, high, low, ...) tuples.
As long as the first 5 elements of the tuples are these values,
the tuple can be as long as you want (eg it may store volume).
time must be in float days format - see date2num
Plot the time, open, close, high, low as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
ax : an Axes instance to plot to
width : fraction of a day for the rectangle width
colorup : the color of the rectangle where close >= open
colordown : the color of the rectangle where close < open
alpha : the rectangle alpha level
return value is lines, patches where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
OFFSET = width/2.0
lines = []
patches = []
for q in quotes:
t, open, close, high, low = q[:5]
if close>=open :
color = colorup
lower = open
height = close-open
else :
color = colordown
lower = close
height = open-close
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color='k',
linewidth=0.5,
antialiased=True,
)
rect = Rectangle(
xy = (t-OFFSET, lower),
width = width,
height = height,
facecolor = color,
edgecolor = color,
)
rect.set_alpha(alpha)
lines.append(vline)
patches.append(rect)
ax.add_line(vline)
ax.add_patch(rect)
ax.autoscale_view()
return lines, patches
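# Illustrative usage sketch (comment only; assumes `plt` is matplotlib.pyplot
# and d1, d2 are datetime.date instances, none of which are imported here):
#   quotes = quotes_historical_yahoo('INTC', d1, d2)
#   fig = plt.figure()
#   ax = fig.add_subplot(111)
#   candlestick(ax, quotes, width=0.6, colorup='g', colordown='r')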
def plot_day_summary2(ax, opens, closes, highs, lows, ticksize=4,
colorup='k', colordown='r',
):
"""
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
ax : an Axes instance to plot to
ticksize : size of open and close ticks in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
return value is a list of lines added
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
# the ticks will be from ticksize to 0 in points at the origin and
# we'll translate these to the i, close location
openSegments = [ ((-ticksize, 0), (0, 0)) ]
# the ticks will be from 0 to ticksize in points at the origin and
# we'll translate these to the i, close location
closeSegments = [ ((0, 0), (ticksize, 0)) ]
offsetsOpen = [ (i, open) for i, open in zip(xrange(len(opens)), opens) if open != -1 ]
offsetsClose = [ (i, close) for i, close in zip(xrange(len(closes)), closes) if close != -1 ]
scale = ax.figure.dpi * (1.0/72.0)
tickTransform = Affine2D().scale(scale, 0.0)
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,1
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,1
colord = { True : colorup,
False : colordown,
}
colors = [colord[open<close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(rangeSegments)==len(offsetsOpen))
assert(len(offsetsOpen)==len(offsetsClose))
assert(len(offsetsClose)==len(colors))
useAA = 0, # use tuple here
lw = 1, # and here
rangeCollection = LineCollection(rangeSegments,
colors = colors,
linewidths = lw,
antialiaseds = useAA,
)
openCollection = LineCollection(openSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsOpen,
transOffset = ax.transData,
)
openCollection.set_transform(tickTransform)
closeCollection = LineCollection(closeSegments,
colors = colors,
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsClose,
transOffset = ax.transData,
)
closeCollection.set_transform(tickTransform)
minpy, maxx = (0, len(rangeSegments))
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(openCollection)
ax.add_collection(closeCollection)
return rangeCollection, openCollection, closeCollection
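# Usage sketch for the array-based API above (made-up values; -1 marks a
# missing bar, following the "all missing together" convention noted in the
# comments):
#   opens = [10, 11, -1, 12]
#   closes = [11, 10, -1, 13]
#   highs = [12, 12, -1, 14]
#   lows = [9, 9, -1, 11]
#   plot_day_summary2(ax, opens, closes, highs, lows, ticksize=4)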
def candlestick2(ax, opens, closes, highs, lows, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""
Represent the open, close as a bar line and high low range as a
vertical line.
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
return value is lineCollection, barCollection
"""
# note this code assumes if any value open, close, low, high is
# missing they all are missing
delta = width/2.
barVerts = [ ( (i-delta, open), (i-delta, close), (i+delta, close), (i+delta, open) ) for i, open, close in zip(xrange(len(opens)), opens, closes) if open != -1 and close!=-1 ]
rangeSegments = [ ((i, low), (i, high)) for i, low, high in zip(xrange(len(lows)), lows, highs) if low != -1 ]
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
    colors = [colord[open<=close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
assert(len(barVerts)==len(rangeSegments))
useAA = 0, # use tuple here
lw = 0.5, # and here
rangeCollection = LineCollection(rangeSegments,
colors = ( (0,0,0,1), ),
linewidths = lw,
antialiaseds = useAA,
)
barCollection = PolyCollection(barVerts,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
)
minx, maxx = 0, len(rangeSegments)
miny = min([low for low in lows if low !=-1])
maxy = max([high for high in highs if high != -1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
ax.add_collection(rangeCollection)
return rangeCollection, barCollection
def volume_overlay(ax, opens, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The opens and closes
are used to determine the color of the bar. -1 is missing. If a
value is missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
    colors = [colord[open<=close] for open, close in zip(opens, closes) if open!=-1 and close !=-1]
delta = width/2.
bars = [ ( (i-delta, 0), (i-delta, v), (i+delta, v), (i+delta, 0)) for i, v in enumerate(volumes) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = (0,),
linewidths = (0.5,),
)
corners = (0, 0), (len(bars), max(volumes))
ax.update_datalim(corners)
ax.autoscale_view()
    # add these last
    ax.add_collection(barCollection)
    return barCollection
def volume_overlay2(ax, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The closes are used to
determine the color of the bar. -1 is missing. If a value is
missing on one it must be missing on all
ax : an Axes instance to plot to
width : the bar width in points
colorup : the color of the lines where close >= open
colordown : the color of the lines where close < open
alpha : bar transparency
nb: first point is not displayed - it is used only for choosing the
right color
"""
return volume_overlay(ax,closes[:-1],closes[1:],volumes[1:],colorup,colordown,width,alpha)
def volume_overlay3(ax, quotes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. quotes is a list of (d,
open, close, high, low, volume) and close-open is used to
determine the color of the bar
kwarg
width : the bar width in points
colorup : the color of the lines where close1 >= close0
colordown : the color of the lines where close1 < close0
alpha : bar transparency
"""
r,g,b = colorConverter.to_rgb(colorup)
colorup = r,g,b,alpha
r,g,b = colorConverter.to_rgb(colordown)
colordown = r,g,b,alpha
colord = { True : colorup,
False : colordown,
}
dates, opens, closes, highs, lows, volumes = zip(*quotes)
colors = [colord[close1>=close0] for close0, close1 in zip(closes[:-1], closes[1:]) if close0!=-1 and close1 !=-1]
colors.insert(0,colord[closes[0]>=opens[0]])
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, volume), (right, volume), (right, 0)) for d, open, close, high, low, volume in quotes]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
dates = [d for d, open, close, high, low, volume in quotes]
offsetsBars = [(d, 0) for d in dates]
useAA = 0, # use tuple here
lw = 0.5, # and here
barCollection = PolyCollection(bars,
facecolors = colors,
edgecolors = ( (0,0,0,1), ),
antialiaseds = useAA,
linewidths = lw,
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (min(dates), max(dates))
miny = 0
maxy = max([volume for d, open, close, high, low, volume in quotes])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
#print 'datalim', ax.dataLim.get_bounds()
#print 'viewlim', ax.viewLim.get_bounds()
ax.add_collection(barCollection)
ax.autoscale_view()
return barCollection
def index_bar(ax, vals,
              facecolor='b', edgecolor='k',
width=4, alpha=1.0, ):
"""
Add a bar collection graph with height vals (-1 is missing).
ax : an Axes instance to plot to
width : the bar width in points
alpha : bar transparency
"""
facecolors = (colorConverter.to_rgba(facecolor, alpha),)
edgecolors = (colorConverter.to_rgba(edgecolor, alpha),)
right = width/2.0
left = -width/2.0
bars = [ ( (left, 0), (left, v), (right, v), (right, 0)) for v in vals if v != -1 ]
sx = ax.figure.dpi * (1.0/72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx,sy)
offsetsBars = [ (i, 0) for i,v in enumerate(vals) if v != -1 ]
barCollection = PolyCollection(bars,
facecolors = facecolors,
edgecolors = edgecolors,
antialiaseds = (0,),
linewidths = (0.5,),
offsets = offsetsBars,
transOffset = ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (0, len(offsetsBars))
miny = 0
maxy = max([v for v in vals if v!=-1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
return barCollection
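# Demo sketch (not part of the original module): exercises candlestick() on
# made-up OHLC quotes; it only runs when this file is executed directly.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    demo_quotes = [(t,                                    # time (float days)
                    10.0 + t,                             # open
                    10.0 + t + (0.5 if t % 2 else -0.5),  # close
                    11.0 + t,                             # high
                    9.0 + t)                              # low
                   for t in range(10)]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    candlestick(ax, demo_quotes, width=0.4, colorup='g', colordown='r')
    ax.set_title('candlestick demo (synthetic data)')
    plt.show()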
| gpl-3.0 |
IndraVikas/scikit-learn | examples/ensemble/plot_partial_dependence.py | 249 | 4456 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [1]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [HTF2009]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AvgOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; nor does the average number of rooms
per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [HTF2009] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [1] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
aavanian/bokeh | sphinx/source/docs/user_guide/examples/categorical_heatmap_unemployment.py | 11 | 1638 | import pandas as pd
from bokeh.io import output_file, show
from bokeh.models import BasicTicker, ColorBar, ColumnDataSource, LinearColorMapper, PrintfTickFormatter
from bokeh.plotting import figure
from bokeh.sampledata.unemployment1948 import data
from bokeh.transform import transform
output_file("unemploymemt.html")
data.Year = data.Year.astype(str)
data = data.set_index('Year')
data.drop('Annual', axis=1, inplace=True)
data.columns.name = 'Month'
# reshape to 1D array of rates with a month and year for each row.
df = pd.DataFrame(data.stack(), columns=['rate']).reset_index()
source = ColumnDataSource(df)
# this is the colormap from the original NYTimes plot
colors = ["#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce", "#ddb7b1", "#cc7878", "#933b41", "#550b1d"]
mapper = LinearColorMapper(palette=colors, low=df.rate.min(), high=df.rate.max())
p = figure(plot_width=800, plot_height=300, title="US Unemployment 1948—2016",
x_range=list(data.index), y_range=list(reversed(data.columns)),
toolbar_location=None, tools="", x_axis_location="above")
p.rect(x="Year", y="Month", width=1, height=1, source=source,
line_color=None, fill_color=transform('rate', mapper))
color_bar = ColorBar(color_mapper=mapper, location=(0, 0),
ticker=BasicTicker(desired_num_ticks=len(colors)),
formatter=PrintfTickFormatter(format="%d%%"))
p.add_layout(color_bar, 'right')
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "5pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = 1.0
show(p)
| bsd-3-clause |
aarora79/sb_study | common/utils.py | 1 | 12780 | # -*- coding: utf-8 -*-
import os
import string
import json
import pandas as pd
import numpy as np
from scipy.stats.stats import pearsonr
from dateutil.parser import parse
from common import globals as glob
# strip "_" and "-" so they are not treated as special characters
SPECIAL_CHARS = string.punctuation.replace("_", "").replace("-", "")
SPECIAL_CHARS = set(SPECIAL_CHARS)
def get_quality_summary(qual):
summary = {}
    #we are only interested in the dqs (data quality score) under each category
for q in qual.keys():
summary[q] = {}
summary[q]['dqs'] = qual[q]['dqs']
return summary
def contains_special_chars(word):
#glob.log.info(word)
if any(char in SPECIAL_CHARS for char in word):
return True
else:
return False
def encode_str_in_csv(line, s):
line += '\"' + s + '\"' + ','
return line
def store_in_dict(d, k, key1, val1, key2, val2):
d[k] = {}
d[k][key1] = val1
d[k][key2] = val2
def check_date(qual, category, df, field):
glob.log.info('checking %s ...' %(field))
#just a simple check to validate date
num_invalid = 0
for i in range(len(df)):
store = df.ix[i]
try:
parse(store[field])
except Exception as e:
num_invalid += 1
glob.log.error('found an invalid %s -> %s for store id %s' % (field, str(store[field]), str(store['store_id'])))
glob.log.error(str(e))
qual[category][field] = {}
qual[category][field]['count'] = num_invalid
qual[category][field]['percent'] = round(100*(float(num_invalid)/len(df)), glob.PRECISION)
return num_invalid
def check_as_string_wo_special_chars(qual, category, df, field, prim_key_field):
glob.log.info('checking %s ...' %(field))
#just a simple check to say should not contain any special characters
num_invalid = 0
for i in range(len(df)):
store = df.ix[i]
if contains_special_chars(str(store[field])) == True:
num_invalid += 1
glob.log.error('found an invalid %s -> %s for store id %s' % (field, str(store[field]), str(store[prim_key_field])))
qual[category][field] = {}
qual[category][field]['count'] = num_invalid
qual[category][field]['percent'] = round(100*(float(num_invalid)/len(df)), glob.PRECISION)
return num_invalid
def check_as_numeric(qual, category, df, field):
glob.log.info('checking %s ...' %(field))
#just a simple check to say field should be numeric
num_invalid = 0
for i in range(len(df)):
row = df.ix[i]
val = str(row[field])
try:
float(val)
except Exception as e:
num_invalid += 1
glob.log.error('found an invalid %s -> %s' % (field, str(row[field])))
qual[category][field] = {}
qual[category][field]['count'] = num_invalid
qual[category][field]['percent'] = round(100*(float(num_invalid)/len(df)), glob.PRECISION)
return num_invalid
def check_missing(qual, df, mf = []):
glob.log.info('checking missing data...')
qual['missing_data'] = {}
#check missing fraction of missing rows in each column and also overall
total_cell_count = df.shape[0] * df.shape[1] #rows x columns
total_empty_cells = 0
mandatory_cells_empty_count = 0 #count of empty cells in mandatory columns
mandatory_feature_list_provided = (True if len(mf) != 0 else False)
for col in df.columns:
#find out the number of columns that are empty, the idea is that
#we would end up with a TRUE/FALSE array and then summing it up
#gives the count of FALSE or empty cells
empty_cells = df[col].isnull()
num_empty_cells = sum(empty_cells)
total_empty_cells += num_empty_cells
total_cells = len(empty_cells)
#if mandatory feature list provided then check if this feature is mandatory
#if no specific list is provided then consider all features as mandatory
if mandatory_feature_list_provided == True:
if col in mf:
mandatory_cells_empty_count += num_empty_cells
else:
mandatory_cells_empty_count += num_empty_cells
fraction_empty = 100*(float(num_empty_cells)/(total_cells))
#store this info in the dict if there were any empty cells
if num_empty_cells != 0:
store_in_dict(qual['missing_data'], col, 'percent',
round(fraction_empty, glob.PRECISION),
'count', num_empty_cells)
#overall empty cell fraction
fraction_empty = 100*(float(total_empty_cells)/(total_cell_count))
store_in_dict(qual['missing_data'], 'overall', 'percent',
round(fraction_empty, glob.PRECISION),
'count', total_empty_cells)
#calculate two data quality scores; a raw score calculated simply as
# 1 - (total missing values/total values) and an adjusted score which
#is calculated based on missing values that really matter i.e. which would
#cause the entire row to get discarded.
raw_score = round(100 - fraction_empty, glob.PRECISION)
if mandatory_feature_list_provided == True:
        adjusted_raw_score = round(100 - (100 * float(mandatory_cells_empty_count)/total_cell_count), glob.PRECISION)
else:
#in case no mandatory features were provided then adjusted score is same as raw score
adjusted_raw_score = raw_score
qual['missing_data']['dqs'] = {}
qual['missing_data']['dqs']['raw_score'] = raw_score
qual['missing_data']['dqs']['adjusted_score'] = adjusted_raw_score
return qual
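# Usage sketch for check_missing (synthetic frame, with 'a' as the only
# mandatory column):
#   qual = {}
#   df = pd.DataFrame({'a': [1, None], 'b': [2, 3]})
#   check_missing(qual, df, mf=['a'])
#   qual['missing_data']['overall'] # {'percent': 25.0, 'count': 1}
#   qual['missing_data']['dqs']['raw_score'] # 75.0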
def write_dict_to_csv(data_dict, fname):
fname = os.path.join(glob.OUTPUT_DIR_NAME, fname)
glob.log.info('going to write dictionary to ' + fname)
#glob.log.info(json.dumps(data_dict, indent=glob.INDENT_LEVEL))
with open(fname, 'w') as csv_file:
#write the header row
line = '\"country_code\",' #first column is country code
#now the indicators
#get all the keys from the first dictionary, they are the same for all dictionaries
#this is a python3 thing because we would get a dict_keys, we convert it into a list
#and then the first element of the list we access and then get the keys from there
#this is specific to the WB data dictionary so this function shouldnt technically be
#in utils.py, but ok..
key_list = data_dict[list(data_dict.keys())[0]].keys()
for k in key_list:
line = encode_str_in_csv(line, k)
#remove the last ',' from the end
line = line[:-1]
line += '\n'
#read to write header row
csv_file.write(line)
#access dictionary by dictionary or rather country by country
for k in data_dict.keys():
#start with an empty line to write
line = ''
d = data_dict[k] #this dictionary represents one country
line = encode_str_in_csv(line, k) #country code
key_count = 0
for k2 in d.keys(): #indicators within a country
val = d[k2]
if k2 != 'name': #name key already holds a string
val = str(val)
                    #if there is a single quote in the name then remove it; it caused problems
#when reading the file back
val = val.replace('’', '')
#put the key in quotes, as some keys/values could have spaces
line = encode_str_in_csv(line, val) #value of individual indicators
key_count += 1
glob.log.info('country %s, indicators %d' %(k, key_count))
#write to csv
#remove the last ',' from the end
line = line[:-1]
line += '\n'
#glob.log.info(line)
#ignore any non-ascii characters, this is needed because certain country names
#have non utf-8 characters..they were causing an exception..
#line = line.encode('ascii', 'ignore')
csv_file.write(str(line))
def do_eda(df, filename, ds_name, categorial_feature_list, excluded_list):
glob.log.info('about to do some EDA (exploratory data analysis) on %s data...' %(ds_name))
eda_dict = { 'feature': [], 'mean': [], 'mode':[], 'median':[], 'stddev':[]}
#except for country code all fields are numeric and we can calculate
#mean, median and sd
for col in df.columns:
if col in excluded_list:
continue
eda_dict['feature'].append(col)
if col in categorial_feature_list:
#calc mode by first storing in a categorical series
s = pd.Categorical(df[col], categories=df[col].unique())
cc = s.value_counts()
pd.Series.sort(cc, inplace=True, ascending=False)
#what if there are two modes...just handle one case..doesnt happen in our dataset anyway
#if cc.ix[cc.index[0]] == cc.ix[cc.index[1]]:
# mode = cc.index[0] + ',' + cc.index[1]
# glob.log.info('there are more than 1 modes for %s[%s]' %(ds_name, col))
#else:
# mode = cc.index[0]
mode_str = str(cc.index[0]) + ':' + str(cc.ix[cc.index[0]])
eda_dict['mode'].append(mode_str)
eda_dict['mean'].append(0)
eda_dict['median'].append(0)
eda_dict['stddev'].append(0)
else:
#calc other descriptive stats
eda_dict['mode'].append(0)
eda_dict['mean'].append(df[col].mean())
eda_dict['median'].append(df[col].median())
eda_dict['stddev'].append(np.sqrt(df[col].var()))
eda_df = pd.DataFrame(eda_dict)
eda_df.to_csv(filename, index=False, encoding='utf-8')
try:
glob.log.info(eda_df)
except Exception as e:
glob.log.error('Exception while logging eda_df: ' + str(e))
def detect_outliers(df, ds_name, excluded_col_list, key_col_name):
glob.log.info('Detecting outliers for %s dataset' %(ds_name))
fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.EDA_DIR, ds_name + '_' + glob.OUTLIERS_CSV)
f = open(fname, 'w')
f.write('dataset,entry,field,value,3rdStdDev\n')
for col in df.columns:
if col in excluded_col_list:
continue
#refer to the column as a series, just for ease of understanding
S = df[col]
# we want to say anything outside of the 3rd standard deviation is an outlier...
outliers = S[((S-S.mean()).abs()>3*S.std())]
if len(outliers) > 0:
#print out the outliers
sigma = S.std()
for i in outliers.index:
entry = df.iloc[i]
glob.log.error('[%s] for entry %s, field %s has value %f which is outside the 3rd stddev(%f)'
%(ds_name, entry[key_col_name], col, entry[col], 3*sigma))
f.write('"%s","%s","%s","%f","%f"\n' %(ds_name, entry[key_col_name], col, entry[col], 3*sigma))
f.close()
def calc_r(ds_name, fname, df, feature_list):
glob.log.info('Calculating r for %s dataset' %(ds_name))
f = open(fname, 'w')
f.write('Dataset,feature1,feature2,r\n')
#remove all NA values as pearsons needs to have both series of the same size
df2 = df[feature_list].dropna()
#calculate perason coefficient for all possible combinations
for i in range(len(feature_list)-1):
for j in range(i+1, len(feature_list)):
r = pearsonr(df2[feature_list[i]], df2[feature_list[j]])[0]
glob.log.info('Pearson coeff(r) for %s and %s is %f' %(feature_list[i], feature_list[j], r))
f.write('%s,%s,%s,%f\n' %(ds_name,feature_list[i], feature_list[j], r))
f.close()
def calc_dqs(df):
df_temp = pd.isnull(df)
num_cells = df_temp.shape[0]*df_temp.shape[1]
empty_cells = 0
for c in df_temp.columns:
empty_cells += sum(df_temp[c])
dqs = ((num_cells-empty_cells)*100)/num_cells
glob.log.info('data quality score for dataset is %f' %(dqs))
return dqs
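# Quick sanity-check sketch for calc_dqs (synthetic frame, not project data):
#   df = pd.DataFrame({'a': [1, None, 3], 'b': [4, 5, None]})
#   calc_dqs(df) # 4 of 6 cells filled -> (4*100)/6, about 66.67 on Python 3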
def get_numeric_feature_list(df, excluded_feature_list):
numeric_features = []
for col in df.columns:
try:
x=df[col].iloc[0]
float(x)#typecast the data to float to test if it is numeric
except:
glob.log.info('%s is not a numeric feature, ignoring' %(col))
else:
if col not in excluded_feature_list:
numeric_features.append(col)
return numeric_features | mit |
Merinorus/adaisawesome | Homework/02 - Data from the Web/Question 2.py | 1 | 14839 |
# coding: utf-8
# Obtain all the data for the Master students, starting from 2007. Compute how many months it took each master student to complete their master, for those that completed it. Partition the data between male and female students, and compute the average -- is the difference in average statistically significant?
#
# Notice that master students' data is more tricky than the bachelors' one, as there are many missing records in the IS-Academia database. Therefore, try to guess how much time a master student spent at EPFL by at least checking the distance in months between Master semestre 1 and Master semestre 2. If the Mineur field is not empty, the student should also appear registered in Master semestre 3. Last but not the least, don't forget to check if the student has an entry also in the Projet Master tables. Once you can handle well this data, compute the "average stay at EPFL" for master students. Now extract all the students with a Spécialisation and compute the "average stay" per each category of that attribute -- compared to the general average, can you find any specialization for which the difference in average is statistically significant?
# In[1]:
# Requests : make http requests to websites
import requests
# BeautifulSoup : parser to manipulate easily html content
from bs4 import BeautifulSoup
# Regular expressions
import re
# Aren't pandas awesome ?
import pandas as pd
# Let's get the first page in which we will be able to extract some interesting content !
# In[2]:
# Ask for the first page on IS Academia. To see it, just type it on your browser address bar : http://isa.epfl.ch/imoniteur_ISAP/!GEDPUBLICREPORTS.filter?ww_i_reportModel=133685247
r = requests.get('http://isa.epfl.ch/imoniteur_ISAP/!GEDPUBLICREPORTS.filter?ww_i_reportModel=133685247')
htmlContent = BeautifulSoup(r.content, 'html.parser')
# In[3]:
print(htmlContent.prettify())
# Now we need to make other requests to IS Academia that specify every parameter: computer science students, all the years, and all the semesters (each semester being a pair of values: pedagogic period and semester type). Thus, we're going to extract all the parameters we need to make the next requests:
# In[4]:
# We first get the "Computer science" value
computerScienceField = htmlContent.find('option', text='Informatique')
computerScienceField
# In[5]:
computerScienceValue = computerScienceField.get('value')
computerScienceValue
# In[6]:
# Then, we're going to need all the academic years values.
academicYearsField = htmlContent.find('select', attrs={'name':'ww_x_PERIODE_ACAD'})
academicYearsSet = academicYearsField.findAll('option')
# Since there are several years to remember, we're storing all of them in a table to use them later
academicYearValues = []
# We'll put the textual content in a table aswell ("Master semestre 1", "Master semestre 2"...)
academicYearContent = []
for option in academicYearsSet:
value = option.get('value')
# However, we don't want any "null" value
if value != 'null':
academicYearValues.append(value)
academicYearContent.append(option.text)
# In[7]:
# Now we have all the academic years that might interest us. We wrangle them a little bit to be able to make requests more easily later.
academicYearValues_series = pd.Series(academicYearValues)
academicYearContent_series = pd.Series(academicYearContent)
academicYear_df = pd.concat([academicYearContent_series, academicYearValues_series], axis = 1)
academicYear_df.columns= ['Academic_year', 'Value']
academicYear_df = academicYear_df.sort_values(['Academic_year', 'Value'], ascending=[1, 0])
academicYear_df
# In[8]:
# Then, let's get all the pedagogic periods we need. It's a little bit more complicated here because we need to link the pedagogic period with a season (eg : Bachelor 1 is autumn, Bachelor 2 is spring etc.)
# Thus, we need more than the pedagogic values. For doing some tests to associate them with the right season, we need the actual textual value ("Bachelor semestre 1", "Bachelor semestre 2" etc.)
pedagogicPeriodsField = htmlContent.find('select', attrs={'name':'ww_x_PERIODE_PEDAGO'})
pedagogicPeriodsSet = pedagogicPeriodsField.findAll('option')
# Same as above, we'll store the values in a table
pedagogicPeriodValues = []
# We'll put the textual content in a table aswell ("Master semestre 1", "Master semestre 2"...)
pedagogicPeriodContent = []
for option in pedagogicPeriodsSet:
value = option.get('value')
if value != 'null':
pedagogicPeriodValues.append(value)
pedagogicPeriodContent.append(option.text)
# In[9]:
# Let's make the values and content meet each other
pedagogicPeriodContent_series = pd.Series(pedagogicPeriodContent)
pedagogicPeriodValues_series = pd.Series(pedagogicPeriodValues)
pedagogicPeriod_df = pd.concat([pedagogicPeriodContent_series, pedagogicPeriodValues_series], axis = 1);
pedagogicPeriod_df.columns = ['Pedagogic_period', 'Value']
# In[10]:
# We keep all semesters related to master students
pedagogicPeriod_df_master = pedagogicPeriod_df[[period.startswith('Master') for period in pedagogicPeriod_df.Pedagogic_period]]
pedagogicPeriod_df_minor = pedagogicPeriod_df[[period.startswith('Mineur') for period in pedagogicPeriod_df.Pedagogic_period]]
pedagogicPeriod_df_project = pedagogicPeriod_df[[period.startswith('Projet Master') for period in pedagogicPeriod_df.Pedagogic_period]]
pedagogicPeriod_df = pd.concat([pedagogicPeriod_df_master, pedagogicPeriod_df_minor, pedagogicPeriod_df_project])
pedagogicPeriod_df
# In[11]:
# Lastly, we need to extract the values associated with autumn and spring semesters.
semesterTypeField = htmlContent.find('select', attrs={'name':'ww_x_HIVERETE'})
semesterTypeSet = semesterTypeField.findAll('option')
# Again, we need to store the values in a table
semesterTypeValues = []
# We'll put the textual content in a table aswell
semesterTypeContent = []
for option in semesterTypeSet:
value = option.get('value')
if value != 'null':
semesterTypeValues.append(value)
semesterTypeContent.append(option.text)
# In[12]:
# Here are the values for autumn and spring semester :
semesterTypeValues_series = pd.Series(semesterTypeValues)
semesterTypeContent_series = pd.Series(semesterTypeContent)
semesterType_df = pd.concat([semesterTypeContent_series, semesterTypeValues_series], axis = 1)
semesterType_df.columns = ['Semester_type', 'Value']
semesterType_df
# Now, we got all the information to get all the master students !
# Let's make all the requests we need to build our data.
# We will try to do requests such as :
# - Get students from master semester 1 of 2007-2008
# - ...
# - Get students from master semester 4 of 2007-2008
# - Get students from mineur semester 1 of 2007-2008
# - Get students from mineur semester 2 of 2007-2008
# - Get students from master project semester 1 of 2007-2008
# - Get students from master project semester 2 of 2007-2008
#
# ... and so on for each academic year until 2015-2016, the last complete year.
# We can even take the first semester of 2016-2017 into account, to check whether some students we thought had finished last year are actually still studying. This can happen for different reasons: doing a mineur, a project, repeating a semester...
# We can ask for a list of students in two formats: HTML or CSV.
# We chose to get them in HTML format because this is the first time we wrangle data in HTML format, and that may be really useful for working with most websites in the future!
# The request sent by the browser to IS Academia, to get a list of student in a HTML format, looks like this :
# http://isa.epfl.ch/imoniteur_ISAP/!GEDPUBLICREPORTS.html?arg1=xxx&arg2=yyy
# With "xxx" the value associated with the argument named "arg1", "yyy" the value associated with the argument named "arg2", and so on. In practice the request has a lot more arguments.
# For instance, we tried to send a request as a "human" through our browser and intercepted it with the Postman interceptor.
# We found that the following arguments have to be sent:
# ww_x_GPS = -1
# ww_i_reportModel = 133685247
# ww_i_reportModelXsl = 133685270
# ww_x_UNITE_ACAD = 249847 (which is the value of computer science !)
# ww_x_PERIODE_ACAD = X (eg : the value corresponding to 2007-2008 would be 978181)
# ww_x_PERIODE_PEDAGO = Y (eg : 2230106 for Master semestre 1)
# ww_x_HIVERETE = Z (eg : 2936286 for autumn semester)
#
# The last three values X, Y and Z must be replaced with the ones we extracted previously. For instance, if we want to get students from Master, semester 1 (which is necessarily autumn semester) of 2007-2008, the "GET Request" would be the following :
#
# http://isa.epfl.ch/imoniteur_ISAP/!GEDPUBLICREPORTS.html?ww_x_GPS=-1&ww_i_reportModel=133685247&ww_i_reportModelXsl=133685270&ww_x_UNITE_ACAD=249847&ww_x_PERIODE_ACAD=978181&ww_x_PERIODE_PEDAGO=2230106&ww_x_HIVERETE=2936286
#
# So let's cook all the requests we're going to send !
# In[13]:
# Let's put the semester types aside, because we're going to need them
autumn_semester_value = semesterType_df.loc[semesterType_df['Semester_type'] == 'Semestre d\'automne', 'Value']
autumn_semester_value = autumn_semester_value.iloc[0]
spring_semester_value = semesterType_df.loc[semesterType_df['Semester_type'] == 'Semestre de printemps', 'Value']
spring_semester_value = spring_semester_value.iloc[0]
# In[14]:
# Here is the list of the GET requests we will sent to IS Academia
requestsToISAcademia = []
# We'll need to associate all the information associated with the requests to help wrangling data later :
academicYearRequests = []
pedagogicPeriodRequests = []
semesterTypeRequests = []
# Go all over the years ('2007-2008', '2008-2009' and so on)
for academicYear_row in academicYear_df.itertuples(index=True, name='Academic_year'):
# The year (eg: '2007-2008')
academicYear = academicYear_row.Academic_year
# The associated value (eg: '978181')
academicYear_value = academicYear_row.Value
# We get all the pedagogic periods associated with this academic year
for pegagogicPeriod_row in pedagogicPeriod_df.itertuples(index=True, name='Pedagogic_period'):
# The period (eg: 'Master semestre 1')
pedagogicPeriod = pegagogicPeriod_row.Pedagogic_period
# The associated value (eg: '2230106')
pegagogicPeriod_Value = pegagogicPeriod_row.Value
# We need to associate the corresponding semester type (eg: Master semester 1 is autumn, but Master semester 2 will be spring)
if (pedagogicPeriod.endswith('1') or pedagogicPeriod.endswith('3') or pedagogicPeriod.endswith('automne')):
semester_Value = autumn_semester_value
semester = 'Autumn'
else:
semester_Value = spring_semester_value
semester = 'Spring'
# This print line is only for debugging if you want to check something
# print("academic year = " + academicYear_value + ", pedagogic value = " + pegagogicPeriod_Value + ", pedagogic period is " + pedagogicPeriod + " (semester type value = " + semester_Value + ")")
# We're ready to cook the request !
request = 'http://isa.epfl.ch/imoniteur_ISAP/!GEDPUBLICREPORTS.html?ww_x_GPS=-1&ww_i_reportModel=133685247&ww_i_reportModelXsl=133685270&ww_x_UNITE_ACAD=' + computerScienceValue
request = request + '&ww_x_PERIODE_ACAD=' + academicYear_value
request = request + '&ww_x_PERIODE_PEDAGO=' + pegagogicPeriod_Value
request = request + '&ww_x_HIVERETE=' + semester_Value
# Add the newly created request to our wish list...
requestsToISAcademia.append(request)
# And we save the corresponding information for each request
pedagogicPeriodRequests.append(pedagogicPeriod)
academicYearRequests.append(academicYear)
semesterTypeRequests.append(semester)
# In[15]:
# Here is the list of all the requests we have to send !
# requestsToISAcademia
# In[16]:
# Here are the corresponding years for each request
# academicYearRequests
# In[17]:
# Same for associated pedagogic periods
# pedagogicPeriodRequests
# In[18]:
# Last but not the least, the semester types
# semesterTypeRequests
# In[19]:
academicYearRequests_series = pd.Series(academicYearRequests)
pedagogicPeriodRequests_series = pd.Series(pedagogicPeriodRequests)
requestsToISAcademia_series = pd.Series(requestsToISAcademia)
# Let's summarize everything in a dataframe...
requests_df = pd.concat([academicYearRequests_series, pedagogicPeriodRequests_series, requestsToISAcademia_series], axis = 1)
requests_df.columns = ['Academic_year', 'Pedagogic_period', 'Request']
requests_df
# In[ ]:
# The requests are now ready to be sent to IS Academia. Let's try it out !
# TIME OUT : We stopped right here for our homework. What is below should look like the beginning of a loop that gets students lists from IS Academia. It's not finished at all :(
# In[20]:
# WARNING : NEXT LINE IS COMMENTED FOR DEBGUGGING THE FIRST REQUEST ONLY. UNCOMMENT IT AND INDENT THE CODE CORRECTLY TO MAKE ALL THE REQUESTS
#for request in requestsToISAcademia: # LINE TO UNCOMMENT TO SEND ALL REQUESTS
request = requestsToISAcademia[0] # LINE TO COMMENT TO SEND ALL REQUESTS
print(request)
# Send the request to IS Academia
r = requests.get(request)
# Here is the HTML content of IS Academia's response
htmlContent = BeautifulSoup(r.content, 'html.parser')
# Let's extract some data...
computerScienceField = htmlContent.find('option', text='Informatique')
# In[21]:
# Getting the table of students
# Let's make the columns
columns = []
table = htmlContent.find('table')
th = table.find('th', text='Civilité')
columns.append(th.text)
# Go through the table until the last column
while th.findNext('').name == 'th':
th = th.findNext('')
columns.append(th.text)
# This array will contain all the students
studentsTable = []
# DON'T RUN THE NEXT CELL OR IT WILL CRASH ! :x
# In[22]:
# Getting the information about the student we're "looping on"
currentStudent = []
tr = th.findNext('tr')
children = tr.children
for child in children:
currentStudent.append(child.text)
# Add the student to the array
studentsTable.append(currentStudent)
# In[23]:
a = tr.findNext('tr')
a
# In[ ]:
while tr.findNext('tr') is not None:
    tr = tr.findNext('tr')
    # collect the text of each cell in this row
    currentStudent = [cell.text for cell in tr.findAll('td')]
    studentsTable.append(currentStudent)
studentsTable
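# A possible next step (sketch only): collect the scraped rows into a DataFrame
# using the header row gathered above; the cells may need cleaning if they do
# not line up one-to-one with the headers.
#   students_df = pd.DataFrame(studentsTable, columns=columns)
#   students_df.head()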
# In[ ]:
#tr = th.parent
#td = th.findNext('td')
#td.text
#th.findNext('th')
#th.findNext('th')
#tr = tr.findNext('tr')
#tr
# In[ ]:
print(htmlContent.prettify())
| gpl-3.0 |
cowlicks/odo | odo/backends/tests/test_ssh.py | 5 | 5764 | from __future__ import absolute_import, division, print_function
import pytest
paramiko = pytest.importorskip('paramiko')
import pandas as pd
import numpy as np
import re
import os
import sys
from odo.utils import tmpfile, filetext
from odo.directory import _Directory, Directory
from odo.backends.ssh import SSH, resource, ssh_pattern, sftp, drop, connect
from odo.backends.csv import CSV
from odo import into, discover, CSV, JSONLines, JSON, convert
from odo.temp import _Temp, Temp
from odo.compatibility import ON_TRAVIS_CI
import socket
skipif = pytest.mark.skipif
try:
ssh = connect(hostname='localhost')
ssh.close()
except socket.error:
pytest.skip('Could not connect')
except paramiko.PasswordRequiredException as e:
pytest.skip(str(e))
except paramiko.SSHException as e:
pytest.skip(str(e))
def test_resource():
r = resource('ssh://joe@localhost:/path/to/myfile.csv')
assert isinstance(r, SSH(CSV))
assert r.path == '/path/to/myfile.csv'
assert r.auth['hostname'] == 'localhost'
assert r.auth['username'] == 'joe'
def test_connect():
a = connect(hostname='localhost')
b = connect(hostname='localhost')
assert a is b
a.close()
c = connect(hostname='localhost')
assert a is c
assert c.get_transport() and c.get_transport().is_active()
def test_resource_directory():
r = resource('ssh://joe@localhost:/path/to/')
assert issubclass(r.subtype, _Directory)
r = resource('ssh://joe@localhost:/path/to/*.csv')
assert r.subtype == Directory(CSV)
assert r.path == '/path/to/'
def test_discover():
with filetext('name,balance\nAlice,100\nBob,200') as fn:
local = CSV(fn)
remote = SSH(CSV)(fn, hostname='localhost')
assert discover(local) == discover(remote)
def test_discover_from_resource():
with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
local = CSV(fn)
remote = resource('ssh://localhost:' + fn)
assert discover(local) == discover(remote)
def test_ssh_pattern():
uris = ['localhost:myfile.csv',
'127.0.0.1:/myfile.csv',
'user@127.0.0.1:/myfile.csv',
'user@127.0.0.1:/*.csv',
'user@127.0.0.1:/my-dir/my-file3.csv']
for uri in uris:
assert re.match(ssh_pattern, uri)
def test_copy_remote_csv():
with tmpfile('csv') as target:
with filetext('name,balance\nAlice,100\nBob,200',
extension='csv') as fn:
csv = resource(fn)
uri = 'ssh://localhost:%s.csv' % target
scsv = into(uri, csv)
assert isinstance(scsv, SSH(CSV))
assert discover(scsv) == discover(csv)
# Round trip
csv2 = into(target, scsv)
assert into(list, csv) == into(list, csv2)
def test_drop():
with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
with tmpfile('csv') as target:
scsv = SSH(CSV)(target, hostname='localhost')
assert not os.path.exists(target)
conn = sftp(**scsv.auth)
conn.put(fn, target)
assert os.path.exists(target)
drop(scsv)
drop(scsv)
assert not os.path.exists(target)
def test_drop_of_csv_json_lines_use_ssh_version():
from odo.backends.ssh import drop_ssh
for typ in [CSV, JSON, JSONLines]:
assert drop.dispatch(SSH(typ)) == drop_ssh
def test_convert_local_file_to_temp_ssh_file():
with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
csv = CSV(fn)
scsv = convert(Temp(SSH(CSV)), csv, hostname='localhost')
assert into(list, csv) == into(list, scsv)
@skipif(ON_TRAVIS_CI, reason="Don't know")
def test_temp_ssh_files():
with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
csv = CSV(fn)
scsv = into(Temp(SSH(CSV)), csv, hostname='localhost')
assert discover(csv) == discover(scsv)
assert isinstance(scsv, _Temp)
@skipif(ON_TRAVIS_CI, reason="Don't know")
def test_convert_through_temporary_local_storage():
with filetext('name,quantity\nAlice,100\nBob,200', extension='csv') as fn:
csv = CSV(fn)
df = into(pd.DataFrame, csv)
scsv = into(Temp(SSH(CSV)), csv, hostname='localhost')
assert into(list, csv) == into(list, scsv)
scsv2 = into(Temp(SSH(CSV)), df, hostname='localhost')
assert into(list, scsv2) == into(list, df)
sjson = into(Temp(SSH(JSONLines)), df, hostname='localhost')
assert (into(np.ndarray, sjson) == into(np.ndarray, df)).all()
@skipif(ON_TRAVIS_CI and sys.version_info[0] == 3,
reason='Strange hanging on travis for python33 and python34')
def test_ssh_csv_to_s3_csv():
# for some reason this can only be run in the same file as other ssh tests
# and must be a Temp(SSH(CSV)) otherwise tests above this one fail
s3_bucket = pytest.importorskip('odo.backends.tests.test_aws').s3_bucket
with filetext('name,balance\nAlice,100\nBob,200', extension='csv') as fn:
remote = into(Temp(SSH(CSV)), CSV(fn), hostname='localhost')
with s3_bucket('.csv') as b:
result = into(b, remote)
assert discover(result) == discover(resource(b))
@skipif(ON_TRAVIS_CI and sys.version_info[0] == 3,
reason='Strange hanging on travis for python33 and python34')
def test_s3_to_ssh():
pytest.importorskip('boto')
tips_uri = 's3://nyqpug/tips.csv'
with tmpfile('.csv') as fn:
result = into(Temp(SSH(CSV))(fn, hostname='localhost'), tips_uri)
assert into(list, result) == into(list, tips_uri)
assert discover(result) == discover(resource(tips_uri))
| bsd-3-clause |
shyamalschandra/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
Erotemic/hotspotter | _scripts/k_reciprocal_nearest_neighbors.py | 2 | 4308 | import sys
import os
import pyflann
import params
import numpy as np
import draw_func2 as df2
import helpers
np.random.seed(5)
# Parameters
tdim = 2; # Target viewing dimensions
dim = 2; # Calculation dimension
if len(sys.argv) == 2:
tdim = int(sys.argv[1])
dim = int(sys.argv[1])
K = 4;
checks = 128;
nQuery = 8;
nData = 1024;
# Script
def quick_flann_index(data):
data_flann = pyflann.FLANN()
flann_params = params.VSMANY_FLANN_PARAMS
checks = flann_params['checks']
data_flann.build_index(data, **flann_params)
return data_flann
def reciprocal_nearest_neighbors(query, data, data_flann, checks):
nQuery, dim = query.shape
# Assign query features to K nearest database features
(qfx2_dx, qfx2_dists) = data_flann.nn_index(query, K, checks=checks)
# Assign those nearest neighbors to K nearest database features
qx2_nn = data[qfx2_dx]
qx2_nn.shape = (nQuery*K, dim)
(_nn2_dx, nn2_dists) = data_flann.nn_index(qx2_nn, K, checks=checks)
# Get the maximum distance of the reciprocal neighbors
nn2_dists.shape = (nQuery, K, K)
qfx2_maxdist = nn2_dists.max(2)
# Test if nearest neighbor distance is less than reciprocal distance
isReciprocal = qfx2_dists < qfx2_maxdist
return qfx2_dx, qfx2_dists, isReciprocal
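# Usage sketch (synthetic points; relies on params.VSMANY_FLANN_PARAMS exactly
# like the script below):
#   data = np.random.rand(1024, 2)
#   query = np.random.rand(8, 2)
#   flann = quick_flann_index(data)
#   dx, dists, is_recip = reciprocal_nearest_neighbors(query, data, flann, checks)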
data = np.random.rand(nData, dim)
query = np.random.rand(nQuery, dim)
nQuery = len(query)
# Find query's Nearest Neighbors in data
data_flann = quick_flann_index(data)
(qfx2_dx, qfx2_dists) = data_flann.nn_index(query, K, checks=checks)
qx2_nn = data[qfx2_dx]
# get k-reciprocal nearest neighbors max distance
qx2_nn.shape = (nQuery*K, dim)
(nn2_dx, nn2_dists) = data_flann.nn_index(qx2_nn, K, checks=checks)
nn2_data = data[nn2_dx] # data's nearest neighbors
nn2_dists.shape = (nQuery, K, K)
qx2_nn.shape = (nQuery, K, dim)
qfx2_maxdist = nn2_dists.max(2)
# A neighbor is a K reciprocal if you are within the
# max distance of the assigned points K nearest neighbors
isReciprocal = qfx2_dists < qfx2_maxdist
krx2_nn = qx2_nn[isReciprocal]
krx2_qfx = helpers.tiled_range(nQuery, K)[isReciprocal]
krx2_query = query[krx2_qfx]
# Enforce viewable dimensionality
if dim != tdim:
import sklearn.decomposition
print('Plotting pca.transform dimensionality')
pca = sklearn.decomposition.PCA(copy=True, n_components=tdim, whiten=False)
pca.fit(data)
query_ = pca.transform(query)
data_ = pca.transform(data)
nn2_data_ = pca.transform(nn2_data)
qx2_nn_ = pca.transform(qx2_nn)
krx2_query_ = pca.transform(krx2_query)
krx2_nn_ = pca.transform(krx2_nn)
else:
print('Plotting full dimensionality')
query_ = (query)
data_ = (data)
    nn2_data_ = (nn2_data)  # needed below when drawing the NN-of-NN segments
    qx2_nn_ = (qx2_nn)
krx2_query_ = (krx2_query)
krx2_nn_ = (krx2_nn)
# Figure and Axis
plt = df2.plt
df2.reset()
fig = plt.figure(1)
if tdim == 2:
ax = fig.add_subplot(111)
elif tdim > 2:
from mpl_toolkits.mplot3d import Axes3D
ax = fig.add_subplot(111, projection='3d')
def plot_points(data, color, marker):
dataT = data.T
if len(dataT) == 2:
ax.plot(dataT[0], dataT[1], color=color, marker=marker, linestyle='None')
elif len(dataT) == 3:
ax.scatter(dataT[0], dataT[1], dataT[2], color=color, marker=marker)
def plot_lines(point_pairs, color):
for pair in point_pairs:
dataT = pair.T
if len(dataT) == 2:
ax.plot(dataT[0], dataT[1], color=color)
elif len(dataT) == 3:
ax.plot(dataT[0], dataT[1], dataT[2], color=color)
#plt.scatter(dataT[0], dataT[1], dataT[2], s=20, color=color)
# Plot query / data
plot_points(data_, 'b', 'x')
plot_points(query_,'b', 'o')
# Plot KNN
qx2_nn_.shape = (nQuery, K, tdim)
point_pairs = [np.vstack((query_[qx], qx2_nn_[qx,k])) for qx in xrange(nQuery) for k in xrange(K)]
plot_lines(point_pairs, (1, 0, 0, .8))
# Plot NN's KNN
qx2_nn_.shape = (nQuery*K, tdim)
nRes = len(qx2_nn_)
point_pairs3 = [np.vstack((qx2_nn_[nnx], nn2_data_[nnx,k])) for nnx in xrange(nRes) for k in xrange(K)]
plot_lines(point_pairs3, (1, .8, .8, .5))
# Plot KRNN
point_pairs2 = map(np.vstack, zip(krx2_query_, krx2_nn_))
plot_lines(point_pairs2, (0, 1, 0, .9))
df2.update()
# Show
df2.set_figtitle('KRNN=(Green), NN=(Red), NNR=(Pink), dims=%r, K=%r' % (dim, K))
exec(df2.present())
| apache-2.0 |
krez13/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
    different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
DougFirErickson/neon | examples/conv_autoencoder.py | 3 | 2945 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Convolutional autoencoder example network for MNIST data set
"""
import numpy as np
from neon.data import ArrayIterator, load_mnist
from neon.initializers import Uniform
from neon.layers import Conv, Pooling, GeneralizedCost, Deconv
from neon.models import Model
from neon.optimizers import GradientDescentMomentum
from neon.transforms import Rectlin, SumSquared
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser
# parse the command line arguments
parser = NeonArgparser(__doc__)
args = parser.parse_args()
# Load dataset
(X_train, y_train), (X_test, y_test), nclass = load_mnist(path=args.data_dir)
# Set input and target to X_train
train = ArrayIterator(X_train, lshape=(1, 28, 28))
# Initialize the weights and the learning rule
init_uni = Uniform(low=-0.1, high=0.1)
opt_gdm = GradientDescentMomentum(learning_rate=0.001, momentum_coef=0.9)
# Define the layers
layers = [Conv((4, 4, 8), init=init_uni, activation=Rectlin()),
Pooling(2),
Conv((4, 4, 32), init=init_uni, activation=Rectlin()),
Pooling(2),
Deconv(fshape=(4, 4, 8), init=init_uni, activation=Rectlin()),
Deconv(fshape=(3, 3, 8), init=init_uni, activation=Rectlin(), strides=2),
Deconv(fshape=(2, 2, 1), init=init_uni, strides=2, padding=1)]
# Define the cost
cost = GeneralizedCost(costfunc=SumSquared())
model = Model(layers=layers)
# configure callbacks
callbacks = Callbacks(model, **args.callback_args)
# Fit the model
model.fit(train, optimizer=opt_gdm, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
# Plot the reconstructed digits
try:
from matplotlib import pyplot, cm
fi = 0
nrows = 10
ncols = 12
test = np.zeros((28*nrows, 28*ncols))
idxs = [(row, col) for row in range(nrows) for col in range(ncols)]
for row, col in idxs:
im = model.layers.layers[-1].outputs.get()[:, fi].reshape((28, 28))
test[28*row:28*(row+1):, 28*col:28*(col+1)] = im
fi = fi + 1
pyplot.matshow(test, cmap=cm.gray)
pyplot.savefig('Reconstructed.png')
except ImportError:
    print('matplotlib needs to be manually installed to generate plots')
| apache-2.0 |
neutrons/Licorne-Py | UI-playground/layerplot.py | 1 | 2806 | from __future__ import (absolute_import, division, print_function)
from PyQt5 import QtCore, QtWidgets
import sys
import numpy as np
from layer import Layer, MSLD
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import matplotlib.cm
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
class layerplot(QtWidgets.QWidget):
def __init__(self, *args):
QtWidgets.QWidget.__init__(self, *args)
sample=[Layer(nsld=5),Layer(thickness=2.,nsld=3),Layer(nsld=5),Layer(nsld=4.,thickness=np.inf)]
self.m = PlotCanvas(sample, self)
self.m.move(0,0)
def resizeEvent(self, event):
self.m.setGeometry(self.rect())
class PlotCanvas(FigureCanvas):
def __init__(self, layers, parent=None):
self.fig = Figure()
self.axes = self.fig.add_subplot(111)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
self.data=layers
self.variable='nsld'
self.plot()
self.fig.canvas.mpl_connect('pick_event', self.onpick)
def onpick(self,event):
ind=event.ind[0]
if ind==len(self.data)-1:
ind='substrate'
print('picked layer {0}'.format(ind))
return True
def plot(self):
layer_thick_array=np.array([l.thickness for l in self.data])
layer_nsld_array =np.array([l.nsld for l in self.data])
depth=np.zeros(len(layer_thick_array))
depth[1:]=layer_thick_array.cumsum()[:-1]
patches=[]
N=len(self.data)
for i in range(N-1):
polygon=Polygon([[depth[i],0.],[depth[i],layer_nsld_array[i]],[depth[i+1],layer_nsld_array[i]],[depth[i+1],0]],True)
patches.append(polygon)
polygon=Polygon([[depth[N-1],0.],[depth[N-1],layer_nsld_array[N-1]],[depth[N-1]+1,layer_nsld_array[N-1]],[depth[N-1]+1,0]],True)
patches.append(polygon)
p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4, picker=True)
colors = 100*np.random.rand(len(patches))
p.set_array(np.array(colors))
ax = self.figure.add_subplot(111)
ax.add_collection(p)
ax.set_title('NSLD')
ax.set_xlim(np.array([0,depth[-1]])*1.2)
ax.set_ylim(np.array([0,layer_nsld_array.max()])*1.2) #TODO allow negative
ax.set_xlabel('Thickness')
ax.set_ylabel('NSLD')
self.draw()
if __name__=='__main__':
app=QtWidgets.QApplication(sys.argv)
mainForm=layerplot()
mainForm.show()
sys.exit(app.exec_())
| gpl-3.0 |
gameduell/dask | dask/utils.py | 1 | 30745 | from __future__ import absolute_import, division, print_function
import codecs
import functools
import inspect
import io
import math
import os
import re
import shutil
import struct
import sys
import tempfile
from errno import ENOENT
from collections import Iterator
from contextlib import contextmanager
from importlib import import_module
from threading import Lock
import multiprocessing as mp
from . import multiprocessing
import uuid
from weakref import WeakValueDictionary
from .compatibility import (long, getargspec, BZ2File, GzipFile, LZMAFile, PY3,
urlsplit, unicode)
from .core import get_deps
from .context import _globals
system_encoding = sys.getdefaultencoding()
if system_encoding == 'ascii':
system_encoding = 'utf-8'
def deepmap(func, *seqs):
""" Apply function inside nested lists
>>> inc = lambda x: x + 1
>>> deepmap(inc, [[1, 2], [3, 4]])
[[2, 3], [4, 5]]
>>> add = lambda x, y: x + y
>>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
[[11, 22], [33, 44]]
"""
if isinstance(seqs[0], (list, Iterator)):
return [deepmap(func, *items) for items in zip(*seqs)]
else:
return func(*seqs)
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
def import_required(mod_name, error_msg):
"""Attempt to import a required dependency.
Raises a RuntimeError if the requested module is not available.
"""
try:
return import_module(mod_name)
except ImportError:
raise RuntimeError(error_msg)
@contextmanager
def tmpfile(extension='', dir=None):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension, dir=dir)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
with ignoring(OSError):
os.remove(filename)
@contextmanager
def tmpdir(dir=None):
dirname = tempfile.mkdtemp(dir=dir)
try:
yield dirname
finally:
if os.path.exists(dirname):
if os.path.isdir(dirname):
with ignoring(OSError):
shutil.rmtree(dirname)
else:
with ignoring(OSError):
os.remove(dirname)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
@contextmanager
def changed_cwd(new_cwd):
old_cwd = os.getcwd()
os.chdir(new_cwd)
try:
yield
finally:
os.chdir(old_cwd)
@contextmanager
def tmp_cwd(dir=None):
with tmpdir(dir) as dirname:
with changed_cwd(dirname):
yield dirname
@contextmanager
def noop_context():
yield
def repr_long_list(seq):
"""
>>> repr_long_list(list(range(100)))
'[0, 1, 2, ..., 98, 99]'
"""
if len(seq) < 8:
return repr(seq)
else:
return repr(seq[:3])[:-1] + ', ..., ' + repr(seq[-2:])[1:]
class IndexCallable(object):
""" Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = 'fn',
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open, mode='t', use_tmpdir=True):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
Since this is meant for use in tests, this context manager will
automatically switch to a temporary current directory, to avoid
race conditions when running tests in parallel.
"""
with (tmp_cwd() if use_tmpdir else noop_context()):
for filename, text in d.items():
f = open(filename, 'w' + mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
with ignoring(OSError):
os.remove(filename)
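# Illustrative usage sketch, not part of the original module: `filetexts`
# writes the given files inside a temporary working directory (by default)
# and removes them again on exit. `_example_filetexts` is a hypothetical name.
def _example_filetexts():
    with filetexts({'a.csv': '1,1\n2,2', 'b.csv': '3,3'}) as filenames:
        # the files exist only inside this block, in a temporary cwd
        sizes = dict((fn, os.path.getsize(fn)) for fn in filenames)
    return sizes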
compressions = {'gz': 'gzip', 'bz2': 'bz2', 'xz': 'xz'}
def infer_compression(filename):
extension = os.path.splitext(filename)[-1].strip('.')
return compressions.get(extension, None)
opens = {'gzip': GzipFile, 'bz2': BZ2File, 'xz': LZMAFile}
def open(filename, mode='rb', compression=None, **kwargs):
if compression == 'infer':
compression = infer_compression(filename)
return opens.get(compression, io.open)(filename, mode, **kwargs)
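# Illustrative usage sketch, not part of the original module: with
# compression='infer', `open` picks the decompressor from the file extension
# via `infer_compression`. `_example_open_infer` is a hypothetical name.
def _example_open_infer():
    with tmpfile('.csv.gz') as fn:
        with GzipFile(fn, 'wb') as f:
            f.write(b'a,b\n1,2\n')
        assert infer_compression(fn) == 'gzip'
        with open(fn, 'rb', compression='infer') as f:
            return f.read()  # decompressed bytes: b'a,b\n1,2\n'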
def get_bom(fn, compression=None):
"""
Get the Byte Order Mark (BOM) if it exists.
"""
boms = set((codecs.BOM_UTF16, codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE))
with open(fn, mode='rb', compression=compression) as f:
f.seek(0)
bom = f.read(2)
f.seek(0)
if bom in boms:
return bom
else:
return b''
def get_bin_linesep(encoding, linesep):
"""
Simply doing `linesep.encode(encoding)` does not always give you
    *just* the linesep bytes; for some encodings it prefixes the
linesep bytes with the BOM. This function ensures we just get the
linesep bytes.
"""
if encoding == 'utf-16':
return linesep.encode('utf-16')[2:] # [2:] strips bom
else:
return linesep.encode(encoding)
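# Illustrative sketch, not part of the original module: for utf-16 the codec
# prepends a BOM when encoding, which `get_bin_linesep` strips off. The exact
# BOM bytes noted below assume a little-endian build.
def _example_get_bin_linesep():
    with_bom = '\n'.encode('utf-16')               # e.g. b'\xff\xfe\n\x00'
    without_bom = get_bin_linesep('utf-16', '\n')  # b'\n\x00'
    return with_bom, without_bom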
def textblock(filename, start, end, compression=None, encoding=system_encoding,
linesep=os.linesep, buffersize=4096):
"""Pull out a block of text from a file given start and stop bytes.
This gets data starting/ending from the next linesep delimiter. Each block
consists of bytes in the range [start,end[, i.e. the stop byte is excluded.
If `start` is 0, then `start` corresponds to the true start byte. If
`start` is greater than 0 and does not point to the beginning of a new
line, then `start` is incremented until it corresponds to the start byte of
the next line. If `end` does not point to the beginning of a new line, then
the line that begins before `end` is included in the block although its
last byte exceeds `end`.
Examples
--------
>> with open('myfile.txt', 'wb') as f:
.. f.write('123\n456\n789\nabc')
In the example below, 1 and 10 don't line up with endlines.
>> u''.join(textblock('myfile.txt', 1, 10))
'456\n789\n'
"""
# Make sure `linesep` is not a byte string because
# `io.TextIOWrapper` in Python versions other than 2.7 dislike byte
# strings for the `newline` argument.
linesep = str(linesep)
# Get byte representation of the line separator.
bin_linesep = get_bin_linesep(encoding, linesep)
bin_linesep_len = len(bin_linesep)
if buffersize < bin_linesep_len:
error = ('`buffersize` ({0:d}) must be at least as large as the '
'number of line separator bytes ({1:d}).')
raise ValueError(error.format(buffersize, bin_linesep_len))
chunksize = end - start
with open(filename, 'rb', compression) as f:
with io.BufferedReader(f) as fb:
# If `start` does not correspond to the beginning of the file, we
# need to move the file pointer to `start - len(bin_linesep)`,
# search for the position of the next a line separator, and set
# `start` to the position after that line separator.
if start > 0:
# `start` is decremented by `len(bin_linesep)` to detect the
# case where the original `start` value corresponds to the
# beginning of a line.
start = max(0, start - bin_linesep_len)
# Set the file pointer to `start`.
fb.seek(start)
# Number of bytes to shift the file pointer before reading a
# new chunk to make sure that a multi-byte line separator, that
# is split by the chunk reader, is still detected.
shift = 1 - bin_linesep_len
while True:
buf = f.read(buffersize)
if len(buf) < bin_linesep_len:
                        return
try:
# Find the position of the next line separator and add
# `len(bin_linesep)` which yields the position of the
# first byte of the next line.
start += buf.index(bin_linesep)
start += bin_linesep_len
except ValueError:
# No line separator was found in the current chunk.
# Before reading the next chunk, we move the file
# pointer back `len(bin_linesep) - 1` bytes to make
# sure that a multi-byte line separator, that may have
# been split by the chunk reader, is still detected.
start += len(buf)
start += shift
fb.seek(shift, os.SEEK_CUR)
else:
# We have found the next line separator, so we need to
# set the file pointer to the first byte of the next
# line.
fb.seek(start)
break
with io.TextIOWrapper(fb, encoding, newline=linesep) as fbw:
# Retrieve and yield lines until the file pointer reaches
# `end`.
while start < end:
line = next(fbw)
# We need to encode the line again to get the byte length
# in order to correctly update `start`.
bin_line_len = len(line.encode(encoding))
if chunksize < bin_line_len:
error = ('`chunksize` ({0:d}) is less than the line '
'length ({1:d}). This may cause duplicate '
'processing of this line. It is advised to '
'increase `chunksize`.')
raise IOError(error.format(chunksize, bin_line_len))
yield line
start += bin_line_len
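# Illustrative usage sketch, not part of the original module: mirrors the
# docstring example above with a real temporary file. Byte offsets 1 and 10 do
# not fall on line boundaries, so the block snaps to whole lines.
def _example_textblock():
    with tmpfile('.txt') as fn:
        with io.open(fn, 'wb') as f:
            f.write(b'123\n456\n789\nabc')
        return u''.join(textblock(fn, 1, 10, linesep='\n'))  # u'456\n789\n'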
def concrete(seq):
""" Make nested iterators concrete lists
>>> data = [[1, 2], [3, 4]]
>>> seq = iter(map(iter, data))
>>> concrete(seq)
[[1, 2], [3, 4]]
"""
if isinstance(seq, Iterator):
seq = list(seq)
if isinstance(seq, (tuple, list)):
seq = list(map(concrete, seq))
return seq
def skip(func):
pass
def pseudorandom(n, p, random_state=None):
""" Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], random_state=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], random_state=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
import numpy as np
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
x = random_state.random_sample(n)
out = np.empty(n, dtype='i1')
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out
def random_state_data(n, random_state=None):
"""Return a list of arrays that can initialize
``np.random.RandomState``.
Parameters
----------
n : int
Number of tuples to return.
random_state : int or np.random.RandomState, optional
If an int, is used to seed a new ``RandomState``.
"""
import numpy as np
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
maxuint32 = np.iinfo(np.uint32).max
return [(random_state.rand(624) * maxuint32).astype('uint32')
for i in range(n)]
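# Illustrative usage sketch, not part of the original module: each returned
# array seeds an independent, reproducible ``np.random.RandomState``.
def _example_random_state_data():
    import numpy as np
    states = random_state_data(3, random_state=42)
    rngs = [np.random.RandomState(state) for state in states]
    return [rng.random_sample(2) for rng in rngs]  # three independent streams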
def is_integer(i):
"""
>>> is_integer(6)
True
>>> is_integer(42.0)
True
>>> is_integer('abc')
False
"""
import numpy as np
if isinstance(i, (int, long)):
return True
if isinstance(i, float):
return (i).is_integer()
if issubclass(type(i), np.integer):
        return True
else:
return False
def file_size(fn, compression=None):
""" Size of a file on disk
If compressed then return the uncompressed file size
"""
if compression == 'gzip':
with open(fn, 'rb') as f:
f.seek(-4, 2)
result = struct.unpack('I', f.read(4))[0]
elif compression:
# depending on the implementation, this may be inefficient
with open(fn, 'rb', compression) as f:
result = f.seek(0, 2)
else:
result = os.stat(fn).st_size
return result
ONE_ARITY_BUILTINS = set([abs, all, any, bool, bytearray, bytes, callable, chr,
classmethod, complex, dict, dir, enumerate, eval,
float, format, frozenset, hash, hex, id, int, iter,
len, list, max, min, next, oct, open, ord, range,
repr, reversed, round, set, slice, sorted,
staticmethod, str, sum, tuple,
type, vars, zip, memoryview])
if PY3:
ONE_ARITY_BUILTINS.add(ascii) # noqa: F821
MULTI_ARITY_BUILTINS = set([compile, delattr, divmod, filter, getattr, hasattr,
isinstance, issubclass, map, pow, setattr])
def takes_multiple_arguments(func):
""" Does this function take multiple arguments?
>>> def f(x, y): pass
>>> takes_multiple_arguments(f)
True
>>> def f(x): pass
>>> takes_multiple_arguments(f)
False
>>> def f(x, y=None): pass
>>> takes_multiple_arguments(f)
False
>>> def f(*args): pass
>>> takes_multiple_arguments(f)
True
>>> class Thing(object):
... def __init__(self, a): pass
>>> takes_multiple_arguments(Thing)
False
"""
if func in ONE_ARITY_BUILTINS:
return False
elif func in MULTI_ARITY_BUILTINS:
return True
try:
spec = getargspec(func)
except:
return False
try:
is_constructor = spec.args[0] == 'self' and isinstance(func, type)
except:
is_constructor = False
if spec.varargs:
return True
if spec.defaults is None:
return len(spec.args) - is_constructor != 1
return len(spec.args) - len(spec.defaults) - is_constructor > 1
class Dispatch(object):
"""Simple single dispatch."""
def __init__(self):
self._lookup = {}
self._lazy = {}
def register(self, type, func=None):
"""Register dispatch of `func` on arguments of type `type`"""
def wrapper(func):
if isinstance(type, tuple):
for t in type:
self.register(t, func)
else:
self._lookup[type] = func
return func
return wrapper(func) if func is not None else wrapper
def register_lazy(self, toplevel, func=None):
"""
Register a registration function which will be called if the
*toplevel* module (e.g. 'pandas') is ever loaded.
"""
def wrapper(func):
self._lazy[toplevel] = func
return func
return wrapper(func) if func is not None else wrapper
def __call__(self, arg):
# Fast path with direct lookup on type
lk = self._lookup
typ = type(arg)
try:
impl = lk[typ]
except KeyError:
pass
else:
return impl(arg)
# Is a lazy registration function present?
toplevel, _, _ = typ.__module__.partition('.')
try:
register = self._lazy.pop(toplevel)
except KeyError:
pass
else:
register()
return self(arg) # recurse
# Walk the MRO and cache the lookup result
for cls in inspect.getmro(typ)[1:]:
if cls in lk:
lk[typ] = lk[cls]
return lk[cls](arg)
raise TypeError("No dispatch for {0} type".format(typ))
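# Illustrative usage sketch, not part of the original module: handlers are
# registered per type, and `register_lazy` defers registration until the named
# top-level module is first encountered. All names below are hypothetical.
def _example_dispatch():
    normalize = Dispatch()
    @normalize.register(int)
    def _normalize_int(x):
        return float(x)
    @normalize.register((list, tuple))
    def _normalize_seq(seq):
        return [normalize(item) for item in seq]
    @normalize.register_lazy('pandas')
    def _register_pandas():
        import pandas as pd
        normalize.register(pd.Series, lambda s: normalize(s.tolist()))
    return normalize([1, (2, 3)])  # -> [1.0, [2.0, 3.0]]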
def ensure_not_exists(filename):
"""
Ensure that a file does not exist.
"""
try:
os.unlink(filename)
except OSError as e:
if e.errno != ENOENT:
raise
def _skip_doctest(line):
# NumPy docstring contains cursor and comment only example
stripped = line.strip()
if stripped == '>>>' or stripped.startswith('>>> #'):
return stripped
elif '>>>' in stripped:
return line + ' # doctest: +SKIP'
else:
return line
def skip_doctest(doc):
if doc is None:
return ''
return '\n'.join([_skip_doctest(line) for line in doc.split('\n')])
def derived_from(original_klass, version=None, ua_args=[]):
"""Decorator to attach original class's docstring to the wrapped method.
Parameters
----------
original_klass: type
Original class which the method is derived from
version : str
Original package version which supports the wrapped method
ua_args : list
List of keywords which Dask doesn't support. Keywords existing in
original but not in Dask will automatically be added.
"""
def wrapper(method):
method_name = method.__name__
try:
# do not use wraps here, as it hides keyword arguments displayed
# in the doc
original_method = getattr(original_klass, method_name)
doc = original_method.__doc__
if doc is None:
doc = ''
try:
method_args = getargspec(method).args
original_args = getargspec(original_method).args
not_supported = [m for m in original_args if m not in method_args]
except TypeError:
not_supported = []
if len(ua_args) > 0:
not_supported.extend(ua_args)
if len(not_supported) > 0:
                note = ("\n Notes\n -----\n"
                        " Dask doesn't support the following argument(s).\n\n")
args = ''.join([' * {0}\n'.format(a) for a in not_supported])
doc = doc + note + args
doc = skip_doctest(doc)
method.__doc__ = doc
return method
except AttributeError:
module_name = original_klass.__module__.split('.')[0]
@functools.wraps(method)
def wrapped(*args, **kwargs):
msg = "Base package doesn't support '{0}'.".format(method_name)
if version is not None:
msg2 = " Use {0} {1} or later to use this method."
msg += msg2.format(module_name, version)
raise NotImplementedError(msg)
return wrapped
return wrapper
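# Illustrative usage sketch, not part of the original module: the wrapped
# method borrows the docstring of the same-named method on the original class
# and notes any keyword it does not accept. `Original` and `Wrapped` are
# hypothetical stand-ins, not real Dask or pandas classes.
def _example_derived_from():
    class Original(object):
        def head(self, n=5, compute=True):
            """Return the first n elements."""
            return list(range(n))
    class Wrapped(object):
        @derived_from(Original)
        def head(self, n=5):
            return list(range(n))
    # Original's docstring plus a Notes section mentioning `compute`
    return Wrapped.head.__doc__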
def funcname(func):
"""Get the name of a function."""
# functools.partial
if isinstance(func, functools.partial):
return funcname(func.func)
# methodcaller
if isinstance(func, methodcaller):
return func.method
module_name = getattr(func, '__module__', None) or ''
type_name = getattr(type(func), '__name__', None) or ''
# toolz.curry
if 'toolz' in module_name and 'curry' == type_name:
return func.func_name
# multipledispatch objects
if 'multipledispatch' in module_name and 'Dispatcher' == type_name:
return func.name
# All other callables
try:
name = func.__name__
if name == '<lambda>':
return 'lambda'
return name
except:
return str(func)
def ensure_bytes(s):
""" Turn string or bytes to bytes
>>> ensure_bytes(u'123')
'123'
>>> ensure_bytes('123')
'123'
>>> ensure_bytes(b'123')
'123'
"""
if isinstance(s, bytes):
return s
if hasattr(s, 'encode'):
return s.encode()
msg = "Object %s is neither a bytes object nor has an encode method"
raise TypeError(msg % s)
def ensure_unicode(s):
    """ Turn string or bytes into unicode
>>> ensure_unicode(u'123')
u'123'
>>> ensure_unicode('123')
u'123'
>>> ensure_unicode(b'123')
u'123'
"""
if isinstance(s, unicode):
return s
if hasattr(s, 'decode'):
return s.decode()
    msg = "Object %s is neither a unicode string nor has a decode method"
raise TypeError(msg % s)
def digit(n, k, base):
"""
>>> digit(1234, 0, 10)
4
>>> digit(1234, 1, 10)
3
>>> digit(1234, 2, 10)
2
>>> digit(1234, 3, 10)
1
"""
return n // base**k % base
def insert(tup, loc, val):
"""
>>> insert(('a', 'b', 'c'), 0, 'x')
('x', 'b', 'c')
"""
L = list(tup)
L[loc] = val
return tuple(L)
def build_name_function(max_int):
""" Returns a function that receives a single integer
and returns it as a string padded by enough zero characters
to align with maximum possible integer
>>> name_f = build_name_function(57)
>>> name_f(7)
'07'
>>> name_f(31)
'31'
>>> build_name_function(1000)(42)
'0042'
>>> build_name_function(999)(42)
'042'
>>> build_name_function(0)(0)
'0'
"""
# handle corner cases max_int is 0 or exact power of 10
max_int += 1e-8
pad_length = int(math.ceil(math.log10(max_int)))
def name_function(i):
return str(i).zfill(pad_length)
return name_function
def infer_storage_options(urlpath, inherit_storage_options=None):
""" Infer storage options from URL path and merge it with existing storage
options.
Parameters
----------
urlpath: str or unicode
Either local absolute file path or URL (hdfs://namenode:8020/file.csv)
    inherit_storage_options: dict (optional)
Its contents will get merged with the inferred information from the
given path
Returns
-------
Storage options dict.
Examples
--------
>>> infer_storage_options('/mnt/datasets/test.csv') # doctest: +SKIP
    {"protocol": "file", "path": "/mnt/datasets/test.csv"}
>>> infer_storage_options(
... 'hdfs://username:pwd@node:123/mnt/datasets/test.csv?q=1',
... inherit_storage_options={'extra': 'value'}) # doctest: +SKIP
{"protocol": "hdfs", "username": "username", "password": "pwd",
"host": "node", "port": 123, "path": "/mnt/datasets/test.csv",
"url_query": "q=1", "extra": "value"}
"""
# Handle Windows paths including disk name in this special case
if re.match(r'^[a-zA-Z]:[\\/]', urlpath):
return {'protocol': 'file',
'path': urlpath}
parsed_path = urlsplit(urlpath)
protocol = parsed_path.scheme or 'file'
path = parsed_path.path
if protocol == 'file':
# Special case parsing file protocol URL on Windows according to:
# https://msdn.microsoft.com/en-us/library/jj710207.aspx
windows_path = re.match(r'^/([a-zA-Z])[:|]([\\/].*)$', path)
if windows_path:
path = '%s:%s' % windows_path.groups()
inferred_storage_options = {
'protocol': protocol,
'path': path,
}
if parsed_path.netloc:
# Parse `hostname` from netloc manually because `parsed_path.hostname`
# lowercases the hostname which is not always desirable (e.g. in S3):
# https://github.com/dask/dask/issues/1417
inferred_storage_options['host'] = parsed_path.netloc.rsplit('@', 1)[-1].rsplit(':', 1)[0]
if parsed_path.port:
inferred_storage_options['port'] = parsed_path.port
if parsed_path.username:
inferred_storage_options['username'] = parsed_path.username
if parsed_path.password:
inferred_storage_options['password'] = parsed_path.password
if parsed_path.query:
inferred_storage_options['url_query'] = parsed_path.query
if parsed_path.fragment:
inferred_storage_options['url_fragment'] = parsed_path.fragment
if inherit_storage_options:
if set(inherit_storage_options) & set(inferred_storage_options):
raise KeyError("storage options (%r) and path url options (%r) "
"collision is detected"
% (inherit_storage_options, inferred_storage_options))
inferred_storage_options.update(inherit_storage_options)
return inferred_storage_options
def dependency_depth(dsk):
import toolz
deps, _ = get_deps(dsk)
@toolz.memoize
def max_depth_by_deps(key):
if not deps[key]:
return 1
d = 1 + max(max_depth_by_deps(dep_key) for dep_key in deps[key])
return d
return max(max_depth_by_deps(dep_key) for dep_key in deps.keys())
def eq_strict(a, b):
"""Returns True if both values have the same type and are equal."""
if type(a) is type(b):
return a == b
return False
def memory_repr(num):
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
def put_lines(buf, lines):
if any(not isinstance(x, unicode) for x in lines):
lines = [unicode(x) for x in lines]
buf.write('\n'.join(lines))
hex_pattern = re.compile('[a-f]+')
def key_split(s):
"""
>>> key_split('x')
u'x'
>>> key_split('x-1')
u'x'
>>> key_split('x-1-2-3')
u'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
u'x'
>>> key_split('hello-world-1')
u'hello-world'
>>> key_split(b'hello-world-1')
u'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
u'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
u'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split('-')
result = words[0].lstrip("'(\"")
for word in words[1:]:
if word.isalpha() and not (len(word) == 8 and
hex_pattern.match(word) is not None):
result += '-' + word
else:
break
if len(result) == 32 and re.match(r'[a-f0-9]{32}', result):
return 'data'
else:
if result[0] == '<':
result = result.strip('<>').split()[0].split('.')[-1]
return result
except Exception:
return 'Other'
_method_cache = {}
class methodcaller(object):
"""Return a callable object that calls the given method on its operand.
Unlike the builtin `methodcaller`, this class is serializable"""
__slots__ = ('method',)
func = property(lambda self: self.method) # For `funcname` to work
def __new__(cls, method):
if method in _method_cache:
return _method_cache[method]
self = object.__new__(cls)
self.method = method
_method_cache[method] = self
return self
def __call__(self, obj, *args, **kwargs):
return getattr(obj, self.method)(*args, **kwargs)
def __reduce__(self):
return (methodcaller, (self.method,))
def __str__(self):
return "<%s: %s>" % (self.__class__.__name__, self.method)
__repr__ = __str__
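# Illustrative usage sketch, not part of the original module: unlike
# ``operator.methodcaller``, this object survives pickling, and the module
# level cache means the round trip returns the very same instance.
def _example_methodcaller():
    import pickle
    upper = methodcaller('upper')
    restored = pickle.loads(pickle.dumps(upper))
    return restored('abc'), restored is upper  # ('ABC', True)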
class MethodCache(object):
"""Attribute access on this object returns a methodcaller for that
attribute.
Examples
--------
>>> a = [1, 3, 3]
>>> M.count(a, 3) == a.count(3)
True
"""
__getattr__ = staticmethod(methodcaller)
__dir__ = lambda self: list(_method_cache)
M = MethodCache()
class SerializableLock(object):
_locks = WeakValueDictionary()
""" A Serializable per-process Lock
This wraps a normal ``threading.Lock`` object and satisfies the same
interface. However, this lock can also be serialized and sent to different
processes. It will not block concurrent operations between processes (for
    this you should look at ``multiprocessing.Lock`` or ``locket.lock_file``)
but will consistently deserialize into the same lock.
So if we make a lock in one process::
lock = SerializableLock()
And then send it over to another process multiple times::
bytes = pickle.dumps(lock)
a = pickle.loads(bytes)
b = pickle.loads(bytes)
Then the deserialized objects will operate as though they were the same
lock, and collide as appropriate.
This is useful for consistently protecting resources on a per-process
level.
The creation of locks is itself not threadsafe.
"""
def __init__(self, token=None):
self.token = token or str(uuid.uuid4())
if self.token in SerializableLock._locks:
self.lock = SerializableLock._locks[self.token]
else:
self.lock = Lock()
SerializableLock._locks[self.token] = self.lock
def acquire(self, *args):
return self.lock.acquire(*args)
def release(self, *args):
return self.lock.release(*args)
def __enter__(self):
self.lock.__enter__()
def __exit__(self, *args):
self.lock.__exit__(*args)
@property
def locked(self):
        return self.lock.locked()
def __getstate__(self):
return self.token
def __setstate__(self, token):
self.__init__(token)
def __str__(self):
return "<%s: %s>" % (self.__class__.__name__, self.token)
__repr__ = __str__
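# Illustrative usage sketch, not part of the original module: copies
# deserialized from the same SerializableLock share one underlying
# ``threading.Lock`` within a process, so they block each other as expected.
def _example_serializable_lock():
    import pickle
    lock = SerializableLock()
    a = pickle.loads(pickle.dumps(lock))
    b = pickle.loads(pickle.dumps(lock))
    a.acquire()
    acquired = b.acquire(False)  # False: the shared lock is already held
    a.release()
    return acquired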
def effective_get(get=None, collection=None):
"""Get the effective get method used in a given situation"""
collection_get = collection._default_get if collection else None
return get or _globals.get('get') or collection_get
def get_scheduler_lock(get=None, collection=None):
"""Get an instance of the appropriate lock for a certain situation based on
scheduler used."""
actual_get = effective_get(get, collection)
if actual_get == multiprocessing.get:
return mp.Manager().Lock()
return SerializableLock()
| bsd-3-clause |
benoitsteiner/tensorflow | tensorflow/examples/learn/text_classification.py | 39 | 5106 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for DNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.layers.python.layers import encoders
learn = tf.contrib.learn
FLAGS = None
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
def bag_of_words_model(features, target):
"""A bag-of-words model. Note it disregards the word order in the text."""
target = tf.one_hot(target, 15, 1, 0)
features = encoders.bow_encoder(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
logits = tf.contrib.layers.fully_connected(features, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def rnn_model(features, target):
"""RNN model to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words')
# Split into list of embedding per word, while removing doc length dim.
# word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
word_list = tf.unstack(word_vectors, axis=1)
# Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE.
cell = tf.contrib.rnn.GRUCell(EMBEDDING_SIZE)
# Create an unrolled Recurrent Neural Networks to length of
# MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit.
_, encoding = tf.contrib.rnn.static_rnn(cell, word_list, dtype=tf.float32)
# Given encoding of RNN, take encoding of last step (e.g hidden size of the
# neural network of last step) and pass it as features for logistic
# regression over output classes.
target = tf.one_hot(target, 15, 1, 0)
logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
# Create a training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adam',
learning_rate=0.01)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_transform_train = vocab_processor.fit_transform(x_train)
x_transform_test = vocab_processor.transform(x_test)
x_train = np.array(list(x_transform_train))
x_test = np.array(list(x_transform_test))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
# Switch between rnn_model and bag_of_words_model to test different models.
model_fn = rnn_model
if FLAGS.bow_model:
model_fn = bag_of_words_model
classifier = learn.Estimator(model_fn=model_fn)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
parser.add_argument(
'--bow_model',
default=False,
help='Run with BOW model instead of RNN.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
ashhher3/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
images patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
aetilley/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
andyh616/mne-python | mne/viz/tests/test_ica.py | 7 | 6812 | # Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: Simplified BSD
import os.path as op
import warnings
from numpy.testing import assert_raises
from mne import io, read_events, Epochs, read_cov
from mne import pick_types
from mne.utils import run_tests_if_main, requires_sklearn
from mne.viz.utils import _fake_click
from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
# Set our plotters to test mode
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
warnings.simplefilter('always') # enable b/c these tests throw warnings
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.1, 0.2
def _get_raw(preload=False):
return io.Raw(raw_fname, preload=preload)
def _get_events():
return read_events(event_name)
def _get_picks(raw):
return [0, 1, 2, 6, 7, 8, 12, 13, 14] # take a only few channels
def _get_epochs():
raw = _get_raw()
events = _get_events()
picks = _get_picks(raw)
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks,
baseline=(None, 0))
return epochs
@requires_sklearn
def test_plot_ica_components():
"""Test plotting of ICA solutions
"""
import matplotlib.pyplot as plt
raw = _get_raw()
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
max_pca_components=3, n_pca_components=3)
ica_picks = _get_picks(raw)
ica.fit(raw, picks=ica_picks)
warnings.simplefilter('always', UserWarning)
with warnings.catch_warnings(record=True):
for components in [0, [0], [0, 1], [0, 1] * 2, None]:
ica.plot_components(components, image_interp='bilinear', res=16)
ica.info = None
assert_raises(RuntimeError, ica.plot_components, 1)
plt.close('all')
@requires_sklearn
def test_plot_ica_sources():
"""Test plotting of ICA panel
"""
import matplotlib.pyplot as plt
raw = io.Raw(raw_fname, preload=False)
raw.crop(0, 1, copy=False)
raw.preload_data()
picks = _get_picks(raw)
epochs = _get_epochs()
raw.pick_channels([raw.ch_names[k] for k in picks])
ica_picks = pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False, exclude='bads')
ica = ICA(n_components=2, max_pca_components=3, n_pca_components=3)
ica.fit(raw, picks=ica_picks)
raw.info['bads'] = ['MEG 0113']
assert_raises(RuntimeError, ica.plot_sources, inst=raw)
ica.plot_sources(epochs)
epochs.info['bads'] = ['MEG 0113']
assert_raises(RuntimeError, ica.plot_sources, inst=epochs)
epochs.info['bads'] = []
with warnings.catch_warnings(record=True): # no labeled objects mpl
ica.plot_sources(epochs.average())
evoked = epochs.average()
fig = ica.plot_sources(evoked)
# Test a click
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax,
[line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax,
[ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
# plot with bad channels excluded
ica.plot_sources(evoked, exclude=[0])
ica.exclude = [0]
ica.plot_sources(evoked) # does the same thing
assert_raises(ValueError, ica.plot_sources, 'meeow')
plt.close('all')
@requires_sklearn
def test_plot_ica_overlay():
"""Test plotting of ICA cleaning
"""
import matplotlib.pyplot as plt
raw = _get_raw(preload=True)
picks = _get_picks(raw)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
max_pca_components=3, n_pca_components=3)
ica.fit(raw, picks=picks)
# don't test raw, needs preload ...
ecg_epochs = create_ecg_epochs(raw, picks=picks)
ica.plot_overlay(ecg_epochs.average())
eog_epochs = create_eog_epochs(raw, picks=picks)
ica.plot_overlay(eog_epochs.average())
assert_raises(ValueError, ica.plot_overlay, raw[:2, :3][0])
ica.plot_overlay(raw)
plt.close('all')
@requires_sklearn
def test_plot_ica_scores():
"""Test plotting of ICA scores
"""
import matplotlib.pyplot as plt
raw = _get_raw()
picks = _get_picks(raw)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
max_pca_components=3, n_pca_components=3)
ica.fit(raw, picks=picks)
ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1])
assert_raises(ValueError, ica.plot_scores, [0.2])
plt.close('all')
@requires_sklearn
def test_plot_instance_components():
"""Test plotting of components as instances of raw and epochs."""
import matplotlib.pyplot as plt
raw = _get_raw()
picks = _get_picks(raw)
ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
max_pca_components=3, n_pca_components=3)
ica.fit(raw, picks=picks)
fig = ica.plot_sources(raw, exclude=[0], title='Components')
fig.canvas.key_press_event('down')
fig.canvas.key_press_event('up')
fig.canvas.key_press_event('right')
fig.canvas.key_press_event('left')
fig.canvas.key_press_event('o')
fig.canvas.key_press_event('-')
fig.canvas.key_press_event('+')
fig.canvas.key_press_event('=')
fig.canvas.key_press_event('pageup')
fig.canvas.key_press_event('pagedown')
fig.canvas.key_press_event('home')
fig.canvas.key_press_event('end')
fig.canvas.key_press_event('f11')
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax, [-0.1, 0.9]) # click on y-label
fig.canvas.key_press_event('escape')
plt.close('all')
epochs = _get_epochs()
fig = ica.plot_sources(epochs, exclude=[0], title='Components')
fig.canvas.key_press_event('down')
fig.canvas.key_press_event('up')
fig.canvas.key_press_event('right')
fig.canvas.key_press_event('left')
fig.canvas.key_press_event('o')
fig.canvas.key_press_event('-')
fig.canvas.key_press_event('+')
fig.canvas.key_press_event('=')
fig.canvas.key_press_event('pageup')
fig.canvas.key_press_event('pagedown')
fig.canvas.key_press_event('home')
fig.canvas.key_press_event('end')
fig.canvas.key_press_event('f11')
# Test a click
ax = fig.get_axes()[0]
line = ax.lines[0]
_fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data')
_fake_click(fig, ax, [-0.1, 0.9]) # click on y-label
fig.canvas.key_press_event('escape')
plt.close('all')
run_tests_if_main()
| bsd-3-clause |
mattilyra/scikit-learn | sklearn/tests/test_pipeline.py | 23 | 15392 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
def inverse_transform(self, X):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that where copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(svd_solver='full', n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA(svd_solver='full')
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA(svd_solver='full')
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2, svd_solver='full')
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = PCA(n_components=2, svd_solver='randomized', random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_X1d_inverse_transform():
transformer = TransfT()
pipeline = make_pipeline(transformer)
X = np.ones(10)
msg = "1d X will not be reshaped in pipeline.inverse_transform"
assert_warns_message(FutureWarning, msg, pipeline.inverse_transform, X)
| bsd-3-clause |
mcdeaton13/dynamic | Data/Calibration/Firm Calibration/parameters/depreciation/processing/read_inventories.py | 4 | 4135 | '''
-------------------------------------------------------------------------------
Last updated 5/26/2015
-------------------------------------------------------------------------------
-------------------------------------------------------------------------------
Packages
-------------------------------------------------------------------------------
'''
import os.path
import numpy as np
import pandas as pd
import xlrd
#
import naics_processing as naics
'''
'''
def read_inventories(output_tree, data_folder):
# The directory with inventory data:
inv_folder = os.path.abspath(data_folder + "\\Inventories")
# Opening BEA's excel file on depreciable assets by industry:
inv_book = xlrd.open_workbook(os.path.abspath(
inv_folder + "\\Inventories.xls"))
sht0 = inv_book.sheet_by_index(0)
num_rows = sht0.nrows
num_cols = sht0.ncols
    # Find the starting index in the worksheet.
cur_index = naics.search_ws(sht0, 1, 25, True, [0,0], True)
check_index = naics.search_ws(sht0, "line", 20)
if(cur_index[1] != check_index[1]):
print "ERROR"
# Breaking down by corporate tax status:
corp_types = ["C Corporations",
"Corporate general partners",
"Corporate limited partners"]
non_corp_types = ["S Corporations",
"Individual general partners",
"Individual limited partners",
"Partnership general partners",
"Partnership limited partners",
"Tax-exempt organization general partners",
"Tax-exempt organization limited partners",
"Nominee and other general partners",
"Nominee and other limited partners",
"Sole Proprietors"]
# Reading in the crosswalk:
inv_cross = pd.read_csv(os.path.abspath(
inv_folder + "\\Inventories_Crosswalk.csv"))
# Creating a tree for the inventory data:
inv_tree = naics.load_naics(data_folder + "\\2012_NAICS_Codes.csv")
#
data_cols = ["All", "Corp", "Non-Corp"]
for i in inv_tree.enum_inds:
i.data.append(("Inventories",
pd.DataFrame(np.zeros((1, len(data_cols))),
columns = data_cols)))
#
inv_data = np.zeros(inv_cross.shape[0])
#
cross_index = 0
for i in xrange(cur_index[0], num_rows):
if(cross_index >= inv_cross.shape[0]):
break
cur_list = str(sht0.cell_value(i, cur_index[1])).strip()
cur_name = str(sht0.cell_value(i, cur_index[1]+1)).strip()
checks = ((str(cur_list) == str(inv_cross["List"][cross_index])) and
(str(cur_name) == str(inv_cross["Industry"][cross_index])))
if(checks):
cross_index += 1
try:
cur_value = float(sht0.cell_value(i, num_cols-1))
except ValueError:
continue
inv_data[cross_index-1] = cur_value
# Data is in billions:
inv_data[cross_index-1] = (10**9) * inv_data[cross_index-1]
#
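    # Distribute each crosswalk row's inventory total across the NAICS tree,
    # splitting it into corporate and non-corporate portions using the existing
    # "INV" breakdown in output_tree as weights.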
for i in xrange(0, inv_cross.shape[0]):
cur_codes = inv_cross["NAICS"][i].strip().split(".")
proportions = naics.get_proportions(cur_codes, output_tree, "INV")
for j in xrange(0, proportions.shape[1]):
cur_ind = inv_tree.enum_inds[int(proportions.iloc[0,j])]
prev_ind = output_tree.enum_inds[int(proportions.iloc[0,j])]
prev_df = prev_ind.data.dfs["INV"]
if(sum(prev_df.iloc[0, :]) != 0):
cur_dfs = ((prev_df/sum(prev_df.iloc[0,:])) *
(inv_data[i] * proportions.iloc[1,j]))
inv_df = cur_ind.data.dfs["Inventories"]
inv_df["All"] += sum(cur_dfs.iloc[0,:])
for k in corp_types:
inv_df["Corp"] += cur_dfs[k][0]
for k in non_corp_types:
inv_df["Non-Corp"] += cur_dfs[k][0]
#
return inv_tree
| mit |
niketanpansare/systemml | scripts/perftest/python/google_docs/stats.py | 15 | 3540 | #!/usr/bin/env python3
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import argparse
import os
import pprint
from os.path import join
import matplotlib.pyplot as plt
from gdocs_utils import auth
# Dict produced by get_formatted_data:
# {algo_name: [{'algo_1.0': t1}, {'algo_2.0': t2}, ...]}
def get_formatted_data(sheet_data):
"""
    Read all the data from google sheets and transform it into a dictionary
    that can be used for plotting later.
"""
algo_dict = {}
for i in sheet_data:
inn_count = 0
data = []
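        # Each row is a dict keyed by column header; only the first two
        # (header, value) pairs are used to build the per-algorithm entries.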
for key, val in i.items():
inn_count += 1
if inn_count < 3:
data.append(key)
data.append(val)
if inn_count == 2:
t1, v1, _, v2 = data
if len(str(v2)) > 0:
if v1 not in algo_dict:
algo_dict[v1] = [{t1: v2}]
else:
algo_dict[v1].append({t1: v2})
inn_count = 0
data = []
return algo_dict
def plot(x, y, xlab, ylab, title):
"""
Save plots to the current folder based on the arguments
"""
CWD = os.getcwd()
PATH = join(CWD, title)
width = .35
plt.bar(x, y, color="red", width=width)
plt.xticks(x)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
plt.savefig(PATH + '.png')
print('Plot {} generated'.format(title))
return plt
# Example Usage
# ./stats.py --auth ../key/client_json.json --exec-type singlenode
if __name__ == '__main__':
execution_mode = ['hybrid_spark', 'singlenode']
cparser = argparse.ArgumentParser(description='System-ML Statistics Script')
cparser.add_argument('--auth', help='Location to read auth file',
required=True, metavar='')
cparser.add_argument('--exec-type', help='Execution mode', choices=execution_mode,
required=True, metavar='')
cparser.add_argument('--plot', help='Algorithm to plot', metavar='')
args = cparser.parse_args()
sheet = auth(args.auth, args.exec_type)
all_data = sheet.get_all_records()
plot_data = get_formatted_data(all_data)
if args.plot is not None:
print(plot_data[args.plot])
title = args.plot
ylab = 'Time in sec'
xlab = 'Version'
x = []
y = []
for i in plot_data[args.plot]:
version = list(i.keys())[0]
time = list(i.values())[0]
y.append(time)
x.append(version)
x = list(map(lambda x: float(x.split('_')[1]), x))
plot(x, y, xlab, ylab, title)
else:
pprint.pprint(plot_data, width=1) | apache-2.0 |
cuttlefishh/emp | code/01-metadata/metadata_template_generator.py | 1 | 4911 | #!/usr/bin/env python
import click
import pandas as pd
import re
# Hard-coded variables
investigation_type = 'metagenome'
# Function: return dataframe of environmental package-specific metadata items
# A single environmental package (soil) or list can be provided (soil,water).
def show_items_of_env_pkg(df_env_pkg, list_of_env_pkg):
"""Return dataframe of environmental package-specific metadata items"""
df_items = df_env_pkg[df_env_pkg['Environmental package'].isin(list_of_env_pkg)]
return df_items
# Function: return dataframe of metadata template
def create_template_for_env_pkg(df_QiitaEBI, df_MIMS, df_env_pkg, list_of_env_pkg, number_of_samples, sample_prefix):
"""Return dataframe of metadata template"""
# get headers/requirement/example of Qiita-EBI/MIMS/env_pkg columns
pkg_items = show_items_of_env_pkg(df_env_pkg, list_of_env_pkg)
headers_env_pkg = pkg_items['Structured comment name'].values
require_env_pkg = pkg_items['Requirement']
example_env_pkg = pkg_items['Value syntax']
headers_all = list(df_QiitaEBI.iloc[0]) + list(df_MIMS.iloc[0]) + list(headers_env_pkg)
require_all = pd.concat([df_QiitaEBI.iloc[1], df_MIMS.iloc[1], require_env_pkg])
example_all = pd.concat([df_QiitaEBI.iloc[2], df_MIMS.iloc[2], example_env_pkg])
# populate template dataframe
df_template = pd.DataFrame(columns=headers_all, dtype=object)
df_template.loc['Requirement'] = require_all.values
df_template.loc['Format'] = example_all.values
string_of_env_pkg = re.sub(r'\W', '.', '.'.join(list_of_env_pkg))
for i in range(0, number_of_samples):
df_template.loc[i+1] = ['' for x in range(len(df_template.columns))]
df_template.loc[i+1]['sample_name'] = '%s.%s.%s' % (sample_prefix, string_of_env_pkg, i+1)
df_template.loc[i+1]['investigation_type'] = investigation_type
df_template.loc[i+1]['env_package'] = ' or '.join(list_of_env_pkg)
return df_template
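# Example invocation (file names are illustrative, taken from the option help text):
#   ./metadata_template_generator.py --qiita_ebi_mims_path Qiita_EBI_MIMS_v1.xlsx \
#     --migs_mims_path MIGS_MIMS_v4.xls --list_of_env_pkg soil \
#     --number_of_samples 5 --sample_prefix Metcalf40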
@click.command()
@click.option('--qiita_ebi_mims_path', required=True, type=click.Path(resolve_path=True, readable=True, exists=True), help='Excel file with Qiita/EBI and MIMS required fields. Example: Qiita_EBI_MIMS_v1.xlsx')
@click.option('--migs_mims_path', required=True, type=click.Path(resolve_path=True, readable=True, exists=True), help='Excel file with MIxS standards. Example: MIGS_MIMS_v4.xls')
@click.option('--list_of_env_pkg', required=True, type=click.STRING, help="One (recommended) or more (separated by commas) environmental package. Choose from: air, built environment, host-associated, human-associated, human-skin, human-oral, human-gut, human-vaginal, microbial mat/biofilm, misc environment, plant-associated, sediment, soil, wastewater/sludge, water")
@click.option('--number_of_samples', required=True, type=click.INT, help='Number of samples (per environmental package) to create rows for in the template')
@click.option('--sample_prefix', required=True, type=click.STRING, help='Prefix string to prepend to sample numbers in row indexes. Example: Metcalf40 (EMP500 PI and study number)')
# Main function: generate metadata template and readme csv files
def generate_metadata_template(qiita_ebi_mims_path, migs_mims_path, list_of_env_pkg, number_of_samples, sample_prefix):
"""Generate metadata template and readme csv files"""
# Qiita/EBI/MIMS Excel file to DataFrames
df_QiitaEBI = pd.read_excel(qiita_ebi_mims_path, sheetname='QiitaEBI', header=None)
df_MIMS = pd.read_excel(qiita_ebi_mims_path, sheetname='MIMS', header=None)
list_of_env_pkg = list_of_env_pkg.split(",")
# MIGS/MIMS Excel file to DataFrames
df_README = pd.read_excel(migs_mims_path, sheetname='README', header=None)
df_MIGS_MIMS = pd.read_excel(migs_mims_path, sheetname='MIGS_MIMS', header=0, index_col=0)
df_env_pkg = pd.read_excel(migs_mims_path, sheetname='environmental_packages', header=0)
# generate template file
df_template = create_template_for_env_pkg(df_QiitaEBI, df_MIMS, df_env_pkg, list_of_env_pkg, number_of_samples, sample_prefix)
string_of_env_pkg = re.sub(r'\W', '_', '_'.join(list_of_env_pkg))
df_template.to_csv('%s_%s_%s_samples.csv' % (sample_prefix, string_of_env_pkg, number_of_samples), index_label='index')
# generate info file
df_MIMS_select = df_MIGS_MIMS[df_MIGS_MIMS.Section.isin(['investigation', 'environment', 'migs/mims/mimarks extension'])]
df_MIMS_select.to_csv('README_MIMS_metadata.csv')
df_env_pkg_select = show_items_of_env_pkg(df_env_pkg, list_of_env_pkg)
del df_env_pkg_select['Environmental package']
df_env_pkg_select.set_index('Structured comment name', inplace=True)
string_of_env_pkg = re.sub(r'\W', '_', '_'.join(list_of_env_pkg))
df_env_pkg_select.to_csv('README_%s_metadata.csv' % string_of_env_pkg)
# Execute main function
if __name__ == '__main__':
generate_metadata_template()
| bsd-3-clause |
srinathv/vispy | examples/basics/plotting/mpl_plot.py | 14 | 1579 | # -*- coding: utf-8 -*-
# vispy: testskip
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Example demonstrating how to use vispy.pyplot, which uses mplexporter
to convert matplotlib commands to vispy draw commands.
Requires matplotlib.
"""
import numpy as np
# You can use either matplotlib or vispy to render this example:
# import matplotlib.pyplot as plt
import vispy.mpl_plot as plt
from vispy.io import read_png, load_data_file
n = 200
freq = 10
fs = 100.
t = np.arange(n) / fs
tone = np.sin(2*np.pi*freq*t)
noise = np.random.RandomState(0).randn(n)
signal = tone + noise
magnitude = np.abs(np.fft.fft(signal))
freqs = np.fft.fftfreq(n, 1. / fs)
flim = n // 2
# Signal
fig = plt.figure()
ax = plt.subplot(311)
ax.imshow(read_png(load_data_file('pyplot/logo.png')))
ax = plt.subplot(312)
ax.plot(t, signal, 'k-')
# Frequency content
ax = plt.subplot(313)
idx = np.argmax(magnitude[:flim])
ax.text(freqs[idx], magnitude[idx], 'Max: %s Hz' % freqs[idx],
verticalalignment='top')
ax.plot(freqs[:flim], magnitude[:flim], 'k-o')
plt.draw()
# NOTE: show() has currently been overwritten to convert to vispy format, so:
# 1. It must be called to show the results, and
# 2. Any plotting commands executed after this will not take effect.
# We are working to remove this limitation.
if __name__ == '__main__':
plt.show(True)
| bsd-3-clause |
ChinaQuants/bokeh | bokeh/compat/bokeh_renderer.py | 2 | 17762 | "Supporting objects and functions to convert Matplotlib objects into Bokeh."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import itertools
import warnings
import matplotlib as mpl
import numpy as np
import pandas as pd
from six import string_types
from ..models import (ColumnDataSource, FactorRange, DataRange1d, DatetimeAxis, GlyphRenderer,
Grid, GridPlot, LinearAxis, Plot, CategoricalAxis, Legend)
from ..models.glyphs import (Asterisk, Circle, Cross, Diamond, InvertedTriangle,
Line, MultiLine, Patches, Square, Text, Triangle, X)
from ..plotting import DEFAULT_TOOLS
from ..plotting_helpers import _process_tools_arg
from .mplexporter.renderers import Renderer
from .mpl_helpers import convert_dashes, get_props_cycled, is_ax_end, xkcd_line
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class BokehRenderer(Renderer):
def __init__(self, pd_obj, xkcd):
"Initial setup."
self.fig = None
self.pd_obj = pd_obj
self.xkcd = xkcd
self.zorder = {}
self.handles = {}
def open_figure(self, fig, props):
"Get the main plot properties and create the plot."
self.width = int(props['figwidth'] * props['dpi'])
self.height = int(props['figheight'] * props['dpi'])
self.plot = Plot(x_range=DataRange1d(),
y_range=DataRange1d(),
plot_width=self.width,
plot_height=self.height)
def close_figure(self, fig):
"Complete the plot: add tools."
# Add tools
tool_objs = _process_tools_arg(self.plot, DEFAULT_TOOLS)
self.plot.add_tools(*tool_objs)
# Simple or Grid plot setup
if len(fig.axes) <= 1:
self.fig = self.plot
self.plot.renderers.sort(key=lambda x: self.zorder.get(x._id, 0))
else:
# This list comprehension splits the plot.renderers list at the "marker"
            # points, returning sublists that correspond to each subplot.
subrends = [list(x[1]) for x in itertools.groupby(
self.plot.renderers, lambda x: is_ax_end(x)) if not x[0]]
plots = []
for i, axes in enumerate(fig.axes):
# create a new plot for each subplot
_plot = Plot(x_range=self.plot.x_range,
y_range=self.plot.y_range,
plot_width=self.width,
plot_height=self.height)
_plot.title = ""
# and add new tools
_tool_objs = _process_tools_arg(_plot, DEFAULT_TOOLS)
_plot.add_tools(*_tool_objs)
# clean the plot ref from axis and grids
_plot_rends = subrends[i]
for r in _plot_rends:
if not isinstance(r, GlyphRenderer):
r.plot = None
# add all the renderers into the new subplot
for r in _plot_rends:
if isinstance(r, GlyphRenderer):
_plot.renderers.append(r)
elif isinstance(r, Grid):
_plot.add_layout(r)
else:
if r in self.plot.below:
_plot.add_layout(r, 'below')
elif r in self.plot.above:
_plot.add_layout(r, 'above')
elif r in self.plot.left:
_plot.add_layout(r, 'left')
elif r in self.plot.right:
_plot.add_layout(r, 'right')
_plot.renderers.sort(key=lambda x: self.zorder.get(x._id, 0))
plots.append(_plot)
(a, b, c) = fig.axes[0].get_geometry()
p = np.array(plots)
n = np.resize(p, (a, b))
grid = GridPlot(children=n.tolist())
self.fig = grid
def open_axes(self, ax, props):
"Get axes data and create the axes and grids"
# Get axes, title and grid into class attributes.
self.ax = ax
self.plot.title = ax.get_title()
# to avoid title conversion by draw_text later
        # Make sure that all information about the axes is passed to the properties
if props.get('xscale', False):
props['axes'][0]['scale'] = props['xscale']
if props.get('yscale', False):
props['axes'][1]['scale'] = props['yscale']
# Add axis
for props in props['axes']:
if props['position'] == "bottom" : location, dim, thing = "below", 0, ax.xaxis
elif props['position'] == "top" : location, dim, thing = "above", 0, ax.xaxis
else: location, dim, thing = props['position'], 1, ax.yaxis
baxis = self.make_axis(thing, location, props)
if dim==0:
gridlines = ax.get_xgridlines()
else:
gridlines = ax.get_ygridlines()
if gridlines:
self.make_grid(baxis, dim, gridlines[0])
def close_axes(self, ax):
"Complete the axes adding axes-dependent plot props"
background_fill = ax.get_axis_bgcolor()
if background_fill == 'w':
background_fill = 'white'
self.plot.background_fill = background_fill
if self.xkcd:
self.plot.title_text_font = "Comic Sans MS, Textile, cursive"
self.plot.title_text_font_style = "bold"
self.plot.title_text_color = "black"
# Add a "marker" Glyph to help the plot.renderers splitting in the GridPlot build
dummy_source = ColumnDataSource(data=dict(name="ax_end"))
self.plot.renderers.append(GlyphRenderer(data_source=dummy_source, glyph=X()))
def open_legend(self, legend, props):
lgnd = Legend(orientation="top_right")
try:
for label, obj in zip(props['labels'], props['handles']):
lgnd.legends.append((label, [self.handles[id(obj)]]))
self.plot.add_layout(lgnd)
except KeyError:
pass
def close_legend(self, legend):
pass
def draw_line(self, data, coordinates, style, label, mplobj=None):
"Given a mpl line2d instance create a Bokeh Line glyph."
_x = data[:, 0]
if self.pd_obj is True:
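            # With pandas support enabled, x values may be pandas Period
            # ordinals (e.g. from a time series plot); convert them back to
            # timestamps so they can be drawn on a datetime axis.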
try:
x = [pd.Period(ordinal=int(i), freq=self.ax.xaxis.freq).to_timestamp() for i in _x]
except AttributeError: # we probably can make this one more intelligent later
x = _x
else:
x = _x
y = data[:, 1]
if self.xkcd:
x, y = xkcd_line(x, y)
line = Line()
source = ColumnDataSource()
line.x = source.add(x)
line.y = source.add(y)
line.line_color = style['color']
line.line_width = style['linewidth']
line.line_alpha = style['alpha']
        line.line_dash = [] if style['dasharray'] == "none" else [int(i) for i in style['dasharray'].split(",")] # str2list(int)
# line.line_join = line2d.get_solid_joinstyle() # not in mplexporter
# line.line_cap = cap_style_map[line2d.get_solid_capstyle()] # not in mplexporter
if self.xkcd:
line.line_width = 3
r = self.plot.add_glyph(source, line)
self.zorder[r._id] = style['zorder']
self.handles[id(mplobj)] = r
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"Given a mpl line2d instance create a Bokeh Marker glyph."
x = data[:, 0]
y = data[:, 1]
marker_map = {
"o": Circle,
"s": Square,
"+": Cross,
"^": Triangle,
"v": InvertedTriangle,
"x": X,
"d": Diamond,
"D": Diamond,
"*": Asterisk,
}
# Not all matplotlib markers are currently handled; fall back to Circle if we encounter an
# unhandled marker. See http://matplotlib.org/api/markers_api.html for a list of markers.
try:
marker = marker_map[style['marker']]()
except KeyError:
warnings.warn("Unable to handle marker: %s; defaulting to Circle" % style['marker'])
marker = Circle()
source = ColumnDataSource()
marker.x = source.add(x)
marker.y = source.add(y)
marker.line_color = style['edgecolor']
marker.fill_color = style['facecolor']
marker.line_width = style['edgewidth']
marker.size = style['markersize']
marker.fill_alpha = marker.line_alpha = style['alpha']
r = self.plot.add_glyph(source, marker)
self.zorder[r._id] = style['zorder']
self.handles[id(mplobj)] = r
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
pass
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"Given a mpl text instance create a Bokeh Text glyph."
        # mpl gives you the title and axes names as text objects (with specific locations)
        # inside the plot itself. That does not make sense inside Bokeh, so we
        # just skip the title and axes names from the conversion and convert any other text.
if text_type in ['xlabel', 'ylabel', 'title']:
return
if coordinates != 'data':
return
x, y = position
text = Text(x=x, y=y, text=[text])
alignment_map = {"center": "middle", "top": "top", "bottom": "bottom", "baseline": "bottom"}
        # baseline not implemented in Bokeh, defaulting to bottom.
text.text_alpha = style['alpha']
text.text_font_size = "%dpx" % style['fontsize']
text.text_color = style['color']
text.text_align = style['halign']
text.text_baseline = alignment_map[style['valign']]
text.angle = style['rotation']
## Using get_fontname() works, but it's oftentimes not available in the browser,
## so it's better to just use the font family here.
#text.text_font = mplText.get_fontname()) not in mplexporter
#text.text_font = mplText.get_fontfamily()[0] # not in mplexporter
#text.text_font_style = fontstyle_map[mplText.get_fontstyle()] # not in mplexporter
## we don't really have the full range of font weights, but at least handle bold
#if mplText.get_weight() in ("bold", "heavy"):
#text.text_font_style = bold
source = ColumnDataSource()
r = self.plot.add_glyph(source, text)
self.zorder[r._id] = style['zorder']
self.handles[id(mplobj)] = r
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
pass
def make_axis(self, ax, location, props):
"Given a mpl axes instance, returns a Bokeh LinearAxis object."
# TODO:
# * handle log scaling
# * map `labelpad` to `major_label_standoff`
# * deal with minor ticks once BokehJS supports them
# * handle custom tick locations once that is added to bokehJS
tf = props['tickformat']
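        # String tick labels indicate a categorical axis: build a FactorRange
        # from the labels; otherwise fall back on linear/datetime axes below.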
if tf and any(isinstance(x, string_types) for x in tf):
laxis = CategoricalAxis(axis_label=ax.get_label_text())
rng = FactorRange(factors=[str(x) for x in tf], offset=-1.0)
if location in ["above", "below"]:
self.plot.x_range = rng
else:
self.plot.y_range = rng
else:
if props['scale'] == "linear":
laxis = LinearAxis(axis_label=ax.get_label_text())
elif props['scale'] == "date":
laxis = DatetimeAxis(axis_label=ax.get_label_text())
self.plot.add_layout(laxis, location)
# First get the label properties by getting an mpl.Text object
label = ax.get_label()
self.text_props(label, laxis, prefix="axis_label_")
# Set the tick properties (for now just turn off if necessary)
# TODO: mirror tick properties
if props['nticks'] == 0:
laxis.major_tick_line_color = None
laxis.minor_tick_line_color = None
laxis.major_label_text_color = None
# To get the tick label format, we look at the first of the tick labels
# and assume the rest are formatted similarly.
ticklabels = ax.get_ticklabels()
if ticklabels:
self.text_props(ticklabels[0], laxis, prefix="major_label_")
#newaxis.bounds = axis.get_data_interval() # I think this is the right func...
if self.xkcd:
laxis.axis_line_width = 3
laxis.axis_label_text_font = "Comic Sans MS, Textile, cursive"
laxis.axis_label_text_font_style = "bold"
laxis.axis_label_text_color = "black"
laxis.major_label_text_font = "Comic Sans MS, Textile, cursive"
laxis.major_label_text_font_style = "bold"
laxis.major_label_text_color = "black"
return laxis
def make_grid(self, baxis, dimension, gridline):
"Given a mpl axes instance, returns a Bokeh Grid object."
lgrid = Grid(dimension=dimension,
ticker=baxis.ticker,
grid_line_color=gridline.get_color(),
grid_line_width=gridline.get_linewidth())
self.plot.add_layout(lgrid)
def make_line_collection(self, col):
"Given a mpl collection instance create a Bokeh MultiLine glyph."
xydata = col.get_segments()
t_xydata = [np.transpose(seg) for seg in xydata]
xs = [t_xydata[x][0] for x in range(len(t_xydata))]
ys = [t_xydata[x][1] for x in range(len(t_xydata))]
if self.xkcd:
xkcd_xs = [xkcd_line(xs[i], ys[i])[0] for i in range(len(xs))]
xkcd_ys = [xkcd_line(xs[i], ys[i])[1] for i in range(len(ys))]
xs = xkcd_xs
ys = xkcd_ys
multiline = MultiLine()
source = ColumnDataSource()
multiline.xs = source.add(xs)
multiline.ys = source.add(ys)
self.multiline_props(source, multiline, col)
r = self.plot.add_glyph(source, multiline)
self.zorder[r._id] = col.zorder
self.handles[id(col)] = r
def make_poly_collection(self, col):
"Given a mpl collection instance create a Bokeh Patches glyph."
xs = []
ys = []
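        # A single matplotlib path can contain several sub-polygons, so each
        # one gets its own entry in the xs/ys lists consumed by Patches.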
for path in col.get_paths():
for sub_poly in path.to_polygons():
xx, yy = sub_poly.transpose()
xs.append(xx)
ys.append(yy)
patches = Patches()
source = ColumnDataSource()
patches.xs = source.add(xs)
patches.ys = source.add(ys)
self.patches_props(source, patches, col)
r = self.plot.add_glyph(source, patches)
self.zorder[r._id] = col.zorder
self.handles[id(col)] = r
def multiline_props(self, source, multiline, col):
"Takes a mpl collection object to extract and set up some Bokeh multiline properties."
colors = get_props_cycled(col, col.get_colors(), fx=lambda x: mpl.colors.rgb2hex(x))
widths = get_props_cycled(col, col.get_linewidth())
multiline.line_color = source.add(colors)
multiline.line_width = source.add(widths)
multiline.line_alpha = col.get_alpha()
offset = col.get_linestyle()[0][0]
if not col.get_linestyle()[0][1]:
on_off = []
else:
on_off = map(int,col.get_linestyle()[0][1])
multiline.line_dash_offset = convert_dashes(offset)
multiline.line_dash = list(convert_dashes(tuple(on_off)))
def patches_props(self, source, patches, col):
"Takes a mpl collection object to extract and set up some Bokeh patches properties."
face_colors = get_props_cycled(col, col.get_facecolors(), fx=lambda x: mpl.colors.rgb2hex(x))
patches.fill_color = source.add(face_colors)
edge_colors = get_props_cycled(col, col.get_edgecolors(), fx=lambda x: mpl.colors.rgb2hex(x))
patches.line_color = source.add(edge_colors)
widths = get_props_cycled(col, col.get_linewidth())
patches.line_width = source.add(widths)
patches.line_alpha = col.get_alpha()
patches.fill_alpha = col.get_alpha()
offset = col.get_linestyle()[0][0]
if not col.get_linestyle()[0][1]:
on_off = []
else:
on_off = map(int,col.get_linestyle()[0][1])
patches.line_dash_offset = convert_dashes(offset)
patches.line_dash = list(convert_dashes(tuple(on_off)))
def text_props(self, text, obj, prefix=""):
fp = text.get_font_properties()
setattr(obj, prefix+"text_font", fp.get_family()[0])
setattr(obj, prefix+"text_font_size", "%fpt" % fp.get_size_in_points())
setattr(obj, prefix+"text_font_style", fp.get_style())
| bsd-3-clause |
stganser/polyite | src/polyite/fitness/scikit_learn/init.py | 1 | 2788 | from enum import Enum
import csv
import math
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
class LearningAlgorithms(Enum):
    cart = 0
    random_forest = 1
class RandomForestsConfig:
def __init__(self, n_tree, max_features):
self.n_tree = n_tree
self.max_features = max_features
def learn(feature_fields, learning_sets_file_names, rescale, min_samples_leaf,
learning_algorithm, random_forest_config):
features_learn = []
classes_learn = []
for path in learning_sets_file_names:
(curr_features, curr_classes) = load_data(path, feature_fields, rescale)
features_learn = features_learn + curr_features
classes_learn = classes_learn + curr_classes
if learning_algorithm is LearningAlgorithms.cart:
clf = tree.DecisionTreeClassifier(min_samples_leaf=min_samples_leaf, criterion="gini")
else:
clf = RandomForestClassifier(n_estimators=random_forest_config.n_tree,
max_features=random_forest_config.max_features,
criterion="gini", bootstrap=True,
min_samples_leaf=min_samples_leaf)
clf = clf.fit(features_learn, classes_learn)
return clf
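# Min-max rescale each feature column to the [0, 1] range.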
def rescale_feature_values(features, n_features):
maxima = list(map(lambda col: max(map(lambda row: row[col], features)), range(0, n_features)))
minima = list(map(lambda col: min(map(lambda row: row[col], features)), range(0, n_features)))
return list(map(
lambda row: list(map(lambda col: (row[col] - minima[col]) / (maxima[col] - minima[col]), range(0, n_features))),
features))
# result is sorted, starting from smallest speedup to largest
def load_data(csv_file_name, feature_fields, rescale):
result = []
with open(csv_file_name, newline='') as csvFile:
content = csv.reader(csvFile, delimiter="\t", quotechar="\"")
head = next(content)
fields = {}
for i in range(0, len(head)):
fields[head[i]] = i
for row in content:
curr_result = []
if row[fields["class"]] != "-":
curr_feature_vals = []
for field in feature_fields:
curr_feature_vals.append(float(row[fields[field]]))
curr_result = curr_result + [float(row[fields["class"]])]
curr_result = curr_result + curr_feature_vals
result.append(curr_result)
result = sorted(result, key=lambda r : r[1])
features = list(map(lambda row : row[1:] , result))
classes = list(map(lambda row : row[0] , result))
if rescale:
return rescale_feature_values(features, len(feature_fields)), classes
return features, classes
| mit |
gfyoung/pandas | pandas/tests/io/test_clipboard.py | 2 | 8079 | from textwrap import dedent
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, get_option, read_clipboard
import pandas._testing as tm
from pandas.io.clipboard import clipboard_get, clipboard_set
def build_kwargs(sep, excel):
kwargs = {}
if excel != "default":
kwargs["excel"] = excel
if sep != "default":
kwargs["sep"] = sep
return kwargs
@pytest.fixture(
params=[
"delims",
"utf8",
"utf16",
"string",
"long",
"nonascii",
"colwidth",
"mixed",
"float",
"int",
]
)
def df(request):
data_type = request.param
if data_type == "delims":
return DataFrame({"a": ['"a,\t"b|c', "d\tef´"], "b": ["hi'j", "k''lm"]})
elif data_type == "utf8":
return DataFrame({"a": ["µasd", "Ωœ∑´"], "b": ["øπ∆˚¬", "œ∑´®"]})
elif data_type == "utf16":
return DataFrame(
{"a": ["\U0001f44d\U0001f44d", "\U0001f44d\U0001f44d"], "b": ["abc", "def"]}
)
elif data_type == "string":
return tm.makeCustomDataframe(
5, 3, c_idx_type="s", r_idx_type="i", c_idx_names=[None], r_idx_names=[None]
)
elif data_type == "long":
max_rows = get_option("display.max_rows")
return tm.makeCustomDataframe(
max_rows + 1,
3,
data_gen_f=lambda *args: np.random.randint(2),
c_idx_type="s",
r_idx_type="i",
c_idx_names=[None],
r_idx_names=[None],
)
elif data_type == "nonascii":
return DataFrame({"en": "in English".split(), "es": "en español".split()})
elif data_type == "colwidth":
_cw = get_option("display.max_colwidth") + 1
return tm.makeCustomDataframe(
5,
3,
data_gen_f=lambda *args: "x" * _cw,
c_idx_type="s",
r_idx_type="i",
c_idx_names=[None],
r_idx_names=[None],
)
elif data_type == "mixed":
return DataFrame(
{
"a": np.arange(1.0, 6.0) + 0.01,
"b": np.arange(1, 6).astype(np.int64),
"c": list("abcde"),
}
)
elif data_type == "float":
return tm.makeCustomDataframe(
5,
3,
data_gen_f=lambda r, c: float(r) + 0.01,
c_idx_type="s",
r_idx_type="i",
c_idx_names=[None],
r_idx_names=[None],
)
elif data_type == "int":
return tm.makeCustomDataframe(
5,
3,
data_gen_f=lambda *args: np.random.randint(2),
c_idx_type="s",
r_idx_type="i",
c_idx_names=[None],
r_idx_names=[None],
)
else:
raise ValueError
@pytest.fixture
def mock_clipboard(monkeypatch, request):
"""Fixture mocking clipboard IO.
This mocks pandas.io.clipboard.clipboard_get and
pandas.io.clipboard.clipboard_set.
This uses a local dict for storing data. The dictionary
key used is the test ID, available with ``request.node.name``.
This returns the local dictionary, for direct manipulation by
tests.
"""
# our local clipboard for tests
_mock_data = {}
def _mock_set(data):
_mock_data[request.node.name] = data
def _mock_get():
return _mock_data[request.node.name]
monkeypatch.setattr("pandas.io.clipboard.clipboard_set", _mock_set)
monkeypatch.setattr("pandas.io.clipboard.clipboard_get", _mock_get)
yield _mock_data
@pytest.mark.clipboard
def test_mock_clipboard(mock_clipboard):
import pandas.io.clipboard
pandas.io.clipboard.clipboard_set("abc")
assert "abc" in set(mock_clipboard.values())
result = pandas.io.clipboard.clipboard_get()
assert result == "abc"
@pytest.mark.single
@pytest.mark.clipboard
@pytest.mark.usefixtures("mock_clipboard")
class TestClipboard:
def check_round_trip_frame(self, data, excel=None, sep=None, encoding=None):
data.to_clipboard(excel=excel, sep=sep, encoding=encoding)
result = read_clipboard(sep=sep or "\t", index_col=0, encoding=encoding)
tm.assert_frame_equal(data, result)
# Test that default arguments copy as tab delimited
def test_round_trip_frame(self, df):
self.check_round_trip_frame(df)
# Test that explicit delimiters are respected
@pytest.mark.parametrize("sep", ["\t", ",", "|"])
def test_round_trip_frame_sep(self, df, sep):
self.check_round_trip_frame(df, sep=sep)
# Test white space separator
def test_round_trip_frame_string(self, df):
df.to_clipboard(excel=False, sep=None)
result = read_clipboard()
assert df.to_string() == result.to_string()
assert df.shape == result.shape
# Two character separator is not supported in to_clipboard
# Test that multi-character separators are not silently passed
def test_excel_sep_warning(self, df):
with tm.assert_produces_warning():
df.to_clipboard(excel=True, sep=r"\t")
# Separator is ignored when excel=False and should produce a warning
def test_copy_delim_warning(self, df):
with tm.assert_produces_warning():
df.to_clipboard(excel=False, sep="\t")
# Tests that the default behavior of to_clipboard is tab
# delimited and excel="True"
@pytest.mark.parametrize("sep", ["\t", None, "default"])
@pytest.mark.parametrize("excel", [True, None, "default"])
def test_clipboard_copy_tabs_default(self, sep, excel, df, request, mock_clipboard):
kwargs = build_kwargs(sep, excel)
df.to_clipboard(**kwargs)
assert mock_clipboard[request.node.name] == df.to_csv(sep="\t")
# Tests reading of white space separated tables
@pytest.mark.parametrize("sep", [None, "default"])
@pytest.mark.parametrize("excel", [False])
def test_clipboard_copy_strings(self, sep, excel, df):
kwargs = build_kwargs(sep, excel)
df.to_clipboard(**kwargs)
result = read_clipboard(sep=r"\s+")
assert result.to_string() == df.to_string()
assert df.shape == result.shape
def test_read_clipboard_infer_excel(self, request, mock_clipboard):
# gh-19010: avoid warnings
clip_kwargs = {"engine": "python"}
text = dedent(
"""
John James Charlie Mingus
1 2
4 Harry Carney
""".strip()
)
mock_clipboard[request.node.name] = text
df = pd.read_clipboard(**clip_kwargs)
# excel data is parsed correctly
assert df.iloc[1][1] == "Harry Carney"
# having diff tab counts doesn't trigger it
text = dedent(
"""
a\t b
1 2
3 4
""".strip()
)
mock_clipboard[request.node.name] = text
res = pd.read_clipboard(**clip_kwargs)
text = dedent(
"""
a b
1 2
3 4
""".strip()
)
mock_clipboard[request.node.name] = text
exp = pd.read_clipboard(**clip_kwargs)
tm.assert_frame_equal(res, exp)
def test_invalid_encoding(self, df):
msg = "clipboard only supports utf-8 encoding"
# test case for testing invalid encoding
with pytest.raises(ValueError, match=msg):
df.to_clipboard(encoding="ascii")
with pytest.raises(NotImplementedError, match=msg):
pd.read_clipboard(encoding="ascii")
@pytest.mark.parametrize("enc", ["UTF-8", "utf-8", "utf8"])
def test_round_trip_valid_encodings(self, enc, df):
self.check_round_trip_frame(df, encoding=enc)
@pytest.mark.single
@pytest.mark.clipboard
@pytest.mark.parametrize("data", ["\U0001f44d...", "Ωœ∑´...", "abcd..."])
def test_raw_roundtrip(data):
# PR #25040 wide unicode wasn't copied correctly on PY3 on windows
clipboard_set(data)
assert data == clipboard_get()
| bsd-3-clause |
hurdlea/SimpleCV | SimpleCV/Shell/Shell.py | 10 | 7838 | #!/usr/bin/python
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# SimpleCV
# a kinder, gentler machine vision python library
#-----------------------------------------------------------------------
# SimpleCV is an interface for Open Source machine
# vision libraries in Python.
# It provides a concise, readable interface for cameras,
# image manipulation, feature extraction, and format conversion.
# Our mission is to give casual users a comprehensive interface
# for basic machine vision functions and an
# elegant programming interface for advanced users.
#
# more info:
# http://www.simplecv.org
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#load system libraries
from subprocess import call
import platform
import webbrowser
import sys
from SimpleCV.__init__ import *
#Load simpleCV libraries
from SimpleCV.Shell.Tutorial import *
from SimpleCV.Shell.Example import *
try:
from SimpleCV import __version__ as SIMPLECV_VERSION
except ImportError:
SIMPLECV_VERSION = ''
#Command to clear the shell screen
def shellclear():
if platform.system() == "Windows":
return
call("clear")
#method to get magic_* methods working in bpython
def make_magic(method):
def wrapper(*args, **kwargs):
if not args:
return method('', '')
return method('', *args, **kwargs)
return wrapper
def plot(arg):
try:
import matplotlib.pyplot as plt
except ImportError:
logger.warning("Matplotlib is not installed and required")
return
print "args", arg
print "type", type(arg)
plt.plot(arg)
plt.show()
def hist(arg):
try:
import pylab
except ImportError:
logger.warning("pylab is not installed and required")
return
plot(pylab.hist(arg)[1])
def magic_clear(self, arg):
shellclear()
def magic_forums(self, arg):
webbrowser.open('http://help.simplecv.org/questions/')
def magic_walkthrough(self, arg):
webbrowser.open('http://examples.simplecv.org/en/latest/')
def magic_docs(self, arg):
webbrowser.open('http://www.simplecv.org/docs/')
banner = '+-----------------------------------------------------------+\n'
banner += ' SimpleCV '
banner += SIMPLECV_VERSION
banner += ' [interactive shell] - http://simplecv.org\n'
banner += '+-----------------------------------------------------------+\n'
banner += '\n'
banner += 'Commands: \n'
banner += '\t"exit()" or press "Ctrl+ D" to exit the shell\n'
banner += '\t"clear()" to clear the shell screen\n'
banner += '\t"tutorial()" to begin the SimpleCV interactive tutorial\n'
banner += '\t"example()" gives a list of examples you can run\n'
banner += '\t"forums()" will launch a web browser for the help forums\n'
banner += '\t"walkthrough()" will launch a web browser with a walkthrough\n'
banner += '\n'
banner += 'Usage:\n'
banner += '\tdot complete works to show library\n'
banner += '\tfor example: Image().save("/tmp/test.jpg") will dot complete\n'
banner += '\tjust by touching TAB after typing Image().\n'
banner += '\n'
banner += 'Documentation:\n'
banner += '\thelp(Image), ?Image, Image?, or Image()? all do the same\n'
banner += '\t"docs()" will launch webbrowser showing documentation'
banner += '\n'
exit_msg = '\n... [Exiting the SimpleCV interactive shell] ...\n'
def setup_ipython():
try:
import IPython
from IPython.config.loader import Config
from IPython.frontend.terminal.embed import InteractiveShellEmbed
cfg = Config()
cfg.PromptManager.in_template = "SimpleCV:\\#> "
cfg.PromptManager.out_template = "SimpleCV:\\#: "
#~ cfg.InteractiveShellEmbed.prompt_in1 = "SimpleCV:\\#> "
#~ cfg.InteractiveShellEmbed.prompt_out="SimpleCV:\\#: "
scvShell = InteractiveShellEmbed(config=cfg, banner1=banner,
exit_msg=exit_msg)
scvShell.define_magic("tutorial", magic_tutorial)
scvShell.define_magic("clear", magic_clear)
scvShell.define_magic("example", magic_examples)
scvShell.define_magic("forums", magic_forums)
scvShell.define_magic("walkthrough", magic_walkthrough)
scvShell.define_magic("docs", magic_docs)
except ImportError:
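        # Older IPython releases (the 0.10-era API) do not provide
        # InteractiveShellEmbed; fall back to the legacy IPShellEmbed interface.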
try:
from IPython.Shell import IPShellEmbed
argsv = ['-pi1', 'SimpleCV:\\#>', '-pi2', ' .\\D.:', '-po',
'SimpleCV:\\#>', '-nosep']
scvShell = IPShellEmbed(argsv)
scvShell.set_banner(banner)
scvShell.set_exit_msg(exit_msg)
scvShell.IP.api.expose_magic("tutorial", magic_tutorial)
scvShell.IP.api.expose_magic("clear", magic_clear)
scvShell.IP.api.expose_magic("example", magic_examples)
scvShell.IP.api.expose_magic("forums", magic_forums)
scvShell.IP.api.expose_magic("walkthrough", magic_walkthrough)
scvShell.IP.api.expose_magic("docs", magic_docs)
except ImportError:
raise
return scvShell()
def setup_bpython():
import bpython
example = make_magic(magic_examples)
clear = make_magic(magic_clear)
docs = make_magic(magic_docs)
tutorial = make_magic(magic_tutorial)
walkthrough = make_magic(magic_walkthrough)
forums = make_magic(magic_forums)
temp = locals().copy()
temp.update(globals())
return bpython.embed(locals_=temp, banner=banner)
def setup_plain():
import code
return code.interact(banner=banner, local=globals())
def run_notebook(mainArgs):
    """Run the ipython notebook server"""
    # Imported here so the interactive shells still work when IPython is absent.
    import IPython
    if IPython.__version__.startswith('1.'):
from IPython.html import notebookapp
from IPython.html.services.kernels import kernelmanager
else:
from IPython.frontend.html.notebook import notebookapp
from IPython.frontend.html.notebook import kernelmanager
code = ""
code += "from SimpleCV import *;"
code += "init_options_handler.enable_notebook();"
kernelmanager.MappingKernelManager.first_beat = 30.0
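    # first_beat gives slow kernels extra time before the first heartbeat check;
    # the server below starts on port 5050 with SimpleCV pre-imported in each kernel.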
app = notebookapp.NotebookApp.instance()
mainArgs += [
'--port', '5050',
'--c', code,
]
app.initialize(mainArgs)
app.start()
sys.exit()
def self_update():
URL = "https://github.com/sightmachine/SimpleCV/zipball/master"
command = "pip install -U %s" % URL
if os.getuid() == 0:
command = "sudo " + command
returncode = call(command, shell=True)
sys.exit()
def run_shell(shell=None):
shells = ['setup_ipython', 'setup_bpython', 'setup_plain']
available_shells = [shell] if shell else shells
for shell in available_shells:
try:
return globals()[shell]()
except ImportError:
pass
raise ImportError
def main(*args):
log_level = logging.WARNING
interface = None
if len(sys.argv) > 1 and len(sys.argv[1]) > 1:
flag = sys.argv[1]
if flag == 'notebook':
run_notebook(sys.argv[1:])
sys.exit()
elif flag == 'update':
print "Updating SimpleCV....."
self_update()
if flag in ['--headless', 'headless']:
# set SDL to use the dummy NULL video driver,
# so it doesn't need a windowing system.
os.environ["SDL_VIDEODRIVER"] = "dummy"
elif flag in ['--nowarnings', 'nowarnings']:
log_level = logging.INFO
elif flag in ['--debug', 'debug']:
log_level = logging.DEBUG
if flag in ['--ipython', 'ipython']:
interface = 'setup_ipython'
elif flag in ['--bpython', 'bpython']:
interface = 'setup_bpython'
else:
interface = 'setup_plain'
init_logging(log_level)
shellclear()
scvShell = run_shell(interface)
| bsd-3-clause |
huobaowangxi/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
datalyze-solutions/pandas-qt | doc/source/conf.py | 4 | 9477 | # -*- coding: utf-8 -*-
#
# pandas-qt documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 14 15:32:14 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../pandasqt'))
sys.path.insert(0, os.path.abspath('../../examples'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pandas-qt'
copyright = u'2014, Matthias Ludwig - Datalyze Solutions'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pandas-qtdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pandas-qt.tex', u'pandas-qt Documentation',
u'Matthias Ludwig - Datalyze Solutions', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pandas-qt', u'pandas-qt Documentation',
[u'Matthias Ludwig - Datalyze Solutions'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pandas-qt', u'pandas-qt Documentation',
u'Matthias Ludwig - Datalyze Solutions', 'pandas-qt', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| mit |
SANDAG/pandana | pandana/loaders/tests/test_osm.py | 3 | 6664 | import numpy.testing as npt
import pandas.util.testing as pdt
import pytest
import pandana
from pandana.loaders import osm
from pandana.testing import skipiftravis
@pytest.fixture(scope='module')
def bbox1():
# Intersection of Telegraph and Haste in Berkeley
# Sample query: http://overpass-turbo.eu/s/6AK
return 37.8659303546, -122.2588003879, 37.8661598571, -122.2585062512
@pytest.fixture(scope='module')
def bbox2():
# Telegraph Channing to Durant in Berkeley
# Sample query: http://overpass-turbo.eu/s/6B0
return 37.8668405874, -122.2590948685, 37.8679028054, -122.2586363885
@pytest.fixture(scope='module')
def bbox3():
# West Berkeley including highway 80, frontage roads, and foot paths
# Sample query: http://overpass-turbo.eu/s/6VE
return (
37.85225504880375, -122.30295896530151,
        37.85776128099243, -122.2954273223877)
@pytest.fixture(scope='module')
def query_data1(bbox1):
return osm.make_osm_query(osm.build_network_osm_query(*bbox1))
@pytest.fixture(scope='module')
def query_data2(bbox2):
return osm.make_osm_query(osm.build_network_osm_query(*bbox2))
@pytest.fixture(scope='module')
def dataframes1(query_data1):
return osm.parse_network_osm_query(query_data1)
@pytest.fixture(scope='module')
def dataframes2(query_data2):
return osm.parse_network_osm_query(query_data2)
def test_make_osm_query(query_data1):
assert isinstance(query_data1, dict)
assert len(query_data1['elements']) == 24
assert len(
[e for e in query_data1['elements'] if e['type'] == 'node']) == 22
assert len([e for e in query_data1['elements'] if e['type'] == 'way']) == 2
def test_process_node():
test_node = {
'id': 'id',
'lat': 'lat',
'lon': 'lon',
'extra': 'extra'
}
expected = {
'id': 'id',
'lat': 'lat',
'lon': 'lon'
}
assert osm.process_node(test_node) == expected
test_node['tags'] = {'highway': 'highway', 'source': 'source'}
expected['highway'] = 'highway'
assert osm.process_node(test_node) == expected
def test_process_way():
test_way = {
"type": "way",
"id": 188434143,
"timestamp": "2014-01-04T22:18:14Z",
"version": 2,
"changeset": 19814115,
"user": "dchiles",
"uid": 153669,
"nodes": [
53020977,
53041093,
],
"tags": {
'source': 'source',
"addr:city": "Berkeley",
"highway": "secondary",
"name": "Telegraph Avenue",
}
}
expected_way = {
'id': test_way['id'],
'addr:city': test_way['tags']['addr:city'],
'highway': test_way['tags']['highway'],
'name': test_way['tags']['name']
}
expected_waynodes = [
{'way_id': test_way['id'], 'node_id': test_way['nodes'][0]},
{'way_id': test_way['id'], 'node_id': test_way['nodes'][1]}
]
way, waynodes = osm.process_way(test_way)
assert way == expected_way
assert waynodes == expected_waynodes
def test_parse_network_osm_query(dataframes1):
nodes, ways, waynodes = dataframes1
assert len(nodes) == 22
assert len(ways) == 2
assert len(waynodes.index.unique()) == 2
def test_parse_network_osm_query_raises():
data = osm.make_osm_query(osm.build_network_osm_query(
37.8, -122.252, 37.8, -122.252))
with pytest.raises(RuntimeError):
osm.parse_network_osm_query(data)
def test_ways_in_bbox(bbox1, dataframes1):
nodes, ways, waynodes = osm.ways_in_bbox(*bbox1)
exp_nodes, exp_ways, exp_waynodes = dataframes1
pdt.assert_frame_equal(nodes, exp_nodes)
pdt.assert_frame_equal(ways, exp_ways)
pdt.assert_frame_equal(waynodes, exp_waynodes)
@pytest.mark.parametrize(
'network_type, noset',
[('walk', {'motorway', 'motorway_link'}),
('drive', {'footway', 'cycleway'})])
def test_ways_in_bbox_walk_network(bbox3, network_type, noset):
nodes, ways, waynodes = osm.ways_in_bbox(*bbox3, network_type=network_type)
for _, way in ways.iterrows():
assert way['highway'] not in noset
def test_intersection_nodes1(dataframes1):
_, _, waynodes = dataframes1
intersections = osm.intersection_nodes(waynodes)
assert intersections == {53041093}
def test_intersection_nodes2(dataframes2):
_, _, waynodes = dataframes2
intersections = osm.intersection_nodes(waynodes)
assert intersections == {53099275, 53063555}
def test_node_pairs_two_way(dataframes2):
nodes, ways, waynodes = dataframes2
pairs = osm.node_pairs(nodes, ways, waynodes)
assert len(pairs) == 1
fn = 53063555
tn = 53099275
pair = pairs.loc[(fn, tn)]
assert pair.from_id == fn
assert pair.to_id == tn
npt.assert_allclose(pair.distance, 101.20535797547758)
def test_node_pairs_one_way(dataframes2):
nodes, ways, waynodes = dataframes2
pairs = osm.node_pairs(nodes, ways, waynodes, two_way=False)
assert len(pairs) == 2
n1 = 53063555
n2 = 53099275
for p1, p2 in [(n1, n2), (n2, n1)]:
pair = pairs.loc[(p1, p2)]
assert pair.from_id == p1
assert pair.to_id == p2
npt.assert_allclose(pair.distance, 101.20535797547758)
@skipiftravis
def test_network_from_bbox(bbox2):
net = osm.network_from_bbox(*bbox2)
assert isinstance(net, pandana.Network)
def test_build_node_query_no_tags(bbox1):
query = osm.build_node_query(*bbox1)
assert query == (
'[out:json];'
'('
' node'
' '
' ({},{},{},{});'
');'
'out;').format(*bbox1)
def test_build_node_query_str_tag(bbox1):
tag = '"tag"'
query = osm.build_node_query(*bbox1, tags=tag)
assert query == (
'[out:json];'
'('
' node'
' ["tag"]'
' ({},{},{},{});'
');'
'out;').format(*bbox1)
def test_build_node_query_tag_list(bbox1):
tags = ['"tag1"="tag1"', '"tag2"!~"tag2|tag2"']
query = osm.build_node_query(*bbox1, tags=tags)
assert query == (
'[out:json];'
'('
' node'
' ["tag1"="tag1"]["tag2"!~"tag2|tag2"]'
' ({},{},{},{});'
');'
'out;').format(*bbox1)
def test_node_query(bbox2):
tags = '"amenity"="restaurant"'
cafes = osm.node_query(*bbox2, tags=tags)
assert len(cafes) == 4
assert 'lat' in cafes.columns
assert 'lon' in cafes.columns
assert cafes['name'][2965338413] == 'Koja Kitchen'
def test_node_query_raises():
with pytest.raises(RuntimeError):
osm.node_query(37.8, -122.282, 37.8, -122.252)
| agpl-3.0 |
jandom/rdkit | rdkit/Chem/Draw/UnitTestSimilarityMaps.py | 1 | 7019 | # $Id$
#
# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Sereina Riniker, Aug 2013
""" unit testing code for molecule drawing
"""
from __future__ import print_function
from rdkit import RDConfig
import unittest, os, tempfile
from rdkit import Chem
from rdkit.Chem import Draw
import platform
if platform.system() == "Linux":
import os, sys
if not os.environ.get("DISPLAY", None):
try:
# Force matplotlib to not use any Xwindows backend.
import matplotlib
print("Forcing use of Agg renderer", file=sys.stderr)
matplotlib.use('Agg')
except ImportError:
pass
try:
from rdkit.Chem.Draw import SimilarityMaps as sm
except ImportError:
sm = None
from rdkit.RDLogger import logger
logger = logger()
class TestCase(unittest.TestCase):
def setUp(self):
self.mol1 = Chem.MolFromSmiles('c1ccccc1')
self.mol2 = Chem.MolFromSmiles('c1ccncc1')
def testSimilarityMap(self):
# Morgan2 BV
refWeights = [0.5, 0.5, 0.5, -0.5, 0.5, 0.5]
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetMorganFingerprint(m, i, radius=2, fpType='bv'))
for w, r in zip(weights, refWeights):
self.assertEqual(w, r)
fig, maxWeight = sm.GetSimilarityMapForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetMorganFingerprint(m, i, radius=2, fpType='bv'))
self.assertEqual(maxWeight, 0.5)
weights, maxWeight = sm.GetStandardizedWeights(weights)
self.assertEqual(maxWeight, 0.5)
refWeights = [1.0, 1.0, 1.0, -1.0, 1.0, 1.0]
for w, r in zip(weights, refWeights):
self.assertEqual(w, r)
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetMorganFingerprint(m, i, fpType='count'))
self.assertTrue(weights[3] < 0)
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2,
lambda m, i: sm.GetMorganFingerprint(m, i, fpType='bv', useFeatures=True))
self.assertTrue(weights[3] < 0)
# hashed AP BV
refWeights = [0.09523, 0.17366, 0.17366, -0.23809, 0.17366, 0.17366]
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetAPFingerprint(m, i, fpType='bv', nBits=1024))
for w, r in zip(weights, refWeights):
self.assertAlmostEqual(w, r, 4)
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetAPFingerprint(m, i, fpType='normal'))
self.assertTrue(weights[3] < 0)
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetAPFingerprint(m, i, fpType='hashed'))
self.assertTrue(weights[3] < 0)
# hashed TT BV
refWeights = [0.5, 0.5, -0.16666, -0.5, -0.16666, 0.5]
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2,
lambda m, i: sm.GetTTFingerprint(m, i, fpType='bv', nBits=1024, nBitsPerEntry=1))
for w, r in zip(weights, refWeights):
self.assertAlmostEqual(w, r, 4)
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetTTFingerprint(m, i, fpType='normal'))
self.assertTrue(weights[3] < 0)
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetTTFingerprint(m, i, fpType='hashed'))
self.assertTrue(weights[3] < 0)
# RDK fingerprint BV
refWeights = [0.42105, 0.42105, 0.42105, -0.32895, 0.42105, 0.42105]
weights = sm.GetAtomicWeightsForFingerprint(
self.mol1, self.mol2, lambda m, i: sm.GetRDKFingerprint(m, i, nBits=1024, nBitsPerHash=1))
for w, r in zip(weights, refWeights):
self.assertAlmostEqual(w, r, 4)
def testSimilarityMapKWArgs(self):
# Morgan2 BV
m1 = Chem.MolFromSmiles('CC[C@](F)(Cl)c1ccccc1')
m2 = Chem.MolFromSmiles('CC[C@@](F)(Cl)c1ccccc1')
weights = sm.GetAtomicWeightsForFingerprint(
m1, m2, lambda m, i: sm.GetAPFingerprint(m, atomId=i, includeChirality=False))
for w in weights:
self.assertAlmostEqual(w, 0.100, 4)
weights = sm.GetAtomicWeightsForFingerprint(
m1, m2, lambda m, i: sm.GetAPFingerprint(m, atomId=i, includeChirality=True))
for i, w in enumerate(weights):
if i != 2:
self.assertAlmostEqual(w, 0.098, 3)
else:
self.assertAlmostEqual(w, -0.082, 3)
weights = sm.GetAtomicWeightsForFingerprint(
m1, m2, lambda m, i: sm.GetTTFingerprint(m, atomId=i, includeChirality=False))
for w in weights:
self.assertTrue(w > 0.0)
weights = sm.GetAtomicWeightsForFingerprint(
m1, m2, lambda m, i: sm.GetTTFingerprint(m, atomId=i, includeChirality=True))
for i, w in enumerate(weights):
if i > 4:
self.assertTrue(w > 0.0)
else:
self.assertTrue(w < 0.0)
weights = sm.GetAtomicWeightsForFingerprint(
m1, m2, lambda m, i: sm.GetMorganFingerprint(m, radius=1, atomId=i, useChirality=False))
weights2 = sm.GetAtomicWeightsForFingerprint(
m1, m2, lambda m, i: sm.GetMorganFingerprint(m, radius=1, atomId=i, useChirality=True))
# testing explicit values here seems silly, just check that the contribution of the
# chiral center drops:
self.assertTrue(weights[2] > weights2[2])
if __name__ == '__main__':
try:
import matplotlib
from rdkit.Chem.Draw.mplCanvas import Canvas
except ImportError:
pass
except RuntimeError: # happens with GTK can't initialize
pass
else:
unittest.main()
| bsd-3-clause |
fengzhe29888/gnuradio-old | gr-digital/examples/example_costas.py | 49 | 5316 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_costas(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.cst = digital.costas_loop_cc(bw, 2)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_cst = blocks.vector_sink_c()
self.vsnk_frq = blocks.vector_sink_f()
self.connect(self.src, self.rrc, self.chn, self.cst, self.vsnk_cst)
self.connect(self.rrc, self.vsnk_src)
self.connect((self.cst,1), self.vsnk_frq)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.0,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.707,
help="Set the simulation's phase offset [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_costas(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset)
put.run()
data_src = scipy.array(put.vsnk_src.data())
    # Convert the Costas loop's LO frequency from rads/sec to Hz
data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)
# adjust this to align with the data.
data_cst = scipy.array(3*[0,]+list(put.vsnk_cst.data()))
# Plot the Costas loop's LO frequency
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
s1 = f1.add_subplot(2,2,1)
s1.plot(data_frq)
s1.set_title("Costas LO")
s1.set_xlabel("Samples")
s1.set_ylabel("Frequency (normalized Hz)")
# Plot the IQ symbols
s3 = f1.add_subplot(2,2,2)
s3.plot(data_src.real, data_src.imag, "o")
s3.plot(data_cst.real, data_cst.imag, "rx")
s3.set_title("IQ")
s3.set_xlabel("Real part")
s3.set_ylabel("Imag part")
s3.set_xlim([-2, 2])
s3.set_ylim([-2, 2])
# Plot the symbols in time
s4 = f1.add_subplot(2,2,3)
s4.set_position([0.125, 0.05, 0.775, 0.4])
s4.plot(data_src.real, "o-")
s4.plot(data_cst.real, "rx-")
s4.set_title("Symbols")
s4.set_xlabel("Samples")
s4.set_ylabel("Real Part of Signals")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
bnaul/scikit-learn | examples/preprocessing/plot_all_scaling.py | 14 | 13721 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
=============================================================
Compare the effect of different scalers on data with outliers
=============================================================
Feature 0 (median income in a block) and feature 5 (number of households) of
the :ref:`california_housing_dataset` have very
different scales and contain some very large outliers. These two
characteristics lead to difficulties to visualize the data and, more
importantly, they can degrade the predictive performance of many machine
learning algorithms. Unscaled data can also slow down or even prevent the
convergence of many gradient-based estimators.
Indeed, many estimators are designed with the assumption that each feature takes
values close to zero or, more importantly, that all features vary on comparable
scales. In particular, metric-based and gradient-based estimators often assume
approximately standardized data (centered features with unit variances). A
notable exception are decision tree-based estimators that are robust to
arbitrary scaling of the data.
This example uses different scalers, transformers, and normalizers to bring the
data within a pre-defined range.
Scalers are linear (or more precisely affine) transformers and differ from each
other in the way they estimate the parameters used to shift and scale each
feature.
:class:`~sklearn.preprocessing.QuantileTransformer` provides non-linear
transformations in which distances
between marginal outliers and inliers are shrunk.
:class:`~sklearn.preprocessing.PowerTransformer` provides
non-linear transformations in which data is mapped to a normal distribution to
stabilize variance and minimize skewness.
Unlike the previous transformations, normalization refers to a per sample
transformation instead of a per feature transformation.
The following code is a bit verbose; feel free to jump directly to the analysis
of the results_.
"""
# Author: Raghav RV <rvraghav93@gmail.com>
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Thomas Unterthiner
# License: BSD 3 clause
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib import cm
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import PowerTransformer
from sklearn.datasets import fetch_california_housing
print(__doc__)
dataset = fetch_california_housing()
X_full, y_full = dataset.data, dataset.target
# Take only 2 features to make visualization easier
# Feature 0 has a long tail distribution.
# Feature 5 has a few but very large outliers.
X = X_full[:, [0, 5]]
distributions = [
('Unscaled data', X),
('Data after standard scaling',
StandardScaler().fit_transform(X)),
('Data after min-max scaling',
MinMaxScaler().fit_transform(X)),
('Data after max-abs scaling',
MaxAbsScaler().fit_transform(X)),
('Data after robust scaling',
RobustScaler(quantile_range=(25, 75)).fit_transform(X)),
('Data after power transformation (Yeo-Johnson)',
PowerTransformer(method='yeo-johnson').fit_transform(X)),
('Data after power transformation (Box-Cox)',
PowerTransformer(method='box-cox').fit_transform(X)),
('Data after quantile transformation (uniform pdf)',
QuantileTransformer(output_distribution='uniform')
.fit_transform(X)),
('Data after quantile transformation (gaussian pdf)',
QuantileTransformer(output_distribution='normal')
.fit_transform(X)),
('Data after sample-wise L2 normalizing',
Normalizer().fit_transform(X)),
]
# scale the output between 0 and 1 for the colorbar
y = minmax_scale(y_full)
# plasma does not exist in matplotlib < 1.5
cmap = getattr(cm, 'plasma_r', cm.hot_r)
def create_axes(title, figsize=(16, 6)):
fig = plt.figure(figsize=figsize)
fig.suptitle(title)
# define the axis for the first plot
left, width = 0.1, 0.22
bottom, height = 0.1, 0.7
bottom_h = height + 0.15
left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.1]
rect_histy = [left_h, bottom, 0.05, height]
ax_scatter = plt.axes(rect_scatter)
ax_histx = plt.axes(rect_histx)
ax_histy = plt.axes(rect_histy)
# define the axis for the zoomed-in plot
left = width + left + 0.2
left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.1]
rect_histy = [left_h, bottom, 0.05, height]
ax_scatter_zoom = plt.axes(rect_scatter)
ax_histx_zoom = plt.axes(rect_histx)
ax_histy_zoom = plt.axes(rect_histy)
# define the axis for the colorbar
left, width = width + left + 0.13, 0.01
rect_colorbar = [left, bottom, width, height]
ax_colorbar = plt.axes(rect_colorbar)
return ((ax_scatter, ax_histy, ax_histx),
(ax_scatter_zoom, ax_histy_zoom, ax_histx_zoom),
ax_colorbar)
def plot_distribution(axes, X, y, hist_nbins=50, title="",
x0_label="", x1_label=""):
ax, hist_X1, hist_X0 = axes
ax.set_title(title)
ax.set_xlabel(x0_label)
ax.set_ylabel(x1_label)
# The scatter plot
colors = cmap(y)
ax.scatter(X[:, 0], X[:, 1], alpha=0.5, marker='o', s=5, lw=0, c=colors)
# Removing the top and the right spine for aesthetics
# make nice axis layout
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
# Histogram for axis X1 (feature 5)
hist_X1.set_ylim(ax.get_ylim())
hist_X1.hist(X[:, 1], bins=hist_nbins, orientation='horizontal',
color='grey', ec='grey')
hist_X1.axis('off')
# Histogram for axis X0 (feature 0)
hist_X0.set_xlim(ax.get_xlim())
hist_X0.hist(X[:, 0], bins=hist_nbins, orientation='vertical',
color='grey', ec='grey')
hist_X0.axis('off')
# %%
# Two plots will be shown for each scaler/normalizer/transformer. The left
# figure will show a scatter plot of the full data set while the right figure
# will zoom in to show only the central 99 % of the data set, excluding the
# most extreme values. In addition, the marginal distributions for each
# feature will be shown on the sides of the scatter plot.
def make_plot(item_idx):
title, X = distributions[item_idx]
ax_zoom_out, ax_zoom_in, ax_colorbar = create_axes(title)
axarr = (ax_zoom_out, ax_zoom_in)
plot_distribution(axarr[0], X, y, hist_nbins=200,
x0_label="Median Income",
x1_label="Number of households",
title="Full data")
# zoom-in
zoom_in_percentile_range = (0, 99)
cutoffs_X0 = np.percentile(X[:, 0], zoom_in_percentile_range)
cutoffs_X1 = np.percentile(X[:, 1], zoom_in_percentile_range)
non_outliers_mask = (
np.all(X > [cutoffs_X0[0], cutoffs_X1[0]], axis=1) &
np.all(X < [cutoffs_X0[1], cutoffs_X1[1]], axis=1))
plot_distribution(axarr[1], X[non_outliers_mask], y[non_outliers_mask],
hist_nbins=50,
x0_label="Median Income",
x1_label="Number of households",
title="Zoom-in")
norm = mpl.colors.Normalize(y_full.min(), y_full.max())
mpl.colorbar.ColorbarBase(ax_colorbar, cmap=cmap,
norm=norm, orientation='vertical',
label='Color mapping for values of y')
# %%
# .. _results:
#
# Original data
# -------------
#
# Each transformation is plotted showing two transformed features, with the
# left plot showing the entire dataset, and the right zoomed-in to show the
# dataset without the marginal outliers. A large majority of the samples are
# compacted to a specific range, [0, 10] for the median income and [0, 6] for
# the number of households. Note that there are some marginal outliers (some
# blocks have more than 1200 households). Therefore, a specific pre-processing
# can be very beneficial depending on the application. In the following, we
# present some insights and behaviors of those pre-processing methods in the
# presence of marginal outliers.
make_plot(0)
# %%
# StandardScaler
# --------------
#
# :class:`~sklearn.preprocessing.StandardScaler` removes the mean and scales
# the data to unit variance. The scaling shrinks the range of the feature
# values as shown in the left figure below.
# However, the outliers have an influence when computing the empirical mean and
# standard deviation. Note in particular that because the outliers on each
# feature have different magnitudes, the spread of the transformed data on
# each feature is very different: most of the data lie in the [-2, 4] range for
# the transformed median income feature while the same data is squeezed in the
# smaller [-0.2, 0.2] range for the transformed number of households.
#
# :class:`~sklearn.preprocessing.StandardScaler` therefore cannot guarantee
# balanced feature scales in the
# presence of outliers.
make_plot(1)
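# %%
# As a quick illustration (not part of the original example), standard scaling
# is equivalent to subtracting the per-feature mean and dividing by the
# per-feature standard deviation. This sketch reuses the array ``X`` and the
# imports defined above.
X_manual = (X - X.mean(axis=0)) / X.std(axis=0)
assert np.allclose(X_manual, StandardScaler().fit_transform(X))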
# %%
# MinMaxScaler
# ------------
#
# :class:`~sklearn.preprocessing.MinMaxScaler` rescales the data set such that
# all feature values are in
# the range [0, 1] as shown in the right panel below. However, this scaling
# compresses all inliers into the narrow range [0, 0.005] for the transformed
# number of households.
#
# Both :class:`~sklearn.preprocessing.StandardScaler` and
# :class:`~sklearn.preprocessing.MinMaxScaler` are very sensitive to the
# presence of outliers.
make_plot(2)
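# %%
# An equivalent manual computation (a sketch, assuming the default
# ``feature_range=(0, 1)`` and the array ``X`` defined above): each feature is
# shifted by its minimum and divided by its overall range.
X_manual = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
assert np.allclose(X_manual, MinMaxScaler().fit_transform(X))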
# %%
# MaxAbsScaler
# ------------
#
# :class:`~sklearn.preprocessing.MaxAbsScaler` is similar to
# :class:`~sklearn.preprocessing.MinMaxScaler` except that the
# values are mapped to the range [-1, 1] by dividing by each feature's maximum
# absolute value. On positive-only data, both scalers behave similarly.
# :class:`~sklearn.preprocessing.MaxAbsScaler` therefore also suffers from
# the presence of large outliers.
make_plot(3)
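# %%
# A minimal sketch of the equivalent computation: each feature is divided by
# its maximum absolute value, so the scaling statistic is a single number per
# feature (reusing the array ``X`` defined above).
X_manual = X / np.abs(X).max(axis=0)
assert np.allclose(X_manual, MaxAbsScaler().fit_transform(X))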
# %%
# RobustScaler
# ------------
#
# Unlike the previous scalers, the centering and scaling statistics of
# :class:`~sklearn.preprocessing.RobustScaler`
# are based on percentiles and are therefore not influenced by a small
# number of very large marginal outliers. Consequently, the resulting range of
# the transformed feature values is larger than for the previous scalers and,
# more importantly, is approximately the same for both features: most of the
# transformed values lie in a [-2, 3] range as seen in the zoomed-in figure.
# Note that the outliers themselves are still present in the transformed data.
# If a separate outlier clipping is desirable, a non-linear transformation is
# required (see below).
make_plot(4)
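# %%
# A sketch of the equivalent computation for ``quantile_range=(25, 75)``: the
# per-feature median is removed and the result is divided by the interquartile
# range, so a handful of extreme values barely moves either statistic.
q25, q75 = np.percentile(X, [25, 75], axis=0)
X_manual = (X - np.median(X, axis=0)) / (q75 - q25)
assert np.allclose(X_manual,
                   RobustScaler(quantile_range=(25, 75)).fit_transform(X))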
# %%
# PowerTransformer
# ----------------
#
# :class:`~sklearn.preprocessing.PowerTransformer` applies a power
# transformation to each feature to make the data more Gaussian-like in order
# to stabilize variance and minimize skewness. Currently the Yeo-Johnson
# and Box-Cox transforms are supported and the optimal
# scaling factor is determined via maximum likelihood estimation in both
# methods. By default, :class:`~sklearn.preprocessing.PowerTransformer` applies
# zero-mean, unit variance normalization. Note that
# Box-Cox can only be applied to strictly positive data. Income and number of
# households happen to be strictly positive, but if negative values are present,
# the Yeo-Johnson transform is preferred.
make_plot(5)
make_plot(6)
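# %%
# The power transforms above use ``standardize=True`` by default, so the output
# of each method should be approximately zero-mean with unit variance per
# feature. A quick, non-authoritative check for the Yeo-Johnson variant:
X_yj = PowerTransformer(method='yeo-johnson').fit_transform(X)
print("per-feature means:", X_yj.mean(axis=0))
print("per-feature stds: ", X_yj.std(axis=0))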
# %%
# QuantileTransformer (uniform output)
# ------------------------------------
#
# :class:`~sklearn.preprocessing.QuantileTransformer` applies a non-linear
# transformation such that the
# probability density function of each feature will be mapped to a uniform
# or Gaussian distribution. In this case, all the data, including outliers,
# will be mapped to a uniform distribution with the range [0, 1], making
# outliers indistinguishable from inliers.
#
# :class:`~sklearn.preprocessing.RobustScaler` and
# :class:`~sklearn.preprocessing.QuantileTransformer` are robust to outliers in
# the sense that adding or removing outliers in the training set will yield
# approximately the same transformation. But contrary to
# :class:`~sklearn.preprocessing.RobustScaler`,
# :class:`~sklearn.preprocessing.QuantileTransformer` will also automatically
# collapse any outlier by setting them to the a priori defined range boundaries
# (0 and 1). This can result in saturation artifacts for extreme values.
make_plot(7)
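# %%
# Conceptually, the uniform output is the empirical CDF of each feature: a
# value is replaced by (roughly) its rank within the column, rescaled to
# [0, 1]. The sketch below is only approximate, because the transformer
# interpolates between ``n_quantiles`` reference quantiles and averages ties.
from scipy.stats import rankdata

X_uni = QuantileTransformer(output_distribution='uniform').fit_transform(X)
approx_cdf = (rankdata(X[:, 0]) - 1) / (len(X) - 1)
print("max deviation from rank-based CDF:",
      np.abs(X_uni[:, 0] - approx_cdf).max())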
##############################################################################
# QuantileTransformer (Gaussian output)
# -------------------------------------
#
# To map to a Gaussian distribution, set the parameter
# ``output_distribution='normal'``.
make_plot(8)
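# %%
# The Gaussian output is obtained by pushing the same uniform quantiles through
# the inverse normal CDF, with the extreme tails clipped. Away from the tails,
# the two outputs should therefore agree up to that mapping -- a rough check:
from scipy.stats import norm

X_uni = QuantileTransformer(output_distribution='uniform').fit_transform(X)
X_gau = QuantileTransformer(output_distribution='normal').fit_transform(X)
interior = (X_uni[:, 0] > 0.01) & (X_uni[:, 0] < 0.99)
print("max difference on the interior:",
      np.abs(norm.ppf(X_uni[interior, 0]) - X_gau[interior, 0]).max())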
# %%
# Normalizer
# ----------
#
# The :class:`~sklearn.preprocessing.Normalizer` rescales the vector for each
# sample to have unit norm,
# independently of the distribution of the samples. It can be seen on both
# figures below where all samples are mapped onto the unit circle. In our
# example the two selected features have only positive values; therefore the
# transformed data only lie in the positive quadrant. This would not be the
# case if some original features had a mix of positive and negative values.
make_plot(9)
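# %%
# A minimal sketch of the equivalent per-sample computation with the default
# L2 norm: each row of ``X`` is divided by its Euclidean length, which is why
# every sample lands on the unit circle in the plots above.
X_manual = X / np.linalg.norm(X, axis=1, keepdims=True)
assert np.allclose(X_manual, Normalizer().fit_transform(X))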
plt.show()
| bsd-3-clause |