# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ECG Heartbeat Classification
# + pycharm={"name": "#%%\n", "is_executing": false}
import os
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from utils import get_model, helpers
from experiments import run_mitbih, run_ptbdl
# + pycharm={"name": "#%%\n", "is_executing": false}
# Make directory
model_directory = "./models"
if not os.path.exists(model_directory):
os.makedirs(model_directory)
# -
# ### MITBIH Dataset
# + [markdown] pycharm={"name": "#%% md\n", "is_executing": false}
# #### Data Exploration
# + pycharm={"name": "#%%\n", "is_executing": false}
# %run ./data_exploration/data_exploration_mibih.ipynb
# -
# #### Neural Network Experiments
# + pycharm={"name": "#%%\n", "is_executing": false}
models = [
"rnn_lstm",
"rnn_gru",
"rnn_gru_bidir",
"rnn_lstm_bidir",
]
# + pycharm={"name": "#%%\n", "is_executing": false}
df_train = pd.read_csv("../data/ECG_Heartbeat_Classification/heartbeat/mitbih_train.csv", header=None)
df_train = df_train.sample(frac=1)
df_test = pd.read_csv("../data/ECG_Heartbeat_Classification/heartbeat/mitbih_test.csv", header=None)
Y = np.array(df_train[187].values).astype(np.int8)
X = np.array(df_train[list(range(187))].values)[..., np.newaxis]
Y_test = np.array(df_test[187].values).astype(np.int8)
X_test = np.array(df_test[list(range(187))].values)[..., np.newaxis]
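# Optional sanity check (not part of the original pipeline): column 187 holds the beat-class
# label, and the five MITBIH classes are heavily imbalanced, so printing the counts helps put
# the later accuracy numbers in context.
unique_classes, class_counts = np.unique(Y, return_counts=True)
print(dict(zip(unique_classes.tolist(), class_counts.tolist())))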
# + pycharm={"name": "#%%\n", "is_executing": false}
if "rnn_lstm" in models:
run_mitbih.run_lstm(model_directory, X, Y, X_test, Y_test)
if "rnn_gru" in models:
run_mitbih.run_gru(model_directory, X, Y, X_test, Y_test)
if "rnn_lstm_bidir" in models:
run_mitbih.run_lstm_bidir(model_directory, X, Y, X_test, Y_test)
if "rnn_gru_bidir" in models:
run_mitbih.run_gru_bidir(model_directory, X, Y, X_test, Y_test)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### PTBDB Dataset
# -
# #### Data Exploration
# + pycharm={"name": "#%%\n", "is_executing": false}
# %run ./data_exploration/data_exploration_ptbdb.ipynb
# -
# #### Neural Network Experiments
# + pycharm={"name": "#%%\n", "is_executing": false}
models = [
"rnn_lstm",
"rnn_gru",
"rnn_gru_bidir",
"rnn_lstm_bidir",
"rnn_transferlearning",
]
# + pycharm={"name": "#%%\n", "is_executing": false}
df_1 = pd.read_csv("../data/ECG_Heartbeat_Classification/ptbdb_normal.csv", header=None)
df_2 = pd.read_csv("../data/ECG_Heartbeat_Classification/ptbdb_abnormal.csv", header=None)
df = pd.concat([df_1, df_2])
df_train, df_test = train_test_split(
df, test_size=0.2, random_state=1337, stratify=df[187]
)
Y = np.array(df_train[187].values).astype(np.int8)
X = np.array(df_train[list(range(187))].values)[..., np.newaxis]
Y_test = np.array(df_test[187].values).astype(np.int8)
X_test = np.array(df_test[list(range(187))].values)[..., np.newaxis]
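# Optional check (not part of the original pipeline): the split above is stratified on
# column 187, so the normal/abnormal ratio should be nearly identical in train and test.
print(df_train[187].value_counts(normalize=True))
print(df_test[187].value_counts(normalize=True))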
# + pycharm={"name": "#%%\n", "is_executing": false}
if "rnn_lstm" in models:
run_ptbdl.run_lstm(model_directory, X, Y, X_test, Y_test)
if "rnn_gru" in models:
run_ptbdl.run_gru(model_directory, X, Y, X_test, Y_test)
if "rnn_lstm_bidir" in models:
run_ptbdl.run_lstm_bidir(model_directory, X, Y, X_test, Y_test)
if "rnn_gru_bidir" in models:
run_ptbdl.run_gru_bidir(model_directory, X, Y, X_test, Y_test)
if "rnn_transferlearning" in models:
base_model = get_model.rnn_gru_bidir(
nclass=5, dense_layers=[64, 16, 8], binary=False
)
file_name = "mitbih_rnn_gru_bidir"
file_path = os.path.join(model_directory, file_name + ".h5")
base_model.load_weights(file_path)
run_ptbdl.run_transfer_learning(base_model, model_directory, X, Y, X_test, Y_test)
# + pycharm={"name": "#%%\n"}
# Source notebook: code/run_all.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Overview: Game Maps
# +
# Pandas
import numpy as np
import pandas as pd
# Plot
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.style.use('seaborn-whitegrid')
# %matplotlib inline
# -
# Read data
# +
dataset_folder = '../input'
plot_folder = '../plot'
courses = pd.read_csv('%s/%s.csv' % (dataset_folder, 'courses'), sep='\t', encoding='utf-8')
players = pd.read_csv('%s/%s.csv' % (dataset_folder, 'players'), sep='\t', encoding='utf-8')
# -
# ## Overview
courses.head(3)
# ### Difficulty
# palette of colors
palette = sns.color_palette('cubehelix', 4)
sns.palplot(palette)
# ### Game Style
# +
# autopct pie plot - function
def func(pct, allvals):
absolute = float(pct/100.*np.sum(allvals))/1000.0
return "{:.1f}%\n({:.1f}k)".format(pct, absolute)
# plot
fontsize = 14
# +
# values
labels = courses['difficulty'].unique().tolist()
values = [sum(courses['difficulty'] == label) for label in labels]
print(list(zip(labels, values)))
explode = [0.03] * len(values)
# plot
fig, ax = plt.subplots()
ax.pie(values, autopct=lambda pct: func(pct, values), pctdistance=0.45,
colors=palette, explode=explode, labels=labels,
textprops={'fontsize':fontsize,'weight':'bold'})
centre_circle = plt.Circle((0,0),0.75,fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
# figure
ax.axis('equal')
plt.tight_layout()
plt.savefig('%s/%s.pdf' % (plot_folder, 'difficulty'), dpi=300, bbox_inches='tight')
plt.show()
# +
# values
labels = courses['gameStyle'].unique().tolist()
values = [sum(courses['gameStyle'] == label) for label in labels]
print(list(zip(labels, values)))
explode = [0.03] * len(values)
# plot
fig, ax = plt.subplots()
ax.pie(values, autopct=lambda pct: func(pct, values), pctdistance=0.45,
colors=palette, explode=explode, labels=labels,
textprops={'fontsize':fontsize,'weight':'bold'})
centre_circle = plt.Circle((0,0),0.75,fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
# figure
ax.axis('equal')
plt.tight_layout()
plt.savefig('%s/%s.pdf' % (plot_folder, 'gameStyle'), dpi=300, bbox_inches='tight')
plt.show()
# -
# ### Makers - Players who develop maps
makers = courses['maker'].value_counts().to_dict()
print('number of makers: %d' % (len(makers)))
# Who produces the most courses?
# Top-25 makers (by number of courses)
# +
# values
top = 25
labels = list(makers.keys())[0:top]
x_axis = range(len(labels))
y_axis = list(makers.values())[0:top]
# plot
fig, ax = plt.subplots()
plt.bar(x_axis, y_axis, align='center', color=palette[0])
plt.xticks(x_axis, labels, rotation=90)
plt.show()
# -
# ### Courses by country
players = players.set_index('id')
players.head()
# players
df = pd.DataFrame(makers, index=['courses']).transpose()
df = df.rename(columns={'index':'id'})
df2 = pd.concat([df, players], sort=True, axis=1)
df2 = df2.dropna(subset=['courses']).sort_values(by=['courses'], ascending=False)
df2.head()
# Counting number of courses by country
countries = {flag:0 for flag in df2['flag'].unique().tolist()}
for maker, row in df2.iterrows():
countries[row['flag']] += int(row['courses'])
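# Note: an equivalent vectorised alternative to the loop above (assuming the same df2 columns)
# would be: countries = df2.groupby('flag')['courses'].sum().astype(int).to_dict()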
# +
# values
labels = list(countries.keys())
values = [countries[label] for label in labels]
print(countries)
explode = [0.03] * len(labels)
# plot
fig, ax = plt.subplots()
ax.pie(values, autopct=lambda pct: func(pct, values), pctdistance=0.45,
colors=palette, explode=explode, labels=labels,
textprops={'fontsize':fontsize,'weight':'bold'})
centre_circle = plt.Circle((0,0),0.75,fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
# figure
ax.axis('equal')
plt.tight_layout()
plt.savefig('%s/%s.pdf' % (plot_folder, 'countries'), dpi=300, bbox_inches='tight')
plt.show()
# -
# Source notebook: jupyter/game-maps.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Dependencies
# + _kg_hide-input=true
import json, warnings, shutil, glob, time
from jigsaw_utility_scripts import *
from transformers import TFXLMRobertaModel, XLMRobertaConfig
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers, metrics, losses, layers
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
SEED = 0
seed_everything(SEED)
warnings.filterwarnings("ignore")
# -
# ## TPU configuration
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _kg_hide-input=true _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
strategy, tpu = set_up_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
AUTO = tf.data.experimental.AUTOTUNE
# -
# # Load data
# + _kg_hide-input=true
database_base_path = '/kaggle/input/jigsaw-dataset-split-pb-roberta-large-192-att-mask/'
k_fold = pd.read_csv(database_base_path + '5-fold.csv')
valid_df = pd.read_csv("/kaggle/input/jigsaw-multilingual-toxic-comment-classification/validation.csv", usecols=['comment_text', 'toxic', 'lang'])
print('Train set samples: %d' % len(k_fold))
print('Validation set samples: %d' % len(valid_df))
display(k_fold.head())
# Unzip files
# !tar -xvf /kaggle/input/jigsaw-dataset-split-pb-roberta-large-192-att-mask/fold_1.tar.gz
# # !tar -xvf /kaggle/input/jigsaw-dataset-split-pb-roberta-large-192-att-mask/fold_2.tar.gz
# # !tar -xvf /kaggle/input/jigsaw-dataset-split-pb-roberta-large-192-att-mask/fold_3.tar.gz
# # !tar -xvf /kaggle/input/jigsaw-dataset-split-pb-roberta-large-192-att-mask/fold_4.tar.gz
# # !tar -xvf /kaggle/input/jigsaw-dataset-split-pb-roberta-large-192-att-mask/fold_5.tar.gz
# -
# # Model parameters
# + _kg_hide-input=false
base_path = '/kaggle/input/jigsaw-transformers/XLM-RoBERTa/'
config = {
"MAX_LEN": 192,
"BATCH_SIZE": 16 * strategy.num_replicas_in_sync,
"EPOCHS": 2,
"LEARNING_RATE": 1e-5,
"ES_PATIENCE": 1,
"N_FOLDS": 1,
"base_model_path": base_path + 'tf-xlm-roberta-large-tf_model.h5',
"config_path": base_path + 'xlm-roberta-large-config.json'
}
with open('config.json', 'w') as json_file:
    json.dump(config, json_file)
# -
# # Model
# +
module_config = XLMRobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)
def model_fn(MAX_LEN):
input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
base_model = TFXLMRobertaModel.from_pretrained(config['base_model_path'], config=module_config)
last_hidden_state, _= base_model({'input_ids': input_ids, 'attention_mask': attention_mask})
cls_token = last_hidden_state[:, 0, :]
# x = layers.GlobalAveragePooling1D()(last_hidden_state)
output = layers.Dense(1, activation='sigmoid', name='output')(cls_token)
model = Model(inputs=[input_ids, attention_mask], outputs=output)
return model
# +
# Datasets
def get_training_dataset(x_train, y_train, batch_size, buffer_size):
dataset = tf.data.Dataset.from_tensor_slices(({'input_ids': x_train[0],
'attention_mask': x_train[1]}, y_train))
dataset = dataset.repeat()
dataset = dataset.shuffle(2048, seed=SEED)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(buffer_size)
return dataset
def get_validation_dataset(x_valid, y_valid, batch_size, buffer_size, repeated=False):
dataset = tf.data.Dataset.from_tensor_slices(({'input_ids': x_valid[0],
'attention_mask': x_valid[1]}, y_valid))
if repeated:
dataset = dataset.repeat()
dataset = dataset.shuffle(2048)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.cache()
dataset = dataset.prefetch(buffer_size)
return dataset
def get_test_dataset(x_test, batch_size, buffer_size):
dataset = tf.data.Dataset.from_tensor_slices({'input_ids': x_test[0],
'attention_mask': x_test[1]})
dataset = dataset.batch(batch_size)
return dataset
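# Note: drop_remainder=True in the training/validation pipelines above keeps every batch the
# same (static) shape, which TPU execution generally requires; the final partial batch is dropped.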
# -
# # Train
# +
history_list = []
for n_fold in range(config['N_FOLDS']):
tf.tpu.experimental.initialize_tpu_system(tpu)
print('\nFOLD: %d' % (n_fold+1))
# Load data
base_data_path = 'fold_%d/' % (n_fold+1)
x_train = np.load(base_data_path + 'x_train.npy')
y_train = np.load(base_data_path + 'y_train.npy').reshape(x_train.shape[1], 1)
# x_valid = np.load(base_data_path + 'x_valid.npy')
x_valid_ml = np.load(database_base_path + 'x_valid.npy')
y_valid_ml = np.load(database_base_path + 'y_valid.npy').reshape(x_valid_ml.shape[1], 1)
step_size = x_train.shape[1] // config['BATCH_SIZE']
valid_step_size = x_valid_ml.shape[1] // config['BATCH_SIZE']
### Delete data dir
shutil.rmtree(base_data_path)
# Build TF datasets
train_dist_ds = strategy.experimental_distribute_dataset(get_training_dataset(x_train, y_train, config['BATCH_SIZE'], AUTO))
valid_dist_ds = strategy.experimental_distribute_dataset(get_validation_dataset(x_valid_ml, y_valid_ml, config['BATCH_SIZE'], AUTO, repeated=True))
train_data_iter = iter(train_dist_ds)
valid_data_iter = iter(valid_dist_ds)
# Step functions
@tf.function
def train_step(data_iter):
def train_step_fn(x, y):
with tf.GradientTape() as tape:
probabilities = model(x, training=True)
loss = loss_fn(y, probabilities)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
train_accuracy.update_state(y, probabilities)
train_auc.update_state(y, probabilities)
train_loss.update_state(loss)
for _ in tf.range(step_size):
strategy.experimental_run_v2(train_step_fn, next(data_iter))
@tf.function
def valid_step(data_iter):
def valid_step_fn(x, y):
probabilities = model(x, training=False)
loss = loss_fn(y, probabilities)
valid_accuracy.update_state(y, probabilities)
valid_auc.update_state(y, probabilities)
valid_loss.update_state(loss)
for _ in tf.range(valid_step_size):
strategy.experimental_run_v2(valid_step_fn, next(data_iter))
# Train model
model_path = 'model_fold_%d.h5' % (n_fold+1)
with strategy.scope():
model = model_fn(config['MAX_LEN'])
optimizer = optimizers.Adam(lr=config['LEARNING_RATE'])
train_accuracy = metrics.BinaryAccuracy()
valid_accuracy = metrics.BinaryAccuracy()
train_auc = metrics.AUC()
valid_auc = metrics.AUC()
train_loss = metrics.Sum()
valid_loss = metrics.Sum()
loss_fn = losses.binary_crossentropy
# Setup training loop
step = 0
epoch = 0
epoch_steps = 0
epoch_start_time = time.time()
history = {'loss': [], 'val_loss': [], 'auc': [], 'val_auc': [],
'accuracy': [], 'val_accuracy': []}
patience_cnt = 0
best_val = float("inf")
### Train model
while True:
# run training step
train_step(train_data_iter)
epoch_steps += step_size
step += step_size
# validation run at the end of each epoch
if (step // step_size) > epoch:
# validation run
valid_epoch_steps = 0
valid_step(valid_data_iter)
valid_epoch_steps += valid_step_size
# compute metrics
history['accuracy'].append(train_accuracy.result().numpy())
history['auc'].append(train_auc.result().numpy())
history['loss'].append(train_loss.result().numpy() / (config['BATCH_SIZE'] * epoch_steps))
history['val_accuracy'].append(valid_accuracy.result().numpy())
history['val_auc'].append(valid_auc.result().numpy())
history['val_loss'].append(valid_loss.result().numpy() / (config['BATCH_SIZE'] * valid_epoch_steps))
# report metrics
epoch_time = time.time() - epoch_start_time
print('\nEPOCH {:d}/{:d}'.format(epoch+1, config['EPOCHS']))
print('time: {:0.1f}s'.format(epoch_time),
'loss: {:0.4f}'.format(history['loss'][-1]),
'auc: {:0.4f}'.format(history['auc'][-1]),
'accuracy: {:0.4f}'.format(history['accuracy'][-1]),
'val_loss: {:0.4f}'.format(history['val_loss'][-1]),
'val_auc: {:0.4f}'.format(history['val_auc'][-1]),
'val_accuracy: {:0.4f}'.format(history['val_accuracy'][-1]))
# set up next epoch
epoch = step // step_size
epoch_steps = 0
epoch_start_time = time.time()
train_accuracy.reset_states()
train_auc.reset_states()
train_loss.reset_states()
valid_accuracy.reset_states()
valid_auc.reset_states()
valid_loss.reset_states()
if epoch >= config['EPOCHS']:
model.save_weights(model_path)
break
# Early stopping monitor
if history['val_loss'][-1] <= best_val:
best_val = history['val_loss'][-1]
model.save_weights(model_path)
print('Saved model weights at "%s"' % model_path)
else:
patience_cnt += 1
if patience_cnt > config['ES_PATIENCE']:
print('Epoch %05d: early stopping' % epoch)
break
history_list.append(history)
# # Fine-tune on validation set
# print('\nFine-tune on validation set')
# n_steps2 = x_valid_ml.shape[1] // config['BATCH_SIZE']
# history2 = model.fit(get_training_dataset(x_valid_ml, y_valid_ml, config['BATCH_SIZE'], AUTO),
# steps_per_epoch=n_steps2,
# epochs=config['EPOCHS'],
# verbose=2).history
# Make predictions
# train_preds = model.predict(get_test_dataset(np.load(base_data_path + 'x_train.npy'), config['BATCH_SIZE'], AUTO))
# valid_preds = model.predict(get_test_dataset(np.load(base_data_path + 'x_valid.npy'), config['BATCH_SIZE'], AUTO))
# valid_ml_preds = model.predict(get_test_dataset(np.load(database_base_path + 'x_valid.npy'), config['BATCH_SIZE'], AUTO))
# k_fold.loc[k_fold['fold_%d' % (n_fold+1)] == 'train', 'pred_%d' % (n_fold+1)] = np.round(train_preds)
# k_fold.loc[k_fold['fold_%d' % (n_fold+1)] == 'validation', 'pred_%d' % (n_fold+1)] = np.round(valid_preds)
# valid_df['pred_%d' % (n_fold+1)] = np.round(valid_ml_preds)
# -
# ## Model loss graph
# + _kg_hide-input=true
sns.set(style="whitegrid")
for n_fold in range(config['N_FOLDS']):
print('Fold: %d' % (n_fold+1))
plot_metrics(history_list[n_fold])
# -
# # Model evaluation
# + _kg_hide-input=true
# display(evaluate_model(k_fold, config['N_FOLDS']).style.applymap(color_map))
# -
# # Confusion matrix
# + _kg_hide-input=true
# for n_fold in range(config['N_FOLDS']):
# print('Fold: %d' % (n_fold+1))
# train_set = k_fold[k_fold['fold_%d' % (n_fold+1)] == 'train']
# validation_set = k_fold[k_fold['fold_%d' % (n_fold+1)] == 'validation']
# plot_confusion_matrix(train_set['toxic'], train_set['pred_%d' % (n_fold+1)],
# validation_set['toxic'], validation_set['pred_%d' % (n_fold+1)])
# -
# # Model evaluation by language
# + _kg_hide-input=true
# display(evaluate_model_lang(valid_df, config['N_FOLDS']).style.applymap(color_map))
# -
# # Visualize predictions
# + _kg_hide-input=true
pd.set_option('max_colwidth', 120)
print('English validation set')
display(k_fold[['comment_text', 'toxic'] + [c for c in k_fold.columns if c.startswith('pred')]].head(10))
print('Multilingual validation set')
display(valid_df[['comment_text', 'toxic'] + [c for c in k_fold.columns if c.startswith('pred')]].head(10))
# -
# # Test set predictions
model_path_list = glob.glob('/kaggle/working/' + '*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep = "\n")
# +
x_test = np.load(database_base_path + 'x_test.npy')
NUM_TEST_SAMPLES = x_test.shape[1]
test_preds = np.zeros((NUM_TEST_SAMPLES, 1))
for model_path in model_path_list:
tf.tpu.experimental.initialize_tpu_system(tpu)
print(model_path)
with strategy.scope():
model = model_fn(config['MAX_LEN'])
model.load_weights(model_path)
test_preds += model.predict(get_test_dataset(x_test, config['BATCH_SIZE'], AUTO)) / len(model_path_list)
# + _kg_hide-input=true
submission = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/sample_submission.csv')
submission['toxic'] = test_preds
submission.to_csv('submission.csv', index=False)
display(submission.describe())
display(submission.head(10))
# Source notebook: Model backlog/Train/31-jigsaw-train-1fold-xlm-roberta-large-optimized.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/tqiaowen/marchantia-stress/blob/main/marchantia_stress.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="f4f_0f6taBn9"
# # Code to replicate analysis
#
# ## Chapters
# ### 1. Miscellaneous preparation steps
# 1. Install non-default modules and upgrade modules
# 1. Mount Google Drive
# 1. Set paths (point to google drive folder to work in)
# 1. Download files (initial set up, skip if continuing)
# 1. Import modules, initialise paths
#
# ### 2. Generating data for analysis
# 1. Differential Gene Expression (DESeq2)
# 1. Supp. Fig 4: Comparison of DEGs between two controls
# 1. Diurnal Gene Expression (JTK_cycle)
#
# ### 3. Analysis and plotting
# 1. Figure 1 & Supp. Fig 1: Measurements and Student's t-test
# 1. Figure 2: Interspecies comparison (Biological processes)
# 1. Supp. Fig 5: Interspecies comparison (Gene families)
# 1. Figure 3: Stress responsiveness
# 1. Figure 4: Upset plot and summary of DEGs in Marchantia
# 1. Figure 5, Supp. Fig. 6 & 7: Inter-stress (Marchantia only) comparison
# 1. Figure 6: Diurnal gene expression
# 1. Supp. Fig 2: QC of RNA-seq data
# 1. Supp. Fig 3: Volcano plots (DESeq2)
#
# 1. Supp. Fig 8: Overview of diurnal data
#
# ### 4. Experimental
# 1. Download RNA-seq data
# 1. Mapping and generating expression matrix
# + [markdown] id="63O3M3hPgwyv"
# # 1. Miscellaneous preparation steps
# + [markdown] id="uGwkg7hkhMYY"
# ### 1.1 Install non-default modules and upgrade modules
# + id="9THYinX_gpJ4"
# install non-default colab modules
# Restart runtime after installation and skip to next step
# !pip install upsetplot
# !pip install matplotlib --upgrade
# + [markdown] id="DutthK8QhAkJ"
# ### 1.2 Mount Google Drive
# + id="gLAcF40pZUM4"
from google.colab import drive
drive.mount('/content/gdrive')
# !rm -rf /content/sample_data
# + id="50GdOAqr8D6D"
#@title 1.3 Set path {display-mode: "form"}
#@markdown Enter the path of the directory you want to work in.
drive_path = '/content/gdrive/My Drive/' #@param {type: 'string'}
# + [markdown] id="5-ZWhJDzo7e6"
# ### 1.4 Download files (first time only)
# + id="SFhEwOSCGeF_"
# Downloads necessary files to perform analyses [only need to be done once]
# https://gist.github.com/iamtekeste/3cdfd0366ebfd2c0d805 download raw files directtly from Google Drive
# !wget --no-check-certificate -r "https://drive.google.com/uc?id=1cbKgWbEWtstl_2_rb06tI_D-vseprPnT&export=download" -O marchantia_stress.zip
dir_path = drive_path + 'marchantia_stress/'
dir_path_safe = dir_path.replace(' ', '\ ')
# !mkdir $dir_path_safe
# !unzip marchantia_stress.zip -d $dir_path_safe
# + [markdown] id="u6GBaG6BQBR6"
# ### 1.5 Import modules, set paths
# + id="m3F4Mqz3P__H"
# import modules
import os
import string
# %load_ext rpy2.ipython
import pandas as pd
import math
from matplotlib_venn import venn2
from matplotlib import pyplot as plt
from ast import literal_eval
import seaborn as sns
from collections import Counter
import random
from statsmodels.stats.multitest import multipletests
import numpy as np
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial.distance import squareform
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
dir_path = drive_path + 'marchantia_stress/'
dir_path_safe = dir_path.replace(' ', '\ ')
# + [markdown] id="Dqb8m7ATg19d"
# # 2. Generating data for analysis
# + [markdown] id="bzX8KZDXIMXV"
# ### 2.1 Differential Gene Expression
# + id="DRBwbtjMVsSF"
# Making necessary directories for outputs
mpo_path = dir_path + "prep_files/mpo/deseq/"
osa_path = dir_path + "prep_files/osa/deseq/"
mpo_path_safe = dir_path_safe + "prep_files/mpo/deseq/"
osa_path_safe = dir_path_safe + "prep_files/osa/deseq/"
if not os.path.exists(mpo_path):
    # !mkdir -p $mpo_path_safe
    print("Directories made: " + mpo_path)
if not os.path.exists(osa_path):
    # !mkdir -p $osa_path_safe
    print("Directories made: " + osa_path)
# + id="a_uzqH_PUYQ-"
# Installing DESeq2
# %%R
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("DESeq2", ask = FALSE)
# + id="rjetqXevenIe"
# To pull python variables
mpo_path = dir_path + "prep_files/mpo/deseq/"
osa_path = dir_path + "prep_files/osa/deseq/"
# %R -i dir_path
# %Rget dir_path
# %R -i dir_path_safe
# %Rget dir_path_safe
# %R -i mpo_path
# %Rget mpo_path
# %R -i osa_path
# %Rget osa_path
# + id="0HBke3tVIPk5"
# adapted from DESeq2_stressonly_phase1n2.R
# %%R
# DESeq2 (Marchantia)
library('DESeq2')
library('RColorBrewer')
sink(paste0(mpo_path, "phase1n2_sum.txt"), type="output")
raw_counts <- read.table(file = paste0(dir_path, 'prep_files/all_stress_raw.tsv'), sep = '\t', header = TRUE)
raw_counts <- data.frame(raw_counts, row.names = 1)
stresses <- c("controlH2", "controlD2", "H", "C", "M", "S", "L", "D", "N",
"HS", "HM", "HN", "CS", "CM", "CN", "SM", "ML", "NL", "MN",
"SD", "MD", "ND", "HD", "CD", "CL", "LS", "SN")
colData = read.csv(paste0(dir_path, 'summary_files/all_stress.txt'), sep = '\t', row.names=1, header = FALSE)
names(colData) <- c('condition')
dds = DESeqDataSetFromMatrix(countData=raw_counts,
colData=colData,
design=~condition)
dds = DESeq(dds)
y = 2
for (x in 1:2){
for (i in y:length(stresses)){
if (stresses[i] != stresses[x]){
res = results(dds, contrast=c("condition", stresses[i], stresses[x]))
res = res[order(res$pvalue),]
resSig = subset(res, res$padj < 0.05 & abs(res$log2FoldChange) > 1)
resSig = resSig[ order(resSig$padj), ]
print(paste(stresses[i], 'vs', stresses[x]))
summary(res)
summary(resSig)
write.table(as.data.frame(res), file=paste(mpo_path, stresses[i], stresses[x], '_res.tsv', sep = ''),
quote=FALSE, sep='\t', col.names = NA)
write.table(as.data.frame(resSig), file=paste(mpo_path, stresses[i], stresses[x], '_resSig.tsv', sep = ''),
quote=FALSE, sep='\t', col.names = NA)
}
}
y = y + 1
}
# + id="yW_QBfG4tW0N" language="R"
# # DESeq2 (Rice) adapted from DESeq2_Osa.R
#
# library('DESeq2')
# library('RColorBrewer')
#
# raw_counts <- read.table(file = paste0(dir_path, 'prep_files/expmat_Osa_raw.tsv'), sep = '\t', header = TRUE)
# raw_counts <- data.frame(raw_counts, row.names = 1)
#
# annotations <- c("1913_salt", "1913_control",
# "5941_cold", "5941_control",
# "ERP003982_salt", "ERP003982_control",
# "GSE57950_drought", "GSE57950_control")
#
# md1913 = c("1913_salt",
# "1913_control",
# "1913_control",
# "1913_salt",
# "1913_salt",
# "1913_control")
# md5941 = c(rep(c("5941_control"), 3),
# rep(c("5941_cold"), 3),
# rep(c("5941_control"), 3),
# rep(c("5941_cold"), 3))
# md3982 = c("ERP003982_salt",
# "ERP003982_control",
# rep(c("ERP003982_salt"), 2),
# rep(c("ERP003982_control"), 2))
# md57950 = c(rep(c("GSE57950_control"), 6),
# rep(c("GSE57950_drought"), 6))
#
# mdlist <- list(md1913, md5941, md3982, md57950)
#
# for (i in seq(1,length(annotations), by = 2)){
# df = raw_counts[, grep(strsplit(annotations[i], "_")[[1]][1], names(raw_counts))]
# sampleMetaData <- data.frame(condition = mdlist[[i - (i-1)/2]])
# dds = DESeqDataSetFromMatrix(countData=df,
# colData=sampleMetaData,
# design=~condition)
# dds = DESeq(dds)
# res = results(dds, contrast=c("condition", annotations[i], annotations[i+1]))
# res = res[order(res$pvalue),]
# resSig = subset(res, res$padj < 0.05 & abs(res$log2FoldChange) > 1)
# resSig = resSig[ order(resSig$padj), ]
# print(paste(annotations[i], 'vs', annotations[i+1]))
# summary(res)
# summary(resSig)
# write.table(as.data.frame(res), file=paste(osa_path, annotations[i], strsplit(annotations[i+1], "_")[[1]][2], '_res.tsv', sep = ''),
# quote=FALSE, sep='\t', col.names = NA)
# write.table(as.data.frame(resSig), file=paste(osa_path, annotations[i], strsplit(annotations[i+1], "_")[[1]][2], '_resSig.tsv', sep = ''),
# quote=FALSE, sep='\t', col.names = NA)
# }
# + [markdown] id="OZ8K1xcl7dfZ"
# ### 2.2 Supp. Fig 4: Comparison of DEGs between two controls
# + id="-Bz6wm_ABLin"
wdir = dir_path + 'prep_files/mpo/deseq/'
deseqouts = [x for x in os.listdir(wdir) if "resSig.tsv" in x]
deseqouts.remove('controlD2controlH2_resSig.tsv')
# create subplots
xlen = 4
ylen = math.ceil(len(deseqouts)/8)
figw = xlen * 4
figh = ylen * 2.5
stress_list = ['H', 'C', 'HM', 'CM', 'M', 'CL', 'ML', 'L', 'HS', 'CS', 'SM', 'LS', 'S',
'HN', 'CN', 'MN', 'NL', 'SN', 'N', 'HD', 'CD', 'MD', 'SD', 'ND', 'D']
f_axes = string.ascii_uppercase[:len(stress_list)]
axd = plt.figure(constrained_layout=True,
figsize=(figw, figh)).subplot_mosaic(
"""
A......
.B.....
CDE....
.FGH...
IJKLM..
NOPQRS.
TUV.WXY
""",
gridspec_kw = {'hspace' : 0.3}
)
counter = 0
for i in stress_list:
files = [x for x in deseqouts if x.startswith(i+'control')]
files.sort()
fileD2 = pd.read_csv(wdir + files[0],
sep = "\t", header = 0, index_col = 0)
fileH2 = pd.read_csv(wdir + files[1],
sep = "\t", header = 0, index_col = 0)
D2index, H2index = set(fileD2.index.tolist()), set(fileH2.index.tolist())
status = []
# Create sets
D2only = D2index - H2index
H2only = H2index - D2index
D2H2 = D2index & H2index
# Subsets of df for D2only, H2only and D2H2
D2onlydf = fileD2[fileD2.index.isin(list(D2only))]
H2onlydf = fileH2[fileH2.index.isin(list(H2only))]
D2H2df = fileD2.append(fileH2)
D2H2df = D2H2df[D2H2df.index.isin(list(D2H2))]
D2H2df.sort_index(inplace=True)
# Create list of lists of all differentially expressed genes with corresponding status
stress = "Mpo_" + files[0].split("controlD2")[0]
for j in D2only:
status.append([j, stress, 'D2', str(fileD2.loc[j, "log2FoldChange"]), str(fileD2.loc[j, "padj"]), "N/A", "N/A"])
for k in H2only:
status.append([k, stress, 'H2', "N/A", "N/A", str(fileH2.loc[k, "log2FoldChange"]), str(fileH2.loc[k, "padj"])])
for m in D2H2:
status.append([m, stress, 'D2H2', str(fileD2.loc[m, "log2FoldChange"]), str(fileD2.loc[m, "padj"]), str(fileH2.loc[m, "log2FoldChange"]), str(fileH2.loc[m, "padj"])])
status.sort()
# Output file with genes and status: D2, H2 or D2H2
with open(wdir + "sets/results/" + stress + ".txt", "w+") as filo:
filo.write("gene\tstress\tstatus\tL2FC_D2\tpadj_D2\tL2FC_H2\tpadj_H2\n")
for n in status:
filo.write("\t".join(n) + "\n")
# Venn diagram
sp_ax = axd[f_axes[stress_list.index(stress.split('_')[1])]]
venn2(subsets=(len(D2only), len(H2only), len(D2H2)),
set_labels = ('D2', 'H2'),
ax = sp_ax)
sp_ax.set_title(stress.split('_')[1], size=14)
plt.savefig(dir_path + "figures/" + "suppfig4.png", dpi = 600)
# increase counter
counter += 1
# + id="SFx5kliR9PQE"
# Keep DEGs that change in the same direction relative to both controls (D2 and H2);
# genes with conflicting directions are written to a separate "failed" file
# adapted from compile_sigGenes_phase1n2.py
deseqdir = dir_path + 'prep_files/mpo/deseq/'
setdir = deseqdir + 'sets/results/'
setdir_safe = setdir.replace(' ', '\ ')
if not os.path.exists(setdir):
# !mkdir -p $setdir_safe
merfile = open(dir_path + 'mercator/MpoProt.results.txt', 'r')
ofile = open(deseqdir + 'resSig_compiled.txt', 'w+')
efile = open(deseqdir + 'resSig_failed.txt', 'w+')
setfiles = [x for x in os.listdir(setdir) if '.txt' in x]
ofile.write("\t".join(['gene', 'stress', 'L2FC_D2', 'L2FC_H2', 'annotation']) + "\n")
efile.write("\t".join(['gene', 'stress', 'L2FC_D2', 'L2FC_H2', 'annotation']) + "\n")
def get_anno(gene):
gene = gene.lower()
return meranno[gene]
def up_down(val):
    if math.isnan(val):
        stat = "NaN"
    elif val < 0:
        stat = "DOWN"
    elif val > 0:
        stat = "UP"
    else:  # val == 0: treat as unchanged
        stat = "NaN"
    return stat
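# For illustration (hypothetical values): up_down(-1.3) returns "DOWN", up_down(2.0)
# returns "UP", and up_down(float("nan")) returns "NaN".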
meranno = {}
for line in merfile:
if len(line.rstrip().split("\t")) == 5:
bincode, name, identifier, desc, ptype = line.rstrip().replace("'", "").split("\t")
if identifier not in meranno:
meranno[identifier] = [[bincode, desc]]
else:
meranno[identifier].append([bincode, desc])
for i in setfiles:
content = pd.read_csv(setdir + i, sep = "\t", header = 0)
sigGenes = content[content['status'] == "D2H2"]
sigGenes['annotation'] = sigGenes['gene'].apply(get_anno)
sigGenes['L2FC_D2'] = sigGenes['L2FC_D2'].apply(up_down)
sigGenes['L2FC_H2'] = sigGenes['L2FC_H2'].apply(up_down)
sigGenes = sigGenes.drop(columns = ['status', 'padj_D2', 'padj_H2'])
for index, row in sigGenes.iterrows():
if row['L2FC_D2'] != row['L2FC_H2']:
efile.write("\t".join([str(z) for z in row]) + "\n")
else:
ofile.write("\t".join([str(z) for z in row]) + "\n")
ofile.close()
efile.close()
# + [markdown] id="jQ3r_Ze15mZf"
# ### 2.3 Diurnal gene expression
# + id="KtgutT8H3MQY"
jtk_dir = dir_path_safe + "JTK/"
# %cd $jtk_dir
# + id="k0usa5wb5pxj" language="R"
# # https://towardsdatascience.com/how-to-install-packages-in-r-google-colab-423e8928cd2e
# #system(paste("cd", paste0(dir_path_safe, "JTK/")))
# source("JTK_CYCLEv3.1.R")
#
# project <- "Mpo_JTK"
#
# options(stringsAsFactors=FALSE)
# annot <- read.delim("annot_diur.txt")
# data <- read.delim("expmat_diur.txt")
#
# rownames(data) <- data[,1]
# data <- data[,-1]
# jtkdist(6, 3) # 6 total time points, 3 replicates per time point
#
# periods <- 6:6 # look only for rhythms with a period of 6 time points (i.e. 24 hours at 4-hour sampling).
# jtk.init(periods,4) # 4 is the number of hours between time points
#
# cat("JTK analysis started on",date(),"\n")
# flush.console()
#
# st <- system.time({
# res <- apply(data,1,function(z) {
# jtkx(z)
# c(JTK.ADJP,JTK.PERIOD,JTK.LAG,JTK.AMP)
# })
# res <- as.data.frame(t(res))
# bhq <- p.adjust(unlist(res[,1]),"BH")
# res <- cbind(bhq,res)
# colnames(res) <- c("BH.Q","ADJ.P","PER","LAG","AMP")
# results <- cbind(annot,res,data)
# results <- results[order(res$ADJ.P,-res$AMP),]
# })
# print(st)
#
# save(results,file=paste("JTK",project,"rda",sep="."))
# write.table(results,file=paste("JTK",project,"txt",sep="."),row.names=F,col.names=T,quote=F,sep="\t")
# + id="-vX6y-Do3yOB"
# %cd $dir_path_safe
# + id="AoC7_qrA4iOf"
# adapted from clean_Mpo.py
# To prepare and format the Mpo JTK results to Camilla's supp. standard.
# NR -- not rhythmic: genes with ADJ.P >= 0.05
# NE -- not expressed: genes excluded from the JTK input because no TPM > 1 was observed
#       across all timepoints and replicates
expanno = dir_path + "summary_files/diurnal_exp.txt"
# label conversion to experiment annotation
annodict = {}
with open(expanno, "r") as expannof:
content = expannof.readlines()
for line in content:
label, actual = line.strip().split("\t")
annodict[label] = actual + '_' + label.split('_')[1]
diurlabels = ["gene"]
diurlabels.extend(list(annodict.keys()))
# select only diurnal experiments
diurexpmat = dir_path + 'prep_files/diurnal_exp.tsv'
diuronly = pd.read_csv(diurexpmat, sep='\t')
mpogenes = diuronly.gene.to_list()
diuronly.set_index("gene", inplace=True)
diuronly.columns = [annodict[x] for x in diuronly.columns.to_list()]
# prepping annotation file for JTK_Cycle/supp.
meranno = {}
merp = dir_path + 'mercator/MpoProt.results.txt'
merfile = open(merp, 'r')
merfile.readline()
for line in merfile:
if len(line.rstrip().split("\t")) == 5:
bincode, name, identifier, desc, ptype = line.rstrip().replace("'", "").split("\t")
if identifier not in meranno:
meranno[identifier] = name
# JTK output with expmat_diur
JTKout = pd.read_csv(dir_path + "JTK/JTK.Mpo_JTK.txt", sep = "\t")
JTKgenes = JTKout.Probe.to_list()
NEgenes = [x for x in mpogenes if x not in JTKgenes]
# for supp
colused = list(JTKout.columns)
colunwanted = ['BH.Q', 'PER','AMP']
for i in colunwanted:
colused.remove(i)
# format to match Camilla's supp. material
forsupp = JTKout[colused]
# definitions to change non-significantly rhythmic genes to NR instead of the default output values
def NRcheck(num):
if num >= 0.05:
return "NR"
else:
return "{:.2E}".format(num)
forsupp['ADJ.P'] = forsupp['ADJ.P'].apply(lambda x: NRcheck(x))
def phaseCheck(adjval, lagval):
if adjval == "NR":
newval = "NR"
else:
newval = lagval + 2
if newval >= 24:
newval = newval - 24
return newval
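# For illustration (hypothetical values): phaseCheck("NR", 10) returns "NR", while
# phaseCheck("1.20E-03", 22) returns 0 (the lag is shifted by +2 h and wrapped at 24 h).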
forsupp["LAG"] = forsupp.apply(lambda row: phaseCheck(row["ADJ.P"], row["LAG"]), axis = 1)
# to format genes that are not expressed (NE) and excluded in JTK analysis to supp file
NEcollect = {}
for j in NEgenes:
NEcollect[j] = [meranno[j.lower()], "NE", "NE"] + diuronly.loc[j, colused[4:]].to_list()
NEdf = pd.DataFrame(NEcollect, index = colused[1:])
NEdf = NEdf.transpose()
NEdf.reset_index(inplace=True)
NEdf.columns = colused
# combine formatted JTK output and NE genes
combined = forsupp.append(NEdf, ignore_index = True)
combined.sort_values("Probe", inplace=True, ignore_index = True)
# write to directory and ready for use (for analysis)
cleaned = dir_path + "diurnal/"
combined.to_csv(cleaned + "Mpo_supp.txt", index = False, sep = "\t")
# + [markdown] id="V9HG7DByg6Pg"
# # 3. Analysis and plotting
# + [markdown] id="4qA5pIrH7VZW"
# ### Figure 1 & Supp. Fig 1: Measurements and Student's t-test
# + id="DXpM_NKa743D"
# adapted from measurements_forsupp.py
wdir = dir_path + 'prep_files/'
odir = dir_path + 'figures/'
odir_safe = dir_path_safe + 'figures/'
if not os.path.exists(odir):
# !mkdir $odir_safe
infile = 'phase1n2_measurements_nooutliers.txt'
measurements = pd.read_csv(wdir + infile, sep='\t')
# Single letter to full single stress description
singled = {'C':'Cold',
'H':'Heat',
'S':'Salt',
'M':'Mannitol',
'L':'Light',
'D':'Dark',
'N':'Nitrogen'
}
# For plotting all controls
areatype = ['Parea', 'Earea']
titletype = ['15', '21']
for i in range(0,2):
area = areatype[i]
title = titletype[i]
control_m = measurements[measurements.Stress == 'Control'][["Batch", area]].groupby('Batch', sort = False).mean()
control_s = measurements[measurements.Stress == 'Control'][["Batch", area]].groupby('Batch', sort = False).std()
control_m.plot.bar(yerr=[list(control_s[area]), list(control_s[area])[::-1]], legend=False, title='Control (Day '+ title + ')', capsize=4)
plt.savefig(odir + 'Control_Day' + title + '.png', dpi = 600, bbox_inches='tight')
plt.show()
# df with only single stress measurements
ss_meas = measurements[(measurements.Condition != 'None') & (measurements.Condition != 'mixed')]
# df with only crossed stress measurements
cs_meas = measurements[measurements.Condition == 'mixed']
# df with only controls
control_meas = measurements[measurements.Stress == 'Control']
controltype = ['Stress']
controltitle = ['_merged']
xaxislabel = {'Heat': 'Temperature (\u00B0C)',
'Cold': 'Temperature (\u00B0C)',
'Mannitol': 'Mannitol (mM)',
'Salt': 'NaCl (mM)',
'Light': 'Light intensity (\u03bcEm\u207b\u00b2s\u207b\u00b9)',
'Dark': 'Days',
'Nitrogen': 'KNO\u2083 (%)'}
# Supp. Fig. 1
# t-test: stress measurements as sample a, control measurements as sample b in st.ttest_ind(a, b)
tout = open(wdir + 'ttest.txt', 'w+')
from scipy import stats as st
for ss in list(ss_meas.Stress.unique()):
for i, c in enumerate(controltype):
control_batches = ss_meas[ss_meas.Stress == ss].Batch.unique()
control_mean = control_meas[control_meas.Batch.isin(control_batches)].groupby(c, sort = False).mean()
control_std = control_meas[control_meas.Batch.isin(control_batches)].groupby(c, sort = False).std()
stress_mean = ss_meas[ss_meas.Stress == ss].groupby('Condition', sort=False).mean()
stress_std = ss_meas[ss_meas.Stress == ss].groupby('Condition', sort=False).std()
if c == 'Stress': # t-test
control_df = control_meas[control_meas.Batch.isin(control_batches)][['Parea', 'Earea']]
stress_conds = ss_meas[ss_meas.Stress == ss].Condition.unique()
for k, a in enumerate(areatype):
for scond in stress_conds:
stress_df = ss_meas[(ss_meas.Stress == ss) & (ss_meas.Condition == scond)]
tstat, pval = st.ttest_ind(stress_df[a], control_df[a])
tout.write(('\t').join(['Day '+ titletype[k], ss + '_' + scond, 'control_merged', str(tstat), str(pval)]) + "\n")
labels = control_mean.index.to_list() + stress_mean.index.to_list()
for j, a in enumerate(areatype): # Day 15 or 21 area
coll_mean = list(control_mean[a]) + list(stress_mean[a])
coll_std = list(control_std[a]) + list(stress_std[a])
plt.bar(labels, coll_mean, yerr = coll_std, capsize=4)
plt.title(ss + ' (Day ' + titletype[j] + ')')
plt.xlabel(xaxislabel[ss])
plt.ylabel('Area (mm\u00b2)')
plt.savefig(odir + ss + '_Day' + titletype[j] + controltitle[i] + '.png', dpi = 600, bbox_inches='tight')
plt.show()
# cross_stress plot
for i, c in enumerate(controltype):
cs_control_batches = cs_meas.Batch.unique()
cs_control_mean = control_meas[control_meas.Batch.isin(cs_control_batches)].groupby(c, sort = False).mean()
cs_control_std = control_meas[control_meas.Batch.isin(cs_control_batches)].groupby(c, sort = False).std()
cs_stress_mean = cs_meas.groupby('Stress', sort=False).mean()
cs_stress_std = cs_meas.groupby('Stress', sort=False).std()
cs_labels = cs_control_mean.index.to_list() + cs_stress_mean.index.to_list()
if c == 'Stress': #t-test
control_df = control_meas[control_meas.Batch.isin(cs_control_batches)][['Parea', 'Earea']]
for k, a in enumerate(areatype):
for ss in list(cs_meas.Stress.unique()):
stress_df = cs_meas[(cs_meas.Stress == ss)]
tstat, pval = st.ttest_ind(stress_df[a], control_df[a])
tout.write(('\t').join(['Day '+ titletype[k], ss + '_' + 'mixed', 'control_merged', str(tstat), str(pval)]) + "\n")
for j, a in enumerate(areatype): # Day 15 or 21 area
coll_mean = list(cs_control_mean[a]) + list(cs_stress_mean[a])
coll_std = list(cs_control_std[a]) + list(cs_stress_std[a])
plt.bar(cs_labels, coll_mean, yerr = coll_std, capsize=4)
plt.title('Cross stress (Day ' + titletype[j] + ')')
plt.xticks(rotation=90)
plt.xlabel('Experiment')
plt.ylabel('Area (mm\u00b2)')
plt.savefig(odir + 'Cross_stress_Day' + titletype[j] + controltitle[i] + '.png', dpi = 600, bbox_inches='tight')
plt.show()
# single stress reps and cross stress (control - merged)
def ss_grab(stress, condition):
"""
    Slice the measurement dataframe for a given single stress and condition.
Parameters
----------
stress : string
Stress of interest.
condition : string
Condition of interest.
Returns
-------
sssub : dataframe
df of single stress.
"""
sssub = measurements[(measurements.Stress == stress) & (measurements.Condition == condition)]
return sssub
srep_keys = [['Cold', '3'],
['Heat', '33'],
['Salt', '40'],
['Mannitol', '100'],
['Light', '435'],
['Dark', '3'],
['Nitrogen', '0']]
srepdf = measurements[(measurements.Stress == 'Cold') & (measurements.Condition == '3')]
for s, c in srep_keys[1:]:
srepdf = pd.concat([srepdf, ss_grab(s, c)])
s_cs_meas = pd.concat([srepdf, cs_meas])
s_cs_control_batches = list(cs_meas.Batch.unique()) + list(srepdf.Batch.unique())
s_cs_control_mean = control_meas[control_meas.Batch.isin(s_cs_control_batches)].groupby('Stress', sort = False).mean()
s_cs_control_std = control_meas[control_meas.Batch.isin(s_cs_control_batches)].groupby('Stress', sort = False).std()
s_cs_stress_mean = s_cs_meas.groupby('Stress', sort=False).mean()
s_cs_stress_std = s_cs_meas.groupby('Stress', sort=False).std()
s_cs_meas_label = [x + ' (' + x[0] + ')' if len(x) > 2 else x for x in s_cs_stress_mean.index]
s_cs_meas_label[s_cs_meas_label.index('Light (L)')] = 'High light (L)'
s_cs_meas_label[s_cs_meas_label.index('Dark (D)')] = 'Darkness (D)'
s_cs_labels = s_cs_control_mean.index.to_list() + s_cs_meas_label
# Fig1
#t-test
control_df = control_meas[control_meas.Batch.isin(s_cs_control_batches)][['Parea', 'Earea']]
## cross-stress
for k, a in enumerate(areatype):
for ss in list(cs_meas.Stress.unique()):
stress_df = cs_meas[(cs_meas.Stress == ss)]
for singleS in ss:
singlecontrol = srepdf[srepdf.Stress == singled[singleS]][a]
tstat, pval = st.ttest_ind(stress_df[a], singlecontrol)
tout.write(('\t').join(['Day '+ titletype[k], ss + '_' + 'mixed', 'control_' + singled[singleS], str(tstat), str(pval)]) + "\n")
## single stress
for k, a in enumerate(areatype):
for ss in list(srepdf.Stress.unique()):
cond = srepdf[(srepdf.Stress == ss)].Condition.unique()[0]
stress_df = srepdf[(srepdf.Stress == ss)][a]
tstat, pval = st.ttest_ind(stress_df, control_df[a])
tout.write(('\t').join(['Day '+ titletype[k], ss + '_' + cond, 'control', str(tstat), str(pval)]) + "\n")
tout.close()
# plotting
colour_seq = ['tomato'] + ['mediumseagreen']*7 + ['cornflowerblue']*20
for j, a in enumerate(areatype): # Day 15 or 21 area
coll_mean = list(s_cs_control_mean[a]) + list(s_cs_stress_mean[a])
coll_std = list(s_cs_control_std[a]) + list(s_cs_stress_std[a])
plt.bar(s_cs_labels, coll_mean, yerr = coll_std, capsize=4, color = colour_seq)
plt.title('Area (Day ' + titletype[j] + ')')
plt.xticks(rotation=90)
plt.xlabel('Experiment')
plt.ylabel('Area (mm\u00b2)')
plt.savefig(odir + 'fig1_Day' + titletype[j] + '.png', dpi = 600, bbox_inches='tight')
plt.show()
# + [markdown] id="DXF0cNS57aAn"
# ### Figure 2: Interspecies comparison (Biological processes)
#
# + id="PcYVfVEpH0Jc"
# adapted from cross_spe_mapman.py
### FUNCTION ###
def anno_split(row):
return int(row['annotation'][0][0].split(".")[0])
def bin_count(row):
count = row['rel_count']
if count >= 0.5:
return 0.5
elif count >= 0.25:
return 0.35
elif count > 0.0:
return 0.2
else:
return 0
def label_color(xlabel):
if "heat" in xlabel:
return "firebrick"
elif "cold" in xlabel:
return "steelblue"
elif "light" in xlabel:
return "darkorange"
elif "dark" in xlabel:
return "black"
elif "salt" in xlabel:
return "rebeccapurple"
elif "mannitol" in xlabel:
return "mediumvioletred"
elif "nitrogen" in xlabel:
return "forestgreen"
else:
return "slategrey"
def species_color(xlabel):
if "Ath" in xlabel:
return "firebrick"
elif "Cpa" in xlabel:
return "steelblue"
elif "Cre" in xlabel:
return "darkorange"
elif "Osa" in xlabel:
return "rebeccapurple"
elif "Mpo" in xlabel:
return "forestgreen"
### PATHS ###
wdir = dir_path + 'prep_files/'
setres = wdir + 'Figure2_alldata_compiled_updated.txt'
jdir = wdir + 'proteomes/'
mdict = wdir + 'merdict.txt'
### DICTIONARY OF MERCATOR BINS ###
dicto = literal_eval(open(mdict, 'r').read())
### LOAD GENE PER SPECIES AND COUNT
# initialise species
spedicto = {'ARATH' : 'Ath',
'CHLRE' : 'Cre',
'CYAPA' : 'Cpa',
'MARPO' : 'Mpo',
'ORYSA' : 'Osa'}
species_list = list(spedicto.values())
# initialise gene count in species [for % of DGEs]
Gdicto = {}
# {"species" : ["gene1", "gene2"...]}
for pepfile in [x for x in os.listdir(jdir) if '.ini' not in x]:
with open(jdir + pepfile, "r") as peppy:
species, genes = pepfile.split('.fa')[0], []
for lini in peppy:
if '>' in lini:
genes.append(lini.strip().split('>')[1])
Gdicto[spedicto[species]] = len(genes)
# Get only Ath genes (for name conversion, mercator output)
athdict = {}
with open(jdir + "ARATH.fa", "r") as athgenes:
for lini in athgenes:
if '>' in lini:
genename = lini.strip().split('>')[1]
athdict[genename.lower()] = genename
athdict2 = {} # for name conversion (DGE table)
with open(jdir + "ARATH.fa", "r") as athgenes:
for lini in athgenes:
if '>' in lini:
genename = lini.strip().split('>')[1]
athdict2[genename.lower().capitalize()] = genename
### LOAD SIGNIFICANTLY DIFFERENTIAL GENE TABLE ###
sigtable = pd.read_csv(setres, sep='\t', header=0, index_col=0)
sigtable = sigtable.reset_index()
sigtable["gene"].replace(athdict2, inplace=True)
sigtable = sigtable.set_index("gene")
### DICTIONARY OF MERCATOR ANNOTATION ###
merdir = wdir + 'mercator_results/'
merlist = [x for x in os.listdir(merdir) if '.results.txt' in x]
# Read mercator annotations (list of lists) as lists instead of string
sigtable['annotation'] = sigtable['annotation'].apply(literal_eval)
meranno = {}
map2anno = {}
for i in merlist:
sp = i.split("Prot")[0]
merfile = open(merdir + i, 'r')
merfile.readline()
for line in merfile:
linecon = line.rstrip().replace("'", "").split("\t")
if len(linecon) == 5:
bincode, name, identifier, desc, ptype = linecon
if identifier not in meranno:
meranno[identifier] = [sp, [bincode.split('.')[0]]]
else:
meranno[identifier][1].append(bincode.split('.')[0])
if len(linecon[0].split('.')) == 2:
map2anno[linecon[0]] = linecon[1]
merdf = pd.DataFrame.from_dict(meranno, orient = 'index', columns = ['species', 'code'])
merdf = merdf.reset_index()
merdf["index"].replace(athdict, inplace=True)
merdf = merdf.set_index("index")
all_s = list(set(sigtable.stress.to_list()))
def bin_collate(updown):
'''
To generate df of collated bins per stress
Parameters
----------
updown : str
choice of whether to construct for upregulated or downregulated genes.
Returns
-------
    df_beans : dataframe
        df of DEGs and their top-level MapMan bins for each stress.
'''
dicto = {}
for stress in all_s:
genes = [x for x in sigtable[(sigtable.stress == stress) & (sigtable.L2FC_D2 == updown)].index.to_list()]
beans = [y[0].split('.')[0] for x in sigtable[(sigtable.stress == stress) & (sigtable.L2FC_D2 == updown)].annotation.to_list() for y in x]
dicto[stress] = [genes, beans]
df_beans = pd.DataFrame.from_dict(dicto, orient='index', columns=['gene', 'bins'])
return df_beans
def sig_df(df, sigcol, mapbins):
"""
Calculates and correct mapman bin enrichment p-value for all stresses
Returns dataframe
Parameters
----------
df : dataframe
df containing genes and corresponding mapman bins of DEGs.
sigcol : str
column name to use for enrichment
mapbins : list
list of mapman annotation/bins to use
Returns
-------
df_sig : dataframe
df summarising enrichment (corrected p-value) for each mapman bin (row)
and each stress (column).
"""
sig_sum = {}
for s in all_s:
s_count = Counter(df.loc[s][sigcol])
valid_bins = list(s_count.keys()) # bins found in stress
# initialise count dicitonary
sig_count = {}
for key in valid_bins:
sig_count[key] = 1
# initilaise values for mercator by species
sp = s.split('_')[0]
# random simulations
simno = 1000
for i in range(simno):
shuffle = merdf[merdf.species == sp].code.to_list()
random.shuffle(shuffle)
sub = shuffle[:len(df.loc[s].gene)]
sub_count = Counter([y for x in sub for y in x])
for mapman in valid_bins:
if sub_count[mapman] >= s_count[mapman]:
sig_count[mapman] += 1
# p-value calculation
pval_coll = []
for mapman in valid_bins:
pval = sig_count[mapman]/simno
# correction for pval > 1
if pval <= 1:
pval_coll.append(pval)
else:
pval_coll.append(float(round(pval)))
# BH correction for multiple testing
y = multipletests(pvals=pval_coll, alpha=0.05, method="fdr_bh")[1]
all_bins_corr_pval = []
for mapman in mapbins:
if mapman in valid_bins:
all_bins_corr_pval.append(y[valid_bins.index(mapman)])
else:
all_bins_corr_pval.append(np.nan)
sig_sum[s] = all_bins_corr_pval
df_sig = pd.DataFrame.from_dict(sig_sum, orient='index', columns=mapbins)
return df_sig
def chunk(uval, dval):
if math.isnan(uval) and math.isnan(dval):
# not differentially regulated
cat = 0
elif uval >= 0.05 and (dval >= 0.05 or math.isnan(dval)):
# not enriched
cat = 0
elif dval >= 0.05 and (uval >= 0.05 or math.isnan(uval)):
# not enriched
cat = 0
elif uval < 0.05 and dval < 0.05:
# differentially up and downregulated in bin
cat = 2
elif dval < 0.05:
# differentially downregualted
cat = 1
elif uval < 0.05:
# differentially upregulated
cat = 3
return cat
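# For illustration (hypothetical p-values): chunk(0.01, float("nan")) -> 3 (enriched among
# upregulated genes only), chunk(0.2, 0.01) -> 1 (downregulated only),
# chunk(0.01, 0.01) -> 2 (both directions), chunk(0.2, 0.3) -> 0 (not enriched).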
# initialisation for enrichment
#mapbins = list(map2anno.keys()) #level 2
mapbins = [str(x) for x in list(dicto.keys())] # level 1
# segregate stress and associated genes and mapman bins into up and downregulated df respectively
df_U = bin_collate('UP')
df_D = bin_collate('DOWN')
# df of significance values
df_sig_U = sig_df(df_U, 'bins', mapbins)
df_sig_D = sig_df(df_D, 'bins', mapbins)
cat_dict = {}
for mapman in list(df_sig_U.columns):
cat_col = []
for stress in list(df_sig_U.index):
uval, dval = df_sig_U.loc[stress, mapman], df_sig_D.loc[stress, mapman]
cat_col.append(chunk(uval,dval))
cat_dict[mapman] = cat_col
df_combined_sig = pd.DataFrame.from_dict(cat_dict, orient='index', columns=list(df_sig_U.index))
df_combined_sig = df_combined_sig.loc[df_combined_sig.max(axis=1) > 0,:] # remove bins w/o enrichment
df_combined_sig = df_combined_sig.loc[(df_combined_sig > 0).sum(axis=1) >2,:] # keep bins enriched in at least 3 stresses
df_combined_sig = df_combined_sig.loc[:,df_combined_sig.max() > 0] # remove stresses w/o enrichment
def ji_cal(a, b):
# jaccard index calculation
return len(a&b) / len(a|b)
def jdistprep(df, axis):
'''
Convert df to sets (for calculation of JD of X axis)
Parameters
----------
df : dataframe
dataframe of categorical variables to be converted to sets.
axis : int
axis to do sets on, 0 by column (default), 1 by row
Returns
-------
dicto : dict
dictionary containing list of column values.
'''
if axis == 1:
df = df.T
dxkeys = df.columns.to_list()
dykeys = df.index.to_list()
dicto = {}
for col in dxkeys:
dicto[col] = [dykeys[i] + '_' + str(x) for i, x in enumerate(df[col].to_list())]
return [dicto, dxkeys]
def jdist(df, axis=0):
'''
Construct jaccard distance square matrix
Parameters
----------
df : df
dataframe to be used for jiprep/ jdist calculation.
axis : int
axis to do sets on, 0 by column (default), 1 by row
Returns
-------
linkage_matrix : list
condensed jaccard distance matrix.
jlist : list
list of list (jaccard distance square matrix)
dicto : dict
dictionary of list
'''
dicto, dxkeys = jdistprep(df, axis)
jlist = []
for key in dxkeys:
col = []
for key2 in dxkeys:
set1, set2 = dicto[key], dicto[key2]
set1x = set([x for x in set1 if x.split('_')[1] != '0'])
set2x = set([x for x in set2 if x.split('_')[1] != '0'])
col.append(1 - ji_cal(set1x, set2x))
jlist.append(col)
dists = squareform(jlist)
linkage_matrix = linkage(dists, "single")
return linkage_matrix, jlist, dicto
def plot_dendro(linkage_matrix, ax, orient):
'''
Plots dendrogram into subplot
Parameters
----------
mat : list of lists
Contains the square matrix of jaccard distances.
ax : axes
axis of subplot to plot to.
orient : str
orientation of dendrogram to be plotted.
Returns
-------
None.
'''
dendrogram(linkage_matrix, no_labels=True, ax=ax, orientation=orient, color_threshold=0, above_threshold_color='#000000')
xmat, xlist, xdict = jdist(df_combined_sig)
ymat, ylist , ydict = jdist(df_combined_sig, axis=1)
yden = dendrogram(ymat, labels=df_combined_sig.index.to_list(), orientation='left')
plt.show()
xden = dendrogram(xmat, labels=df_combined_sig.columns.to_list(), orientation='top')
plt.show()
yorder = yden['ivl']
xorder = xden['ivl']
df_sig_reordered = df_combined_sig[xorder]
df_sig_reordered = df_sig_reordered.reindex(yorder[::-1])
"""
Custom plot
"""
# Create plot with subplot
fig, ax = plt.subplots(6,2, constrained_layout=True,
figsize=(16.3, 18), # (width, height)
gridspec_kw={'width_ratios': [1, 8.3],
'height_ratios': [0.5,0.5,0.5,0.5,1,5.3]})
plt.rcParams['font.size'] = '16'
ax_1, ax_2, ax_3, ax_4, ax_5, ax_6, ax_7, ax_8, ax1, ax2, ax3, ax4 = ax.flatten()
for ax in [ax_1, ax_2, ax_3, ax_4, ax_5, ax_6, ax_7, ax_8, ax1, ax2, ax3, ax4]:
ax.tick_params(axis='both', which='major', labelsize=16)
ax_1.axis('off') # empty
#ax_2.axis('off') # DGE % of genes
ax_3.axis('off') # empty
#ax_4.axis('off') # DGE % of TFs
ax_5.axis('off') # empty
#ax_6.axis('off') # DGE % of kinases
ax_7.axis('off') # empty
#ax_8.axis('off') # %DGE up/down reg
ax1.axis('off') # cbar
ax2.axis('off') # dendrogram row
ax3.axis('off') # dendrogram column
# ax_2 DGE % of genes
### STATISTICS OF DGEs ###
DGEperdict = {}
for sx in sigtable.stress.unique():
DGEperdict[sx] = (len(sigtable[sigtable.stress == sx])/Gdicto[sx.split("_")[0]])*100
DGEper = pd.DataFrame(DGEperdict, index = ["DGEs"])
DGEper = DGEper.transpose()
DGEper["NotDGE"] = DGEper.apply(lambda row: 100 - row, axis=0)
DGEper = DGEper.reindex(xorder)
DGEper.plot.bar(stacked = True,
color = {"DGEs":"firebrick", "NotDGE":"darkgrey"},
edgecolor = "black",
ylim = [0, 50],
ax = ax_2)
handles, labels = ax_2.get_legend_handles_labels()
ax_2.legend(handles=handles[:-1], labels=labels[:-1],
loc='center left', bbox_to_anchor=(1, 0.5))
ax_2.set_ylabel("% genes", rotation = 90, fontsize=16)
ax_2.yaxis.set_label_coords(-0.06,0.36)
ax_2.axes.get_xaxis().set_visible(False)
# ax_4 DGE % of DGEs that are TFs
tfdir = dir_path + 'tf_kinases/'
tfpaths = [x for x in os.listdir(tfdir) if ".ini" not in x and ".TF." in x]
tfdict = {}
for file in tfpaths:
tempspe = spedicto[file.split(".")[0]]
content = open(tfdir + file, "r")
for line in content:
gene, anno = line.strip().split("\t")
if anno!= "NoFunction":
tfdict[gene] = anno
sigmod = sigtable.reset_index()
tfdf = pd.DataFrame(columns = list(sigmod.columns))
for sx in sigtable.stress.unique():
tfsubset = sigmod[(sigmod.stress == sx) & (sigmod.apply(lambda row: row["gene"] in tfdict, axis = 1))]
tfdf = tfdf.append(tfsubset, ignore_index = True)
TFperdict = {}
for sx in sigtable.stress.unique():
TFperdict[sx] = (len(tfdf[tfdf.stress == sx])/len(sigmod[sigmod.stress == sx]))*100
TFper = pd.DataFrame(TFperdict, index = ["TFs"])
TFper = TFper.transpose()
TFper["NotTFs"] = TFper.apply(lambda row: 100 - row, axis=0)
TFper = TFper.reindex(xorder)
TFper.plot.bar(stacked = True,
color = {"TFs":"forestgreen", "NotTFs":"darkgrey"},
edgecolor = "black",
ax = ax_4,
ylim = [0,25])
TFhandles, TFlabels = ax_4.get_legend_handles_labels()
ax_4.legend(handles=TFhandles[:-1], labels=TFlabels[:-1],
loc='center left', bbox_to_anchor=(1, 0.5))
ax_4.set_ylabel("% DEGs", rotation = 90, fontsize=16)
ax_4.yaxis.set_label_coords(-0.06,0.36)
ax_4.axes.get_xaxis().set_visible(False)
# ax_6 DGE % of DGEs that are kinases
kindir = dir_path + 'tf_kinases/'
kinpaths = [x for x in os.listdir(kindir) if ".ini" not in x and ".kinases." in x]
kindict = {}
for file in kinpaths:
tempspe = spedicto[file.split(".")[0]]
content = open(kindir + file, "r")
for line in content:
gene, anno, anno1, anno2 = line.strip().split("\t")
if anno!= "NoFunction":
kindict[gene] = [anno, anno1, anno2]
kindf = pd.DataFrame(columns = list(sigmod.columns))
for sx in sigtable.stress.unique():
if sx.split("_")[0] == "Ath":
kinsubset = sigmod[(sigmod.stress == sx) & (sigmod.apply(lambda row: row["gene"].upper() in kindict, axis = 1))]
else:
kinsubset = sigmod[(sigmod.stress == sx) & (sigmod.apply(lambda row: row["gene"] in kindict, axis = 1))]
kindf = kindf.append(kinsubset, ignore_index = True)
kinperdict = {}
for sx in sigtable.stress.unique():
kinperdict[sx] = (len(kindf[kindf.stress == sx])/len(sigmod[sigmod.stress == sx]))*100
kinper = pd.DataFrame(kinperdict, index = ["kinases"])
kinper = kinper.transpose()
kinper["NotKinases"] = kinper.apply(lambda row: 100 - row, axis=0)
kinper = kinper.reindex(xorder)
kinper.plot.bar(stacked = True,
color = {"kinases":"darkgoldenrod", "NotKinases":"darkgrey"},
edgecolor = "black",
ax = ax_6,
ylim = [0,25])
kinhandles, kinlabels = ax_6.get_legend_handles_labels()
ax_6.legend(handles=kinhandles[:-1], labels=kinlabels[:-1],
loc='center left', bbox_to_anchor=(1, 0.5))
ax_6.set_ylabel("% DEGs", rotation = 90, fontsize=16)
ax_6.yaxis.set_label_coords(-0.06,0.36)
ax_6.axes.get_xaxis().set_visible(False)
# ax_8 DGE, up/down ratio
uddict = {}
for sx in sigtable.stress.unique():
upcount = len(sigtable[(sigtable.stress == sx) & (sigtable.L2FC_D2 == "UP")])
downcount = len(sigtable[(sigtable.stress == sx) & (sigtable.L2FC_D2 == "DOWN")])
total = upcount + downcount
uddict[sx] = [downcount/(total)*100, upcount/(total)*100]
udper = pd.DataFrame(uddict, index = ["downregulated", "upregulated"])
udper = udper.transpose()
udper = udper.reindex(xorder)
udper.plot.bar(stacked = True,
color = {"upregulated":"firebrick", "downregulated":"navy"},
edgecolor = "white",
yticks = [0, 50, 100],
ax = ax_8)
dgehandles, dgelabels = ax_8.get_legend_handles_labels()
ax_8.legend(handles=dgehandles[::-1], labels=dgelabels[::-1],
loc='center left', bbox_to_anchor=(1, 0.5))
ax_8.set_ylabel("% DEGs", rotation = 90, fontsize=16)
ax_8.yaxis.set_label_coords(-0.06,0.36)
ax_8.axes.get_xaxis().set_visible(False)
# ax2/3 Dendrogram
plot_dendro(xmat, ax2, 'top')
plot_dendro(ymat, ax3, 'left')
# ax4 Heatmap
from matplotlib.colors import ListedColormap
cmap = ListedColormap(["lightgray", "royalblue", "violet", "firebrick"])
catno = 4
hplot = ax4.imshow(df_sig_reordered, cmap=cmap)
ax4.yaxis.tick_right()
ax4.set_ylabel("")
ax4.set_xticks(np.arange(0, len(df_sig_reordered.columns), 1))
ax4.set_yticks(np.arange(0, len(df_sig_reordered), 1))
xcolour = [species_color(x) for x in xorder]
newlabel = df_sig_reordered.columns.to_list()
longlabel = ['heat', 'cold', 'light', 'dark', 'salt', 'mannitol', 'nitrogen']
shortlabel = ['H', 'C', 'L', 'D', 'S', 'M', 'N']
for i, y in enumerate(longlabel):
newlabel = [x.replace(y, shortlabel[i]) for x in newlabel]
newlabel = [x.replace('_', ' ') for x in newlabel]
ax4.set_xticklabels(newlabel, rotation=90, fontsize=18)
for i, tick_label in enumerate(ax4.get_xticklabels()):
tick_text = tick_label.get_text()
tick_label.set_color(xcolour[i])
anno_long = ['annotated', 'cellulose', 'biosynthesis', 'hemicellulose', 'pectin', 'channels', 'degradation']
ax4.set_yticklabels([dicto[int(x)] for x in df_sig_reordered.index.to_list()], fontsize=18)
# colourbar
cbarticks = [(x/(catno*2))*(catno-1) for x in range(1,catno*2,2)]
axins = inset_axes(ax1,
width="40%",
height="90%",
loc = 'center')
cbar = fig.colorbar(hplot, cax=axins, ticks = cbarticks)
cbar.ax.set_yticklabels(['N', 'D', 'UD', 'U'])
#plt.tight_layout()
plt.savefig(dir_path + 'figures/fig2.png',
dpi=600,
bbox_inches='tight')
# reset rcparams
import matplotlib as mpl
mpl.rcParams.update(mpl.rcParamsDefault)
# + [markdown] id="EGvetfN37wGO"
# ### Supp. Fig 5: Interspecies comparison (Gene families)
# + id="gaI3DxHSPhD2"
wdir = dir_path + 'prep_files/'
jdir = wdir + 'proteomes/'
OFfile = 'Orthogroups.txt'
DGEfile = wdir + 'Figure2_alldata_compiled_updated.txt'
jhdir = wdir + 'interspeGF/'
jhdir_safe = dir_path_safe + 'prep_files/interspeGF/'
if not os.path.exists(jhdir):
# !mkdir $jhdir_safe
# initialise species
spedicto = {'ARATH' : 'Ath',
'CHLRE' : 'Cre',
'CYAPA' : 'Cpa',
'MARPO' : 'Mpo',
'ORYSA' : 'Osa'}
species_list = list(spedicto.values())
# initialise genes in species
Gdicto = {}
# {"species" : ["gene1", "gene2"...]}
for pepfile in [x for x in os.listdir(jdir) if '.ini' not in x]:
with open(jdir + pepfile, "r") as peppy:
species, genes = pepfile.split('.fa')[0], []
for lini in peppy:
if '>' in lini:
genes.append(lini.strip().split('>')[1])
Gdicto[spedicto[species]] = genes
# initialise orthofinder groups and corresponding genes by species
OFdicto = {}
OG_list = []
#{"OGx_spe":["gene1", "gene2"]}
with open(wdir+OFfile, "r") as content:
for line in content:
og, val = line.split(': ')[0], line.rstrip().split(': ')[1]
OG_list.append(og)
for spe in spedicto.values():
OFdicto[og + "_" + spe] = [x for x in val.split(' ') if x in Gdicto[spe]]
# initialise DGEs
DGEdicto = {}
# {"gene1_stress" : "UP/DOWN"}
spe_stress = []
with open(DGEfile, "r") as dgecon:
dgecon.readline()
for lino in dgecon:
gene, stress, L2FC_D2, L2FC_H2, annotation = lino.strip().split("\t")
# to account for difference in gene names in this file and in Orthogroups.txt and ARATH.fa
if stress.split("_")[0] == "Ath":
gene = gene.upper()
elif stress.split("_")[0] == "Cre":
gene = gene.split(".t")[0]
DGEdicto[gene + '_' + stress] = L2FC_D2
if stress not in spe_stress:
spe_stress.append(stress)
# Functions
def get_ortho_status(og, spe, stress):
"""
Parameters
----------
og : str
orthogroup name.
spe : str
species code (3 letters).
stress : str
type of stress.
Returns
-------
status : str/None
og + "_UP": consistently UP
og + "_DOWN": consistently DOWN
og + "_AMB": ambiguous, UP and DOWN detected
og + "_NC" : all genes present have no signicant DGEs
None: No gene present in orthogroup
"""
status = []
if len(OFdicto[og + "_" + spe]) > 0:
for gene in OFdicto[og + "_" + spe]:
if gene + "_" + spe + "_"+ stress in DGEdicto:
status.append(DGEdicto[gene + "_" + spe + "_"+ stress])
else:
status.append("NC") # need to account for unchanged genes
status = list(set(status))
if len(status) > 1: # more than 1 type, AMB: ambiguous
if "UP" in status and "DOWN" in status:
status = "AMB"
elif "UP" in status:
status = "UP"
elif "DOWN" in status:
status = "DOWN"
elif status[0] == "NC": # only 1 type, NC: no change
status = "NC"
elif status[0] == "DOWN": # only 1 type, DOWN: downregulated
status = "DOWN"
elif status[0] == "UP": # only 1 type, UP: upregulated
status = "UP"
else:
status = "None" # no gene in the orthogroup
return status
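# A minimal sketch (hypothetical per-gene calls, not a real orthogroup) of the
# collapsing rule that get_ortho_status applies: any mix of UP and DOWN is
# ambiguous (AMB), otherwise UP or DOWN wins over NC, and all-NC stays NC.
for toy_calls in (["UP", "NC"], ["UP", "DOWN"], ["NC", "NC"]):
    toy_uniq = set(toy_calls)
    if "UP" in toy_uniq and "DOWN" in toy_uniq:
        toy_status = "AMB"
    elif "UP" in toy_uniq:
        toy_status = "UP"
    elif "DOWN" in toy_uniq:
        toy_status = "DOWN"
    else:
        toy_status = "NC"
    print(toy_calls, "->", toy_status)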
all_statuses = {}
for item in spe_stress:
species, stress_type = item.split("_", 1)
# container for collecting all the orthogroup status for item in spe_stress
spe_stress_stat = []
for orthogroup in OG_list:
spe_stress_stat.append(get_ortho_status(orthogroup, species, stress_type))
all_statuses[item] = spe_stress_stat
# Initialize dataframe for OG (rows)/spe_stress (cols)
OGstats = pd.DataFrame(all_statuses, index = OG_list, columns = spe_stress)
OGstats.to_csv(jhdir + 'OGstats.txt', sep="\t")
# Calculate Jaccard distance
# Functions
def cal_jd(cond1, cond2):
"""
Parameters
----------
    cond1 : str
        OG status of the first stress.
    cond2 : str
        OG status of the second stress.
    Returns
    -------
    score : int/None
        match score for the modified Jaccard distance (1 = matching
        responsive status, 0 = mismatch, None = pair excluded).
"""
# Score container
score = ""
if "None" in cond1 or "None" in cond2: # 1 or more OG absent
score = None
elif cond1 != cond2: # status do not match
score = 0
elif cond1 == "NC" and cond2 == "NC": # match but NC
score = None
elif cond1 == cond2:
score = 1 # match
return score
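# A small worked example (toy statuses, not real orthogroups) of the modified
# Jaccard distance used below: matching responsive statuses score 1, mismatches
# score 0, and pairs that are absent ("None") or unchanged in both ("NC") are
# excluded from the denominator.
toy_pairs = [("UP", "UP"), ("UP", "DOWN"), ("NC", "NC"), ("None", "UP")]
toy_scores = pd.Series([cal_jd(a, b) for a, b in toy_pairs])
print(1 - (toy_scores.sum() / toy_scores.count()))  # 1 match of 2 scored pairs -> 0.5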
jd_dict = {}
jd_counts = {}
for i in range(len(spe_stress)):
# container for jaccard distances
jd_con = []
# container for counts of OG per comparison
count_con = []
for j in range(len(spe_stress)):
if spe_stress[i] == spe_stress[j]:
jd_con.append(0)
# reflects the number of OG != "None"
count_con.append(sum(OGstats[spe_stress[i]] != "None"))
else:
interdf = OGstats[[spe_stress[i], spe_stress[j]]]
scoreseries = interdf.apply(lambda row: cal_jd(row[spe_stress[i]], row[spe_stress[j]]), axis=1)
jd_con.append(1 - (scoreseries.sum()/scoreseries.count()))
count_con.append(scoreseries.count())
jd_dict[spe_stress[i]] = jd_con
jd_counts[spe_stress[i]] = count_con
# Initialize dataframe for spe_stress (rows)/spe_stress (cols) [jaccard distance]
JDstats = pd.DataFrame(jd_dict, index = spe_stress, columns = spe_stress)
JDstats.to_csv(jhdir + 'JDstats.txt', sep="\t")
# Initialize dataframe for spe_stress (rows)/ spe_stress (cols)
# [OG count used to calculate jaccard distance]
JDcounts = pd.DataFrame(jd_counts, index = spe_stress, columns = spe_stress)
JDcounts.to_csv(jhdir + 'JDcounts.txt', sep="\t")
# clustermap -- distance matrix
def label_color(xlabel):
if "heat" in xlabel or 'Mpo_H' in xlabel:
return "firebrick"
elif "cold" in xlabel or 'Mpo_C' in xlabel:
return "steelblue"
elif "light" in xlabel or 'Mpo_L' in xlabel:
return "darkorange"
elif "dark" in xlabel or 'Mpo_D' in xlabel:
return "black"
elif "salt" in xlabel or 'Mpo_S' in xlabel:
return "rebeccapurple"
elif "mannitol" in xlabel or 'Mpo_M' in xlabel:
return "mediumvioletred"
elif "nitrogen" in xlabel or 'Mpo_N' in xlabel:
return "forestgreen"
else:
return "slategrey"
import scipy.spatial as sp, scipy.cluster.hierarchy as hc
colnames = JDstats.index.to_series().apply(lambda row: row.split("_")[0])
specoldict = dict(zip(colnames.unique(), "rgbcy"))
specol = colnames.map(specoldict)
linkage = hc.linkage(sp.distance.squareform(JDstats), method='single')
g = sns.clustermap(JDstats,
row_linkage = linkage,
col_linkage = linkage,
row_colors = specol,
xticklabels = True,
yticklabels=True)
newlabel = [x.get_text() for x in g.ax_heatmap.axes.get_xticklabels()]
longlabel = ['heat', 'cold', 'light', 'dark', 'salt', 'mannitol', 'nitrogen']
shortlabel = ['H', 'C', 'L', 'D', 'S', 'M', 'N']
for i, y in enumerate(longlabel):
newlabel = [x.replace(y, shortlabel[i]) for x in newlabel]
newlabel = [x.replace('_', ' ') for x in newlabel]
g.ax_heatmap.axes.set_xticklabels(newlabel, rotation=90, fontsize=16)
for tick_label in g.ax_heatmap.axes.get_yticklabels():
tick_text = tick_label.get_text()
tick_label.set_color(label_color(tick_text))
g.ax_heatmap.axes.set_yticklabels(newlabel, fontsize=16)
plt.savefig(dir_path + 'figures/suppfig5.png')
# + [markdown] id="3oBtH2a37ckp"
# ### Figure 3: Stress responsiveness
# + id="XsnoIc2NYqKh"
# adapted from stress_res_og.py
OFpath = dir_path + 'prep_files/Orthogroups.txt'
jhdir = dir_path + 'prep_files/interspeGF/'
prefix = {'Cpa|' : 'Cpa',
'Cre' : 'Cre',
'Mp' : 'Mpo',
'ChrUn' : 'Osa',
'LOC_Os' : 'Osa',
'AT' : 'Ath'}
spelist = list(prefix.values())
def spe_finder(gene):
'''
Finds the species the gene belongs to
Parameters
----------
gene : str
Gene ID.
Returns
-------
spestat : str
Corresponding species of gene.
'''
spestat = 'Other'
for k in list(prefix.keys()):
if gene.startswith(k):
spestat = prefix[k]
return spestat
# order of species
spe_order = ['Cpa', 'Cre', 'Mpo', 'Osa', 'Ath']
spe_class = [['Angiosperm', ['Osa', 'Ath']],
['Embryophyte', ['Mpo', 'Osa', 'Ath']],
['Viridiplantae', spe_order[1:]],
['Archaeplastida', spe_order]]
def speclass(spelist):
spec = []
for spe in spelist:
for i, c in enumerate(spe_class):
if spe in c[1]:
spec.append(i)
break
return spe_class[max(spec)][0]
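# Quick illustrative check (the gene IDs below are only examples) of the prefix
# lookup and the phylostratum assignment: the most inclusive clade needed to
# cover all listed species wins.
print(spe_finder("AT1G01010"), spe_finder("Mp1g00005"))  # Ath Mpo
print(speclass(["Osa", "Ath"]))   # Angiosperm
print(speclass(["Mpo", "Ath"]))   # Embryophyte
print(speclass(["Cpa", "Ath"]))   # Archaeplastida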
# initialise OG stats df
OGstats = pd.read_csv(jhdir + 'OGstats.txt', sep="\t", index_col=0)
newlabel = OGstats.columns.to_list()
longlabel = ['heat', 'cold', 'light', 'dark', 'salt', 'mannitol', 'nitrogen', 'drought']
shortlabel = ['H', 'C', 'L', 'D', 'S', 'M', 'N', 'M']
for i, y in enumerate(longlabel):
newlabel = [x.replace(y, shortlabel[i]) for x in newlabel]
OGstats.columns = newlabel
# initialise DGEs
DGEfile = dir_path + 'prep_files/Figure2_alldata_compiled_updated.txt'
DGElist = []
DGEbins = {}
# {"gene1_stress" : "UP/DOWN"}
spe_stress = []
with open(DGEfile, "r") as dgecon:
dgecon.readline()
for lino in dgecon:
gene, stress, L2FC_D2, L2FC_H2, annotation = lino.strip().split("\t")
# to account for difference in gene names in this file and in Orthogroups.txt and ARATH.fa
if stress.split("_")[0] == "Ath":
gene = gene.upper()
elif stress.split("_")[0] == "Cre":
gene = gene.split(".t")[0]
DGElist.append(gene)
binlist = [int(x[0].replace("'", "").split('.')[0]) for x in literal_eval(annotation)]
DGEbins[gene] = list(set(binlist))
DGElist = set(DGElist)
og_genes = {}
spespec_og = {} # list of species specific OGs, excludes OGs of species not included in analysis
other_og = {}  # list of non-species specific OGs, excludes OGs of species not included in analysis
with open(OFpath, 'r') as OFfile:
for line in OFfile:
og, val = line.split(': ')[0], line.rstrip().split(': ')[1]
og_species = list(set([spe_finder(x) for x in val.split(' ')]))
if len(og_species) == 1 and og_species[0] in spelist:
spespec_og[og] = og_species[0]
og_genes[og] = list(set(val.split(' ')) & DGElist)
elif len(og_species) > 1:
ogclass = speclass(og_species)
other_og[og] = ogclass
og_genes[og] = list(set(val.split(' ')) & DGElist)
# =============================================================================
#
# Stress-responsive OGs
#
# =============================================================================
from collections import Counter, defaultdict
df_coln = ['Archaeplastida', 'Viridiplantae', 'Embryophyte', 'Angiosperm']
# omit OGs that contain only 'None' across all stresses
oglist = OGstats.index.to_list()
suboglist = [x for x in oglist if set(OGstats.loc[x].to_list()) != {'None'}]
val_spespec_og = list(set(spespec_og) & set(suboglist))
val_spespec_og.sort()
val_other_og = list(set(suboglist) - set(val_spespec_og))
val_other_og.sort()
# df that contains only OGs that are not made up of 'None'
subdf = OGstats.loc[suboglist]
# dictionary to contain all statuses
og_stress_stat = defaultdict(list)
def update_stress_stat(newstat, phyla, dfcount, og, stresstype):
dfcount.loc[newstat, phyla] += 1
og_stress_stat[og].append(stresstype + '_' + newstat)
# to get counts per stress
stresslist = shortlabel[:-1]
def counts_per_stress(slabel):
stresstype = '_' + slabel
valid_exp = [x for x in OGstats.columns.to_list() if stresstype in x]
valid_spe = [x.split('_')[0] for x in valid_exp]
ordered_exp = [y for x in spe_order for y in valid_exp if x in y]
unique_spe = [x for x in spe_order if x in valid_spe]
stresssub = subdf[ordered_exp] # df containing only exps of required stress type
dumdict = {'UP': [0 for x in range(4 + len(unique_spe))],
'DOWN': [0 for x in range(4 + len(unique_spe))],
'AMB': [0 for x in range(4 + len(unique_spe))],
'MIXED': [0 for x in range(4 + len(unique_spe))],
'NR': [0 for x in range(4 + len(unique_spe))]}
dfcount = pd.DataFrame.from_dict(dumdict, orient='index', columns = df_coln + unique_spe)
nogroup = []
for og, ogclass in spespec_og.items(): # species specific OGs
kcount = Counter(stresssub.loc[og].to_list())
# species specific OGs
if kcount['None'] != len(valid_spe): # ignore if OG not valid for stress
if slabel == 'S' and ogclass == 'Osa':
if kcount['NC'] == 2: # for species specific OG that is 'NC' [Osa]
update_stress_stat('NR', ogclass, dfcount, og, slabel)
else: # kcount['NC']!= 2
                    stat = [x for x in kcount if x != 'None' and x != 'NC']
if len(stat) == 1: # only one type of UP/DOWN/AMB
update_stress_stat(stat[0], ogclass, dfcount, og, slabel)
else: # mixture of UP/DOWN/AMB
update_stress_stat('MIXED', ogclass, dfcount, og, slabel)
else:
if 'NC' not in kcount: # for species specific OG that is not 'NC'
stat = [x for x in kcount if kcount[x] == 1]
update_stress_stat(stat[0], ogclass, dfcount, og, slabel)
else: # for species specific OG that is 'NC'
update_stress_stat('NR', ogclass, dfcount, og, slabel)
for og, ogclass in other_og.items(): # non-species specific OGs
kcount = Counter(stresssub.loc[og].to_list())
if kcount['None'] != len(valid_spe): # ignore if OG not valid for stress
notnil = [x for x in list(kcount.keys()) if x != 'None' and x != 'NC']
if len(notnil) > 0:
if len(notnil) > 1: # contains combination of UP/DOWN/AMB
update_stress_stat('MIXED', ogclass, dfcount, og, slabel)
elif len(notnil) == 1: # contains only one type of status apart from 'NR'
if kcount[notnil[0]] > 1: # if UP/DOWN/AMB appear more than once
update_stress_stat(notnil[0], ogclass, dfcount, og, slabel)
elif kcount[notnil[0]] == 1: # if UP/DOWN/AMB only appear once
update_stress_stat('NR', ogclass, dfcount, og, slabel)
else:
nogroup.append(og)
else: # OGs that only have 'None' and 'NC'
update_stress_stat('NR', ogclass, dfcount, og, slabel)
else: # all 'None', meaning that OG is valid in other species not present in this analysis
update_stress_stat('NR', ogclass, dfcount, og, slabel)
return dfcount.T
dH, dC, dL, dD, dS, dM, dN = [counts_per_stress(x) for x in stresslist]
dflist = dH, dC, dL, dD, dS, dM, dN
wdir = dir_path + 'phylostrata/'
wdir_safe = dir_path_safe + 'phylostrata/'
if not os.path.exists(wdir):
# !mkdir $wdir_safe
for i, df in enumerate(dflist):
df.to_csv(wdir + stresslist[i] + '_df.txt', sep="\t")
# =============================================================================
#
# Quantifying stress responsiveness of Orthogroups
#
# =============================================================================
# intermediate container for new df
resog_dict = {}
for dicto in [other_og, spespec_og]:
for og, phyla in dicto.items():
reslist = [x for x in og_stress_stat[og] if 'NR' not in x]
resog_dict[og] = [phyla, reslist, len(reslist), og_genes[og]]
resog_df = pd.DataFrame.from_dict(resog_dict, orient='index', columns=['Phylostrata', 'Responsive in', 'Count', 'Genes'])
resog_df.to_csv(wdir + 'resog_df.txt', sep="\t")
sorder = df_coln + spe_order
grouped_count = resog_df.groupby(['Phylostrata','Count']).count()['Genes'].unstack().reindex(sorder)
grouped_per = grouped_count.copy()
for row in sorder:
grouped_per.loc[row] = grouped_per.loc[row].apply(lambda x: (x/grouped_count.loc[row].sum())*100)
sorder.remove('Angiosperm')
g = grouped_count.loc[sorder].plot.bar(stacked=True, ylabel = 'Count')
g.legend(bbox_to_anchor=(1, 0.75))
g2 = grouped_per.loc[sorder,[i for i in range(1,8)]].plot.bar(stacked=True, ylabel='Percentage (%)')
g2.legend(bbox_to_anchor=(1, 0.75))
# Percentage of OGs from various phylostrata that are responsive in respective number of stresses (x-axis)
grouped_per_bycount = grouped_count.copy()
for col in grouped_per_bycount.columns.to_list():
grouped_per_bycount[col] = grouped_per_bycount[col].apply(lambda x: (x/grouped_count[col].sum())*100)
g3 = grouped_per_bycount.loc[sorder,[i for i in range(1,8)]].T.plot.bar(stacked=True, ylabel='Percentage (%)')
g3.legend(bbox_to_anchor=(1, 1))
# log y of number og OGs responsive in respective number of stresses (x axis)
g = grouped_count.loc[sorder].T.plot.bar(logy=True, ylabel = 'Number of OGs')
g.legend(bbox_to_anchor=(1, 0.75))
countbysres = grouped_count.loc[sorder].T
countbysres.to_csv(wdir + 'countbystressres.txt', sep='\t')
# Mapman bins
import seaborn as sns
import math
merdict = literal_eval(open(dir_path + 'prep_files/merdict.txt', 'r').read())
# By Phylo
catdict = {} # Mapman bin count for Phylostrata that are stress responsive (Count > 0)
for cat in sorder:
catbins = [z for x in resog_df[(resog_df.Phylostrata == cat) & (resog_df.Count > 0)].Genes.to_list() for y in x for z in DGEbins[y]]
catdict[cat] = Counter(catbins)
catdf = pd.DataFrame.from_dict(catdict)
catdf.sort_index(inplace=True)
catdf.reset_index(inplace=True)
catdf.columns = ['Mapman bins'] + sorder
catdf['Mapman bins'] = catdf['Mapman bins'].apply(lambda x: merdict[x])
catdf.set_index('Mapman bins', inplace=True)
catperdf = catdf.copy()
catperlogdf = catdf.copy()
for x in catperdf.columns.to_list():
total = catdf[x].sum()
catperdf[x] = catdf[x].apply(lambda x: (x/total)*100)
catperlogdf[x] = catdf[x].apply(lambda x: math.log((x/total)*100,2))
catperdf.fillna(float(0), inplace=True)
f = sns.clustermap(catperdf, yticklabels=True, col_cluster=False) # to get linkage for logged values (percentages can be filled 0 but cannot fill NaN with 0 for logged values)
row_linkage = f.dendrogram_row.linkage
sns.clustermap(catperlogdf, yticklabels=True, col_cluster=False, row_linkage = row_linkage, cmap='coolwarm')
# By Stress responsiveness
countdict = {} # Mapman bin count for Phylostrata that are stress responsive (Count > 0)
for count in [i for i in range(1,8)]:
countbins = [z for x in resog_df[(resog_df.Count == count)].Genes.to_list() for y in x for z in DGEbins[y]]
countdict[str(count)] = Counter(countbins)
countdf = pd.DataFrame.from_dict(countdict)
countdf.sort_index(inplace=True)
countdf.reset_index(inplace=True)
countdf.columns = ['Mapman bins'] + [i for i in range(1,8)]
countdf['Mapman bins'] = countdf['Mapman bins'].apply(lambda x: merdict[x])
countdf.set_index('Mapman bins', inplace=True)
# column normalised
countperdf = countdf.copy()
countperlogdf = countdf.copy()
for x in countperdf.columns.to_list():
total = countdf[x].sum()
countperdf[x] = countdf[x].apply(lambda x: (x/total)*100)
countperlogdf[x] = countdf[x].apply(lambda x: math.log((x/total)*100,2))
countperdf.fillna(float(0), inplace=True)
f2 = sns.clustermap(countperdf, yticklabels=True, col_cluster=False, cmap='coolwarm') # to get linkage for logged values (percentages can be filled 0 but cannot fill NaN with 0 for logged values)
row_linkage2 = f2.dendrogram_row.linkage
sns.clustermap(countperlogdf, yticklabels=True, col_cluster=False, row_linkage = row_linkage2, cmap='coolwarm')
# row normalised
countper_rownorm_df = countdf.copy()
countper_rownorm_logdf = countdf.copy()
for x in countper_rownorm_df.index.to_list():
total = countdf.loc[x].sum()
countper_rownorm_df.loc[x] = countdf.loc[x].apply(lambda x: (x/total)*100)
countper_rownorm_logdf.loc[x] = countdf.loc[x].apply(lambda x: math.log((x/total)*100, 2))
countper_rownorm_df.fillna(float(0), inplace=True)
f3 = sns.clustermap(countper_rownorm_df, yticklabels=True, col_cluster=False,cmap='coolwarm') # to get linkage for logged values (percentages can be filled 0 but cannot fill NaN with 0 for logged values)
row_linkage3 = f3.dendrogram_row.linkage
sns.clustermap(countper_rownorm_logdf, yticklabels=True, col_cluster=False,
row_linkage = row_linkage3, cmap='coolwarm', figsize=(5,6))
plt.savefig(dir_path + 'figures/fig3a', dpi=600)
# + [markdown] id="exDaka827fIh"
# ### Figure 4: Upset plot and summary of DEGs in Marchantia
# + id="A2YBe6KzehDL"
# Fig 4A and B (adapted from DGE_count_sizecorr.py)
cross = pd.read_csv(dir_path + 'prep_files/mpo/deseq/resSig_compiled.txt', sep='\t')
cross.stress = [x.split('_')[1] for x in list(cross.stress)]
stress_l = list(cross.stress.unique())
stress_l.sort(key=lambda x: len(x))
c_dgecount = cross.groupby(['stress', 'L2FC_D2']).count().gene.to_frame(name='count')
unstacked = c_dgecount.unstack().reindex(stress_l)
ax = unstacked.plot.bar(figsize=(7,3), stacked=True, ylabel='Number of DEGs', color=['navy', 'firebrick'])
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles=handles[::-1], labels=[x.split(', ')[1].split(')')[0] for x in labels][::-1])
plt.savefig(dir_path + 'figures/fig4a.png', dpi=600)
wdir = dir_path + 'prep_files/'
infile = 'phase1n2_measurements_nooutliers.txt'
measurements = pd.read_csv(wdir + infile, sep='\t')
def ss_grab(stress, condition):
"""
    Slice the measurements for the given stress and condition.
Parameters
----------
stress : string
Stress of interest.
condition : string
Condition of interest.
Returns
-------
sssub : dataframe
df of single stress.
"""
sssub = measurements[(measurements.Stress == stress) & (measurements.Condition == condition)]
return sssub
srep_keys = [['Cold', '3'],
['Heat', '33'],
['Salt', '40'],
['Mannitol', '100'],
['Light', '435'],
['Dark', '3'],
['Nitrogen', '0']]
srepdf = measurements[(measurements.Stress == 'Cold') & (measurements.Condition == '3')]
for s, c in srep_keys[1:]:
srepdf = pd.concat([srepdf, ss_grab(s, c)])
crepdf = measurements[measurements.Condition == 'mixed']
m_nocon = pd.concat([srepdf, crepdf])
m_nocon.Stress = [x[0] if len(x) > 2 else x for x in m_nocon.Stress]
noHL = m_nocon[m_nocon.Stress != 'HL']
avg_meas = noHL.groupby('Stress').mean()[['Parea', 'Earea']]
avg_meas = avg_meas.reindex(stress_l)
totaldeg = cross.groupby(['stress']).count()['L2FC_D2']
totaldeg = totaldeg.reindex(stress_l)
avg_meas['totaldeg'] = totaldeg
avg_meas = avg_meas[['totaldeg', 'Parea', 'Earea']]
# size plots by df
ax = avg_meas.plot.scatter(x='totaldeg', y='Parea', color='orange', label='Area (Day 15)')
for ind, dat in avg_meas.iterrows():
ax.annotate(ind, (dat['totaldeg'], dat['Parea']),
xytext=(-4,-12), textcoords='offset points')
avg_meas.plot.scatter(x='totaldeg', y='Earea', color='navy', label='Area (Day 21)', ax=ax)
for ind, dat in avg_meas.iterrows():
ax.annotate(ind, (dat['totaldeg'], dat['Earea']),
xytext=(-4,-12), textcoords='offset points')
# size plot with regression
from scipy import stats
q_colnames = avg_meas.columns.to_list()
dicto = {'Parea' : 'Day 15', 'Earea' : 'Day 21'}
def plot_reg(df, title):
labels = []
col = q_colnames[0]
for col2 in q_colnames[1:]:
plt.scatter(col, col2, data=df)
#m, c = np.polyfit(df[col], df[col2], 1)
m, c, r_value, p_value, std_err = stats.linregress(df[col], df[col2])
for ind, dat in avg_meas.iterrows():
plt.annotate(ind, (dat[col], dat[col2]),
xytext=(-4,-12), textcoords='offset points')
plt.plot(df[col], m*df[col] + c)
labels.append(dicto[col2] + ' (R\u00b2: ' + str(round(r_value**2,2)) + ', p: ' + str(round(p_value, 2)) + ')')
plt.legend(labels)
plt.xlabel('DEG count')
plt.ylabel('Size (mm\u00b2)')
plt.savefig(dir_path + 'figures/fig4b.png', dpi=600)
plot_reg(avg_meas, 'Number of DEGs vs Size')
# + id="0NoknTLri0z9"
# Fig 4C and D (adapted from upset.py)
import upsetplot
from collections import defaultdict
wdir = dir_path + 'prep_files/mpo/deseq/'
odir = wdir + 'upset/'
odir_safe = dir_path_safe + 'prep_files/mpo/deseq/' + 'upset/'
if not os.path.exists(odir):
# !mkdir $odir_safe
data = pd.read_csv(wdir + 'resSig_compiled.txt', sep = '\t')
### FUNCTIONS ###
def upset_matrix(set_dict, stress_types):
upset_data_sub = upsetplot.from_contents({k: v for k, v in set_dict.items() if k in stress_types})
    return upset_data_sub
def plot_selected(cond_dict, cond_list, filename, title, orient = "horizontal", cutoff=50):
df_set = upset_matrix(cond_dict, cond_list)
df_set = df_set.sort_index()
# preparation to output all data
index_names = list(df_set.index.names)
index_list = df_set.index.to_list()
set_count = Counter(index_list)
counter_list = [[k, v] for k, v in set_count.items()]
counter_list.sort(key = lambda x: x[1], reverse=True)
# writing output to file
with open(filename + "_matrix.txt", "w+") as ofile:
ofile.write("\t".join(index_names + ['count', 'genes']) + "\n")
for i in counter_list:
glist = df_set.loc[i[0],:].id.to_list()
ofile.write("\t".join([str(int(x)) for x in i[0]] + [str(i[1]), str(glist)]) + "\n")
# writing top 50 to file
set_cutoff = set_count.most_common(cutoff)
selection = [x[0] for x in set_cutoff]
with open(filename + "_top50.txt", "w+") as cfile:
cfile.write("\t".join([str(index_names), 'count', 'genes']) + "\n")
for i in range(len(set_cutoff)):
glist = df_set.loc[set_cutoff[i][0],:].id.to_list()
cfile.write("\t".join([str(set_cutoff[i][0]), str(set_cutoff[i][1]), str(glist)]) + "\n")
# selection for plotting
sel_matrix = df_set.loc[selection[0], :]
for i in range(1, len(selection)):
sel_matrix = sel_matrix + df_set.loc[selection[i],:]
upsetplot.plot(sel_matrix, orientation = orient, sort_by = 'cardinality')
plt.title(title, size=20)
if "upreg" in filename:
figname = 'c'
else:
figname = 'd'
plt.savefig(dir_path + 'figures/Fig4' + figname + '.png', dpi=600)
### END ###
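# A minimal, hypothetical example of the membership matrix that upset_matrix
# builds via upsetplot.from_contents: every gene becomes a row indexed by the
# boolean combination of conditions it belongs to, with the gene ID kept in
# the 'id' column (toy gene/condition names only).
toy_sets = {"H": {"g1", "g2", "g3"}, "S": {"g2", "g3"}, "HS": {"g3", "g4"}}
toy_matrix = upset_matrix(toy_sets, ["H", "S", "HS"])
print(toy_matrix)
# upsetplot.plot(toy_matrix, sort_by='cardinality') would draw the toy plot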
# Reshape data: for every stress, collect its up- and downregulated genes
cond_dict_U = defaultdict(list)  # stress -> upregulated genes
cond_dict_D = defaultdict(list)  # stress -> downregulated genes
for index, row in data.iterrows():
if row['L2FC_D2'] == 'UP':
cond_dict_U[row['stress'].split("_")[1]].append(row['gene'])
elif row['L2FC_D2'] == 'DOWN':
cond_dict_D[row['stress'].split("_")[1]].append(row['gene'])
all_stress_list = [x.split('_')[1] for x in data.stress.unique()]
# initialise dictionaries of up and downregulated genes for each condition
cond_dict_U_set = dict()
cond_dict_D_set = dict()
for k, v in cond_dict_U.items():
cond_dict_U_set[k] = set(v)
for k, v in cond_dict_D.items():
cond_dict_D_set[k] = set(v)
# Plot horizontal (default)
plot_selected(cond_dict = cond_dict_D_set,
cond_list = all_stress_list,
filename = odir + "all_downreg",
title = "Downregulated genes")
plot_selected(cond_dict = cond_dict_U_set,
cond_list = all_stress_list,
filename = odir + "all_upreg",
title = "Upregulated genes")
# + [markdown] id="_ENdRwbI7iDw"
# ### Supp Figs 6 & 7, Figure 5: Inter-stress (Marchantia only) comparison
# + id="2DTxA1RKpwvR"
# Supp. figs 6 & 7 (adapted from indivenn_hm.py)
from collections import defaultdict
from matplotlib_venn import venn3
wdir = dir_path + 'prep_files/mpo/deseq/'
odir = dir_path + 'figures/'
data = pd.read_csv(wdir + 'resSig_compiled.txt', sep = '\t')
# Mercator bin conversion
dicto = literal_eval(open(dir_path + 'prep_files/merdict.txt', 'r').read())
all_s = [x.split("_")[1] for x in data.stress.unique()]
single = [x.split("_")[1] for x in data.stress.unique() if len(x.split("_")[1]) == 1]
cross = [x.split("_")[1] for x in data.stress.unique() if len(x.split("_")[1]) == 2]
data.annotation = data.annotation.apply(literal_eval)
data["mername"] = data.annotation.apply(lambda x: dicto[int(x[0][0].split('.')[0])])
dict_A = defaultdict(list)
dict_U = defaultdict(list)
dict_D = defaultdict(list)
def sum_to_dict(dicto, stress, reg):
if reg == "ALL":
subset = data[(data.stress == "Mpo_" + stress)]
else:
subset = data[(data.stress == "Mpo_" + stress) & (data.L2FC_D2 == reg)]
dicto[stress].append(set(subset.gene.to_list()))
dicto[stress].append(subset.mername.to_list())
def dict_to_df(dicto):
df = pd.DataFrame.from_dict(dicto, orient='index', columns=["gene", "mername"])
return df
for s in all_s:
#sum_to_dict(dict_A, s, "ALL")
sum_to_dict(dict_U, s, "UP")
sum_to_dict(dict_D, s, "DOWN")
#df_A = dict_to_df(dict_A)
df_U = dict_to_df(dict_U)
df_D = dict_to_df(dict_D)
def plot_venn(df, s1, s2, c1, title, axis):
venn3([df.loc[s1].gene, df.loc[s2].gene, df.loc[c1].gene],
(s1, s2, c1),
ax = axis)
axis.set_title(title, size=20)
# create subplots
xlen = 4
ylen = math.ceil(len(all_s)/4)
figw = xlen * 4
figh = ylen * 3.5
a_axes = string.ascii_uppercase[:len(all_s)]
def plot_subplot(df, title_ext):
axa = plt.figure(constrained_layout=True,
figsize=(figw, figh)).subplot_mosaic(
"""
ABCD
EFGH
IJKL
MNOP
QR..
"""
)
for c in range(len(cross)):
st = cross[c]
plot_venn(df, st[0], st[1], st, st + title_ext, axa[a_axes[c]])
plt.savefig(odir+'supp_fig6or7' + title_ext +'.png', dpi=600)
#df_col = [[df_A, ''], [df_U, "_upregulated"], [df_D, "_downregulated"]]
df_col = [[df_U, "_upregulated"], [df_D, "_downregulated"]]
for x in df_col:
df_type, ext = x[0], x[1]
plot_subplot(df_type, ext)
# + id="8EibR5DSslOq"
# Figure 5 (adapted from plot_venn_sum.py)
from collections import defaultdict, Counter
from matplotlib_venn import venn3, venn3_circles
import random
from scipy import stats
wdir = dir_path + 'prep_files/mpo/deseq/'
odir = wdir + 'indivenn_hm/'
data = pd.read_csv(wdir + 'resSig_compiled.txt', sep = '\t')
# Mercator
### DICTIONARY OF MERCATOR BINS ###
mfile = dir_path + 'mercator/MpoProt.results.txt'
meranno = defaultdict(list)
merbin = defaultdict(list)
map2anno = {}
merfile = open(mfile, 'r')
merfile.readline()
for line in merfile:
linecon = line.rstrip().replace("'", "").split("\t")
if len(linecon) == 5:
bincode, name, identifier, desc, ptype = linecon
meranno[identifier].append(dicto[int(bincode.split('.')[0])])
merbin[identifier].append('.'.join(bincode.split('.')[:2]))
if len(linecon[0].split('.')) == 2:
map2anno[linecon[0]] = linecon[1]
all_s = [x.split("_")[1] for x in data.stress.unique()]
single = [x.split("_")[1] for x in data.stress.unique() if len(x.split("_")[1]) == 1]
cross = [x.split("_")[1] for x in data.stress.unique() if len(x.split("_")[1]) == 2]
data.annotation = data.annotation.apply(literal_eval)
data["mername"] = data.annotation.apply(lambda x: [dicto[int(y[0].split('.')[0])] for y in x]) # different from cell above, hence the repetitive code
dict_A = defaultdict(list)
dict_U = defaultdict(list)
dict_D = defaultdict(list)
def sum_to_dict(dicto, stress, reg):
if reg == "ALL":
subset = data[(data.stress == "Mpo_" + stress)]
else:
subset = data[(data.stress == "Mpo_" + stress) & (data.L2FC_D2 == reg)]
dicto[stress].append(set(subset.gene.to_list()))
dicto[stress].append([y for x in subset.mername.to_list() for y in x])
dicto[stress].append(['.'.join(y[0].split('.')[:2]) for x in subset.annotation.to_list() for y in x])
def dict_to_df(dicto):
df = pd.DataFrame.from_dict(dicto, orient='index', columns=["gene", "mername", "mapbin2"])
return df
for s in all_s:
sum_to_dict(dict_A, s, "ALL")
sum_to_dict(dict_U, s, "UP")
sum_to_dict(dict_D, s, "DOWN")
df_A = dict_to_df(dict_A)
df_U = dict_to_df(dict_U)
df_D = dict_to_df(dict_D)
# =============================================================================
#
# # Summary of stress response
#
# =============================================================================
# Q1 : ji_cal(a, b) [%]
def ji_cal(a, b):
# jaccard index calculation
return len(a&b) / len(a|b)
# Q2: |(A − AB)/A − (B − AB)/B| [% difference]
def suppInX(a, b, ab):
return len((a-ab))/len(a) - len((b-ab))/len(b)
# Q3: (AB - A - B) / AB [%]
def novel(a, b, ab):
return len(ab - a - b) / len(ab)
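# Toy example (hypothetical gene sets) of the three metrics defined above:
# similarity of the two single stresses, the suppression imbalance, and the
# fraction of the cross-stress response absent from both single stresses.
toy_a = {"g1", "g2", "g3", "g4"}   # DEGs of single stress A
toy_b = {"g3", "g4", "g5"}         # DEGs of single stress B
toy_ab = {"g3", "g4", "g6"}        # DEGs of the cross stress AB
print(ji_cal(toy_a, toy_b))            # 2 shared / 5 in union = 0.4
print(suppInX(toy_a, toy_b, toy_ab))   # 2/4 - 1/3 ≈ 0.17
print(novel(toy_a, toy_b, toy_ab))     # {"g6"} / 3 ≈ 0.33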
def q_col(df, colnames):
"""
Collates the params for each cross stress and output in df
Parameters
----------
    df : dataframe
        dataframe to use (all genes, upreg/downreg only).
    colnames : list
        names for the three metric columns of the output.
    Returns
    -------
    q_df : dataframe
        dataframe containing the metrics for every cross stress.
"""
q_dict = {}
for c in range(len(cross)):
st = cross[c]
a = df.loc[st[0]].gene
b = df.loc[st[1]].gene
ab = df.loc[st].gene
q_dict[st] = [
ji_cal(a,b),
#perXInAB(a,ab),
#perXInAB(b,ab),
suppInX(a,b,ab),
#suppInAB(a,b,ab),
novel(a,b,ab)
]
q_df = pd.DataFrame.from_dict(q_dict, orient="index", columns = colnames)
return q_df
q_colnames = ["similarity", "suppression", "novel interaction"]
q_A, q_U, q_D = [q_col(df_A, q_colnames), q_col(df_U, q_colnames), q_col(df_D, q_colnames)]
qdf_col = [[q_U, "Upregulated DEGs"], [q_D, "Downregulated DEGs"]]
def plot_q_subplots(df, outerax):
ax = [axe[x] for x in outerax]
#plt.suptitle(title, fontsize=14)
for i, axis in enumerate(ax):
if q_colnames[i] == "suppression":
sns.heatmap(df[q_colnames[i]].to_frame().transpose(), cmap='coolwarm', ax=axis)
else:
sns.heatmap(df[q_colnames[i]].to_frame().transpose(), cmap='Blues', ax=axis)
axis.set_yticklabels([q_colnames[i]], rotation=0)
cbar = axis.collections[0].colorbar
minval = round(df[q_colnames[i]].min(),2)
maxval = round(df[q_colnames[i]].max(),2)
while round(minval*100,2) % 5 != 0:
minval += 0.01
while round(maxval*100,2) % 5 != 0:
maxval -= 0.01
cbar.set_ticks([minval, maxval])
def plot_reg(df, outerax):
labels = []
statscol = []
for i, col in enumerate(q_colnames[:-1]):
for j, col2 in enumerate(q_colnames[i+1:]):
outerax.scatter(col, col2, data=df)
m, c, r_value, p_value, std_err = stats.linregress(df[col], df[col2])
statscol.append([m, c, r_value, p_value, std_err])
# m, c = np.polyfit(df[col], df[col2], 1)
outerax.plot(df[col], m*df[col] + c)
            labels.append(col[:3] + ' v ' + col2[:3] + r' ($\mathregular{R^{2}}$: '+str(round(r_value**2,1))+', p: ' + str('{:.2f}'.format(round(p_value,2))+')'))
outerax.legend(labels, fontsize="x-small")
return statscol
def dum_venn(a_b, c_a, c_b, c_ab, ax, col, title, ac=20, bc=20, cc=20):
'''
Parameters
----------
a_b : int
Size of A&B.
c_a : int
Size of A&C-B.
c_b : int
Size of B&C-A.
c_ab : int
        Size of C&(A&B).
ax : axis handle
Axis handle of subplot to plot into.
col : list
List containing lists of patch id and corresponding colour.
title:
ac : int, optional
Size of set a. The default is 20.
bc : int, optional
Size of set b. The default is 20.
cc : int, optional
Size of set c. The default is 20.
Returns
-------
None.
'''
# =============================================================================
# a_b = 8 # A&B
# c_a_b = 4 # C&A-B/ C&B-A
# c_ab = 3 # C&(A&B)
# =============================================================================
dum = list(string.ascii_uppercase + string.ascii_lowercase)
random.shuffle(dum)
a = set(dum[:20])
b = set(list(a)[:a_b] + [x for x in dum if x not in a][:bc-a_b])
c = set(list(a-b)[:c_a] +
list(b-a)[:c_b] + list(a&b)[:c_ab] +
[x for x in dum if x not in a and x not in b][:cc-c_a-c_b-c_ab])
v = venn3([a, b, c],
('A', 'B', 'AB'),
ax = ax) # ax = axis
venn3_circles([a, b, c], linewidth=1, color='k', ax=ax)
for i in col:
v.get_patch_by_id(i[0]).set_color(i[1])
for idx, subset in enumerate(v.subset_labels):
v.subset_labels[idx].set_visible(False)
ax.set_title(title, fontsize=16)
# =============================================================================
# #
# # Initialising subplot
# #
# =============================================================================
#figsize=(figw, figh)
top_mosaic = [["v1", "v2", "v3"]]
eq_mosaic = [ ["e1", "e2", "e3"] ]
middle_mosaic = [
["u1", "d1"],
["u2", "d2"],
["u3", "d3"]
]
bottom_mosaic = [["r1", "r2"]]
figw, figh = 11, 9
fig = plt.figure(figsize=(figw, figh))
axc = fig.subplot_mosaic(
top_mosaic,
gridspec_kw={
"bottom": 0.75,
"top": 1,
#"wspace": 0.5,
#"hspace": 0.5,
}
)
axd = fig.subplot_mosaic(
eq_mosaic,
gridspec_kw={
"bottom": 0.55,
"top": 0.8,
#"wspace": 0.5,
#"hspace": 0.5,
}
)
axe = fig.subplot_mosaic(
middle_mosaic,
gridspec_kw={
"bottom": 0.38,
"top": 0.6,
#"wspace": 0.5,
"hspace": 0.2,
}
)
axf = fig.subplot_mosaic(
bottom_mosaic,
gridspec_kw={
"bottom": 0,
"top": 0.3,
#"wspace": 0.5,
#"hspace": 0.5,
}
)
for axy in ['e1', 'e2', 'e3']:
axd[axy].axis('off')
for axy in ["v1", "v2", "v3"]:
axc[axy].set_anchor('N')
axd['e1'].text(0.39, 0.45, r"$\frac{A \cap B}{A \cup B}$", fontsize=20)
axd['e2'].text(0.04, 0.45, r"$\frac{A-B-AB}{A}-\frac{B-A-AB}{B}$", fontsize=20)
axd['e3'].text(0.29, 0.45, r"$\frac{AB-A-B}{AB}$", fontsize=20)
for seq, x in enumerate(qdf_col):
df_type, title = x[0], x[1]
plot_q_subplots(df_type, [x[seq] for x in middle_mosaic])
for axy in ['u1', 'u2', 'd1', 'd2']:
axe[axy].set_xticklabels([])
axe[axy].xaxis.set_visible(False)
for axy in ['d1', 'd2', 'd3']:
axe[axy].set_yticklabels([])
axe[axy].yaxis.set_visible(False)
v1col = [['100', 'white'], ['110', 'limegreen'],
['101', 'white'], ['111', 'limegreen'],
['010', 'white'], ['011', 'white'],
['001', 'white']]
v2col = [['100', 'red'], ['110', 'white'],
['101', 'white'], ['111', 'white'],
['010', 'cornflowerblue'], ['011', 'white'],
['001', 'white']]
v3col = [['100', 'white'], ['110', 'white'],
['101', 'white'], ['111', 'white'],
['010', 'white'], ['011', 'white'],
['001', 'darkorchid']]
dum_venn(a_b=8,c_a=4, c_b = 4,c_ab=3,
ax=axc['v1'], col=v1col, title='Similarity')
dum_venn(a_b=8, c_a=10, c_b=6, c_ab=3,
ax=axc['v2'], col=v2col, title='Suppression')
dum_venn(a_b=8, c_a=4, c_b=4, c_ab=3,
ax=axc['v3'], col=v3col, title='Novel interaction')
reg_stats = []
for i, x in enumerate(qdf_col):
df_type, title = x[0], x[1]
df_abs = df_type[:]
df_abs.suppression = abs(df_abs.suppression)
reg_stats.append(plot_reg(df_abs, axf[bottom_mosaic[0][i]]))
plt.savefig(dir_path+'figures/Fig5A_E.png', dpi=600)
# + id="zd8VylDwyLWn"
# Fig 5F refer to l2_en_jaccard_hm.png
# =============================================================================
#
# Summary: What is the dominant effect of each stress?
#
# =============================================================================
def sum_df(df):
dicto_sum = {}
for ss in single:
ori = [x for x in cross if ss in x]
relcross = [y for x in cross if ss in x for y in x if ss not in y]
sim = [ji_cal(df.loc[x[0]].gene, df.loc[x[1]].gene) for x in ori]
nov = [novel(df.loc[x[0]].gene, df.loc[x[1]].gene, df.loc[x].gene) for x in ori]
sup = [suppInX(df.loc[ss].gene, df.loc[x].gene, df.loc[ori[i]].gene) for i, x in enumerate(relcross)]
for i, x in enumerate(relcross):
dicto_sum[ss+x] = [ss, sim[i], sup[i], nov[i]]
df_sum= pd.DataFrame.from_dict(dicto_sum, orient='index', columns=['stress', 'similarity', 'suppression', 'novel'])
return df_sum
cond=['similarity', 'suppression', 'novel']
df_list = [sum_df(df_U), sum_df(df_D)]
# figs, axs = plt.subplots(3,2,
# sharex=True,
# sharey='row',
# constrained_layout=True,
# figsize=(7,6))
# for i, x in enumerate(df_list):
# for j, y in enumerate(cond):
# sns.violinplot(x='stress', y= y, data=x, ax= axs[j][i])
# for k in range(3):
# axs[k][1].set_ylabel('')
# for l in range(2):
# for m in range(2):
# axs[l][m].set_xlabel('')
# axs[0][0].set_title('Upregulated', fontsize=14)
# axs[0][1].set_title('Downregulated', fontsize=14)
# plt.savefig(odir + 'venn_sum.png', dpi=600)
# =============================================================================
#
# Enrichment
#
# =============================================================================
from statsmodels.stats.multitest import multipletests
import math
import numpy as np
ori_count = Counter([y for x in list(meranno.values()) for y in x])
mapbins = list(dicto.values())
def sig_df(df, sigcol, merdict, mapbins):
"""
Calculates and correct mapman bin enrichment p-value for all stresses
Returns dataframe
Parameters
----------
df : dataframe
df containing genes and corresponding mapman bins of DEGs.
sigcol : str
column name to use for enrichment
merdict : dict
corresponding dictionary of mapman annotation/ 2nd level bins to use
mapbins : list
list of mapman annotation/bins to use
Returns
-------
df_sig : dataframe
df summarising enrichment (corrected p-value) for each mapman bin (row)
and each stress (column).
"""
sig_sum = {}
for s in all_s:
s_count = Counter(df.loc[s][sigcol])
valid_bins = list(s_count.keys()) # bins found in stress
# initialise count dicitonary
sig_count = {}
for key in valid_bins:
sig_count[key] = 1
# random simulations
for i in range(1000):
shuffle = list(merdict.values())
random.shuffle(shuffle)
sub = shuffle[:len(df.loc[s].gene)]
sub_count = Counter([y for x in sub for y in x])
for mapman in valid_bins:
if sub_count[mapman] >= s_count[mapman]:
sig_count[mapman] += 1
# p-value calculation
pval_coll = []
for mapman in valid_bins:
pval = sig_count[mapman]/1000
# correction for pval > 1
if pval <= 1:
pval_coll.append(pval)
else:
pval_coll.append(float(round(pval)))
# BH correction for multiple testing
y = multipletests(pvals=pval_coll, alpha=0.05, method="fdr_bh")[1]
all_bins_corr_pval = []
for mapman in mapbins:
if mapman in valid_bins:
all_bins_corr_pval.append(y[valid_bins.index(mapman)])
else:
all_bins_corr_pval.append(None)
sig_sum[s] = all_bins_corr_pval
df_sig = pd.DataFrame.from_dict(sig_sum, orient='index', columns=mapbins)
return df_sig
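# A small, hypothetical illustration of the p-values produced inside sig_df:
# each empirical p-value is roughly (1 + shuffles matching or beating the
# observed bin count) / 1000, and the vector is then BH-corrected across bins.
toy_pvals = [4 / 1000, 30 / 1000, 200 / 1000, 900 / 1000]
print(multipletests(pvals=toy_pvals, alpha=0.05, method="fdr_bh")[1])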
def chunk(uval, dval):
if math.isnan(uval) and math.isnan(dval):
# not differentially regulated
cat = 0
elif uval >= 0.05 and (dval >= 0.05 or math.isnan(dval)):
# not enriched
cat = 0
elif dval >= 0.05 and (uval >= 0.05 or math.isnan(uval)):
# not enriched
cat = 0
elif uval < 0.05 and dval < 0.05:
# differentially up and downregulated in bin
cat = 2
elif dval < 0.05:
# differentially downregualted
cat = 1
elif uval < 0.05:
# differentially upregulated
cat = 3
return cat
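# Spot-check (arbitrary adjusted p-values) of the category encoding used for the
# heatmap below: 0 = not enriched (N), 1 = down only (D), 2 = up and down (UD),
# 3 = up only (U).
print(chunk(0.01, math.nan))   # 3
print(chunk(math.nan, 0.01))   # 1
print(chunk(0.01, 0.04))       # 2
print(chunk(0.5, math.nan))    # 0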
from matplotlib.colors import ListedColormap
cmap = ListedColormap(["lightgray", "royalblue", "violet", "firebrick"])
catno = 4
cbarticks = [(x/(catno*2))*(catno-1) for x in range(1,catno*2,2)]
# =============================================================================
#
# Enrichment (Part 2: 2nd level Mapman)
#
# =============================================================================
mapbins2 = list(set([y for x in list(merbin.values()) for y in x]))
mapbins2.sort(key=lambda x: (int(x.split('.')[0]), int(x.split('.')[1])))
df_sig_U2 = sig_df(df_U, 'mapbin2', merbin, mapbins2)
df_sig_D2 = sig_df(df_D, 'mapbin2', merbin, mapbins2)
df_sig_U2 = df_sig_U2.fillna(value=np.nan)
df_sig_D2 = df_sig_D2.fillna(value=np.nan)
cat_dict2 = {}
for mapman in list(df_sig_U2.columns):
cat_col = []
for stress in list(df_sig_U2.index):
uval, dval = df_sig_U2.loc[stress, mapman], df_sig_D2.loc[stress, mapman]
cat_col.append(chunk(uval,dval))
cat_dict2[mapman] = cat_col
df_combined_sig2 = pd.DataFrame.from_dict(cat_dict2, orient='index', columns=list(df_sig_U2.index))
df_combined_sig2 = df_combined_sig2.loc[df_combined_sig2.max(axis=1) > 0,:]
df_combined_sig2 = df_combined_sig2.loc[(df_combined_sig2 > 0).sum(axis=1) >2,:]
df_combined_sig2.reset_index(inplace=True)
df_combined_sig2['index'] = df_combined_sig2['index'].apply(lambda x: map2anno[x])
df_combined_sig2.set_index('index', inplace=True)
# =============================================================================
#
# Plotting 2nd level mapman enrichment (df_combined_sig2)
#
# =============================================================================
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial.distance import squareform
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
def jdistprep(df, axis):
'''
Convert df to sets (for calculation of JD of X axis)
Parameters
----------
df : dataframe
dataframe of categorical variables to be converted to sets.
axis : int
axis to do sets on, 0 by column (default), 1 by row
Returns
-------
dicto : dict
dictionary containing list of column values.
'''
if axis == 1:
df = df.T
dxkeys = df.columns.to_list()
dykeys = df.index.to_list()
dicto = {}
for col in dxkeys:
dicto[col] = [dykeys[i] + '_' + str(x) for i, x in enumerate(df[col].to_list())]
return [dicto, dxkeys]
def jdist(df, axis=0):
'''
Construct jaccard distance square matrix
Parameters
----------
df : df
dataframe to be used for jiprep/ jdist calculation.
axis : int
axis to do sets on, 0 by column (default), 1 by row
Returns
-------
linkage_matrix : list
condensed jaccard distance matrix.
jlist : list
list of list (jaccard distance square matrix)
dicto : dict
dictionary of list
'''
dicto, dxkeys = jdistprep(df, axis)
jlist = []
for key in dxkeys:
col = []
for key2 in dxkeys:
set1, set2 = dicto[key], dicto[key2]
set1x = set([x for x in set1 if x.split('_')[1] != '0'])
set2x = set([x for x in set2 if x.split('_')[1] != '0'])
col.append(1 - ji_cal(set1x, set2x))
jlist.append(col)
dists = squareform(jlist)
linkage_matrix = linkage(dists, "single")
return linkage_matrix, jlist, dicto
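# A toy run (made-up categories) of jdist: each column is turned into a set of
# "row_category" labels, zero categories are dropped, and the pairwise Jaccard
# distances are condensed and clustered with single linkage.
toy_cat = pd.DataFrame({"H": [1, 0, 3], "C": [1, 0, 0], "S": [0, 2, 3]},
                       index=["binA", "binB", "binC"])
toy_linkage, toy_square, _ = jdist(toy_cat)
print(np.round(toy_square, 2))  # symmetric distance matrix with zero diagonal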
def plot_dendro(linkage_matrix, ax, orient):
'''
Plots dendrogram into subplot
Parameters
----------
    linkage_matrix : ndarray
        Linkage matrix from scipy.cluster.hierarchy.linkage.
ax : axes
axis of subplot to plot to.
orient : str
orientation of dendrogram to be plotted.
Returns
-------
None.
'''
dendrogram(linkage_matrix, no_labels=True, ax=ax, orientation=orient, color_threshold=0, above_threshold_color='#000000')
xmat, xlist, xdict = jdist(df_combined_sig2)
ymat, ylist , ydict = jdist(df_combined_sig2, axis=1)
yden = dendrogram(ymat, labels=df_combined_sig2.index.to_list(), orientation='left') #, color_threshold=0, above_threshold_color='#000000'
plt.show()
xden = dendrogram(xmat, labels=df_combined_sig2.columns.to_list(), orientation='top') #, color_threshold=0, above_threshold_color='#000000'
plt.show()
yorder = yden['ivl']
xorder = xden['ivl']
df_sig2_reordered = df_combined_sig2[xorder]
df_sig2_reordered = df_sig2_reordered.reindex(yorder[::-1])
fig, ax = plt.subplots(2,2,
figsize=(7.5,8.5), # (width, height)
constrained_layout=True,
gridspec_kw={'width_ratios': [1.5, 5],'height_ratios': [1, 5]}) # constrained_layout=True,
ax0, ax1, ax2, ax3 = ax.flatten()
for i in [ax0, ax1, ax2]:
i.axis('off')
plot_dendro(xmat, ax1, 'top')
plot_dendro(ymat, ax2, 'left')
# heatmap, tick and tick labels
hplot = ax3.imshow(df_sig2_reordered, cmap=cmap)
ax3.yaxis.tick_right()
ax3.set_ylabel("")
ax3.set_xticks(np.arange(0, len(df_sig2_reordered.columns), 1))
ax3.set_yticks(np.arange(0, len(df_sig2_reordered), 1))
xcolour = ['k'] + ['firebrick']*4 + ['gray']*6 + ['mediumseagreen']*4 + ['k']*2 + ['darkorange']*3 + ['k']*2 + ['royalblue']*3
ax3.set_xticklabels(df_sig2_reordered.columns.to_list(), rotation=90)
for i, tick_label in enumerate(ax3.get_xticklabels()):
tick_text = tick_label.get_text()
tick_label.set_color(xcolour[i])
anno_long = ['annotated', 'cellulose', 'biosynthesis', 'hemicellulose', 'pectin', 'channels', 'degradation']
ax3.set_yticklabels([(lambda x: x.split('.')[1].lower() if x.split('.')[1] not in anno_long else x.lower())(x) for x in df_sig2_reordered.index.to_list()])
# cbar plotting and control
axins = inset_axes(ax0,
width="40%", # width = 50% of parent_bbox width
height="90%", # height : 5%
loc = 'center')
cbar = fig.colorbar(hplot, cax=axins, ticks = cbarticks)
cbar.ax.set_yticklabels(['N', 'D', 'UD', 'U'])
plt.savefig(dir_path+'figures/Fig5F.png', dpi=600, bbox_inches='tight') # no N
# + [markdown] id="xuuwbZc27lDH"
# ### Figure 6: Diurnal gene expression
# + id="GAgDmV7kdQXn"
#Fig 6 A to D, adapted from Mpo_panel1.py
from scipy.stats import zscore
wdir = dir_path + "diurnal/"
Mpodf = pd.read_csv(wdir + "Mpo_supp.txt", sep = "\t", index_col = 0)
Mpo_exp_only = Mpodf[Mpodf.LAG != "NE"]
Mpo_rhy_only = Mpo_exp_only[Mpo_exp_only.LAG != "NR"]
perall = (len(Mpo_rhy_only) / len(Mpodf))*100
perexp = (len(Mpo_rhy_only) / len(Mpo_exp_only))*100
# Subset rhythmic genes only
Mpo_rhy_only.LAG = Mpo_rhy_only.LAG.astype(int)
Mpo_rhy_only["ADJ.P"] = Mpo_rhy_only["ADJ.P"].astype(np.float)
Mpo_rhy_only.sort_values(["LAG", "ADJ.P"], inplace=True)
# normalisation of rhythmic gene expression
rhy_zscore = Mpo_rhy_only[Mpo_rhy_only.columns.to_list()[Mpo_rhy_only.columns.to_list().index("ZT2_1"):]]
rhy_zscore = rhy_zscore.transpose()
rhy_zscore = rhy_zscore.apply(zscore)
rhy_zscore = rhy_zscore.transpose()
plt.figure(figsize=(10,20))
"""
Panel 1a) plot
"""
colnames = [x.split("_")[0] for x in rhy_zscore.columns.to_list()]
condcoldict = {}
for x in list(set(colnames)):
if int(x.split("ZT")[1]) < 12:
condcoldict[x] = (255,255,153) #"khaki"
else:
condcoldict[x] = (160,160,160) #"lightslategrey"
condcol = np.array([[condcoldict[x] for x in colnames]])
fig, ax = plt.subplots(2,1,
figsize=(4,8), # (width, height)
gridspec_kw={'height_ratios': [0.091, 3.9]})
fig.subplots_adjust(hspace=0.01)
ax1, ax2= ax.flatten()
ax1.imshow(condcol)
# Set gridlines
ax1.set_xticks(np.arange(-.5, 18, 3))
ax1.set_yticks(np.arange(-.5, 1, 1))
ax1.grid(color='k', linestyle='-', linewidth=1)
ax1.set_xticklabels([])
ax1.set_yticklabels([])
ax1.xaxis.set_ticks_position('none')
ax1.yaxis.set_ticks_position('none')
ax1.set_anchor('W')
sns.heatmap(rhy_zscore, cmap = "coolwarm", ax = ax2, yticklabels=False, xticklabels=True, center=0)
ax2.set_ylabel("Genes")
plt.savefig(dir_path + 'figures/Fig6A.png', dpi = 600)
plt.show()
"""
Panel 1B) plot
"""
rhycount = len(Mpo_rhy_only)
LAGcount = Mpo_rhy_only.groupby("LAG").count().annotation.to_frame()
LAGcount.columns = ["count"]
LAGcount["percent"] = LAGcount.apply(lambda x: (x/rhycount)*100)
g = LAGcount.percent.plot(xticks = LAGcount.index.to_list(),
yticks = [0, 18],
ylim = [0,18],
ylabel = "% rhythmic genes",
xlabel = "Phase",
color = "k")
g.axvline(12, color = "k")
plt.savefig(dir_path + 'figures/Fig6B.png', dpi = 600)
plt.show()
"""
Panel 1C) plot
"""
fp = dir_path + "diurnal/Ferrari_2019/SD14_compat.txt"
qp = dir_path + "diurnal/Ferrari_2019/OF_20210623_compat.tsv"
camortho = pd.read_csv(fp, sep="\t", index_col=0)
camortho.Ath = camortho.Ath.str.upper()
qortho = pd.read_csv(qp, sep="\t", index_col=0)
qortho.Osa = qortho.Osa.str.replace(r"\.[0-9]*", "", regex=True)
camgrps = ["OG0000156", "OG0000215", "OG0000679", "OG0004739", "OG0004944", "OG0005370"]
qgrps = ["OG0000167", "OG0000301", "OG0000399", "OG0003516", "OG0004855", "OG0004502"]
cgrpdict = {"OG0000156":"OG0000167 (Cyclin A, B)",
"OG0000215":"OG0000301 (Cyclin D)",
"OG0000679":"OG0000399 (CDK)",
"OG0004739":"OG0003516 (Timeless)",
"OG0004944":"OG0004855 (DNA primase)",
"OG0005370":"OG0004502 (DNA polymerase)"}
mpogrpgenes = {}
for i in range(len(camgrps)):
testc = camortho.loc[camgrps[i],:].to_list()
testc = [x.split(", ") for x in testc if type(x) != float]
testcs = [x for a in testc for x in a]
testq = qortho.loc[qgrps[i],:].to_list()
testq = [x.split(", ") for x in testq if type(x) != float]
testqs = [x for a in testq for x in a]
c_s = set(testcs) - set(testqs)
print(camgrps[i])
print(str(list(c_s)))
q_s = list(set(testqs) - set(testcs))
mpogrpgenes[cgrpdict[camgrps[i]]] = [x for x in q_s if "Mp" in x]
mpocyclegenes = [x for a in list(mpogrpgenes.values()) for x in a if x in rhy_zscore.index]
timepoints = ["ZT2", "ZT6", "ZT10", "ZT14", "ZT18", "ZT22"]
# dataframe normalised timepoints for mpocyclegenes only
cycle_zscore = rhy_zscore.loc[mpocyclegenes].transpose()
# dictionary of mpocyclegenes and their corresponding OG information
cycle_grp = {}
for k, v in mpogrpgenes.items():
for item in v:
cycle_grp[item] = k
qwOGgrps = list(mpogrpgenes.keys())
# to transpose and create new df that contains the average zscore of replicates
cycle_dict = {}
for t in timepoints:
cycle_dict[t] = cycle_zscore.loc[[t+"_1", t+"_2", t+"_3"],:].mean().to_list()
cycle_df = pd.DataFrame(cycle_dict, columns = timepoints, index = [x + ": " + cycle_grp[x] for x in cycle_zscore.columns.to_list()]).transpose()
colours = ["maroon", "orangered", "forestgreen", "midnightblue", "mediumorchid", "steelblue", "darkseagreen"]
cycle_cols = {}
for keys in cycle_grp.keys():
cycle_cols[keys + ": " + cycle_grp[keys]] = colours[qwOGgrps.index(cycle_grp[keys])]
cycle_df.plot().legend(bbox_to_anchor=(0.81, -0.1))
cycle_df.plot(color = cycle_cols).legend(bbox_to_anchor=(0.81, -0.1))
plt.savefig(dir_path + 'figures/Fig6C.png',
dpi = 600,
bbox_inches='tight')
"""
Panel 1D) Mercator by phase
"""
import seaborn as sns
Mpo_rhy_only["MapMan bins"] = Mpo_rhy_only.apply(lambda x: x.annotation.split(".")[0].capitalize(), axis=1)
mer_grouped = Mpo_rhy_only.groupby(["MapMan bins", "LAG"]).count().annotation.unstack(fill_value=0)
phases = mer_grouped.columns.to_list()
binsum = mer_grouped.sum(axis=1)
for phase in phases:
mer_grouped[phase] = mer_grouped[phase]/binsum
g = sns.clustermap(mer_grouped)
plt.show()
g_ytick = [str(x).split("'")[1] for x in g.ax_heatmap.get_yticklabels()]
drow = g.dendrogram_row.linkage
mer_grouped_reordered = mer_grouped.reindex(g_ytick)
sns.heatmap(mer_grouped_reordered, yticklabels=True)
mer_grouped.transpose().plot().legend(bbox_to_anchor=(0.72, -0.1))
# subplots with linkage
mer_grouped_dendro = mer_grouped.reindex(g_ytick[::-1])
from scipy.cluster.hierarchy import dendrogram
figii, axii = plt.subplots(1,2,
figsize=(10,6), # (width, height
constrained_layout=True,
gridspec_kw={'width_ratios': [1.9, 8.1]})
ax1ii, ax2ii= axii.flatten()
ax1ii.axis("off")
dendrogram(drow, no_labels=True, ax=ax1ii, orientation='left', color_threshold=0, above_threshold_color='#000000')
sns.heatmap(mer_grouped_dendro, yticklabels=True, ax = ax2ii)
ax2ii.set_ylabel("")
plt.show()
# to plot heatmap by chunks
def chunk(num):
if num == 0:
cat = 0
elif num < 0.1:
cat = 1
elif num < 0.2:
cat = 2
elif num < 0.3:
cat = 3
elif num < 0.4:
cat = 4
else:
cat = 5
return cat
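# Spot-check (arbitrary fractions) of the percentage binning above:
# 0 -> "None", then 10% steps up to the ">40%" bin.
print([chunk(v) for v in (0, 0.05, 0.15, 0.35, 0.6)])  # [0, 1, 2, 4, 5]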
mer_grouped_chunk = mer_grouped.reindex(g_ytick)
for col in mer_grouped_chunk:
mer_grouped_chunk[col] = mer_grouped_chunk[col].apply(lambda x: chunk(x))
sns.heatmap(mer_grouped_chunk, yticklabels=True)
"""
Mercator count binned by percentage with custom colormap
"""
from matplotlib.colors import ListedColormap
figm, (axm1, axm2) = plt.subplots(1,2,
figsize=(6.3,6), # (width, height
constrained_layout=True,
gridspec_kw={'width_ratios': [1.6, 8.4]})
axm1.axis("off")
dendrogram(drow,
no_labels=True,
ax=axm1, orientation='left',
color_threshold=0,
above_threshold_color='#000000')
cmap = ListedColormap(["gray", "lightsteelblue", "lightgreen", "palegoldenrod", "coral", "indianred"])
mplot = axm2.imshow(mer_grouped_chunk, cmap=cmap, interpolation="none")
axm2.set_xticks(np.arange(0, len(mer_grouped_chunk.columns), 1))
axm2.set_yticks(np.arange(0, len(mer_grouped_chunk), 1))
axm2.set_xticklabels(mer_grouped_chunk.columns.to_list())
axm2.set_yticklabels(mer_grouped_chunk.index.to_list())
cbar = figm.colorbar(mplot,
ax=axm2,
ticks = [x/12 for x in np.arange(5,6*10,10)],
label="% rhythmic genes in bin")
#cbar = fig.colorbar(cax, ticks=[-1, 0, 1])
cbar.ax.set_yticklabels(['None', '0-9%', '10-19%', '20-29%', '30-39%', '>40%']) # vertically oriented colorbar
plt.savefig(dir_path + 'figures/Fig6D.png', dpi = 600)
# + id="DXuBGD65hn7R"
# Fig 6E and F, adapted from 1to1ortho.py
Mpo_exp_only = Mpodf[Mpodf.LAG != "NE"]
Mpo_rhy_only = Mpo_exp_only[Mpo_exp_only.LAG != "NR"]
Mpo_rhy_genes = Mpo_rhy_only.index.to_list()
species = ["Cpa", "Ppu", "Cre", "Kni", "Ppa", "Smo", "Pab", "Osa", "Ath"]
night = [8, 12, 12, 12, 8, 12, 8, 6, 6]
daylength = [16, 12, 12, 12, 16, 12, 16, 6, 6]
freq = [1, 1, 1, 1, 1, 1, 1, 2, 2]
odir = dir_path + "diurnal/Orthologues_Mpo/"
ofiles = ["Mpo__v__" + x + ".tsv" for x in species]
camdir = dir_path + "diurnal/"
camfiles = [x + "_supp.txt" for x in species]
### FUNCTION ###
def lag_diff(a, b):
"""
Parameters
----------
a : int
LAG value of species X.
b : int
LAG value of Mpo.
Returns
-------
diff : int
smallest LAG diff.
"""
diff = a - b
if abs(diff) > 12:
if diff < 0:
diff = diff + 24
else:
diff = diff - 24
return diff
### END ###
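# Quick check of the circular phase difference: LAG values wrap around a 24 h
# day, so the reported difference always lies within [-12, 12].
print(lag_diff(2, 22))   # 4
print(lag_diff(22, 2))   # -4
print(lag_diff(14, 2))   # 12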
f_axes = string.ascii_uppercase[:len(species)]
d_axes = string.ascii_lowercase[:len(species)]
axd = plt.figure(constrained_layout=True,
figsize=(27,6)).subplot_mosaic(
"""
abcdefghi
ABCDEFGHI
"""
)
#for spe in species:
# get 1 to 1 orthologue
for z in range(len(species)):
ol_df = pd.read_csv(odir + ofiles[z], sep="\t", index_col=0)
ol_df = ol_df[ol_df.Mpo.apply(lambda row: len(row.split(", ")) ==1) & ol_df[species[z]].apply(lambda row: len(row.split(", ")) ==1)]
if species[z] == "Osa":
osa_dict = {}
osa_genes = ol_df.Osa.to_list()
for gene in osa_genes:
osa_dict[gene] = gene.split(".")[0]
ol_df.Osa.replace(osa_dict, inplace=True)
# get LAGs
cam_f = pd.read_csv(camdir + camfiles[z], sep="\t", index_col=0)
# Mpo LAG
ol_df["Mpo_LAG"] = ol_df.apply(lambda row: Mpodf.LAG.loc[row.Mpo], axis=1)
ol_df[species[z] + "_LAG"] = ol_df.apply(lambda row: cam_f.phase.get(row[species[z]], None), axis=1)
# exclude NE and NR in either Mpo or species[z] LAG
for i in ["Mpo", species[z]]:
for j in ["NE", "NR"]:
ol_df = ol_df[ol_df[i + "_LAG"] != j]
ol_df = ol_df[ol_df[i + "_LAG"].notna()]
# correcting LAG value 24 to 0 and converting to numeric
ol_df[species[z] + "_LAG"] = ol_df[species[z] + "_LAG"].replace({"24":"0"})
ol_df.Mpo_LAG = pd.to_numeric(ol_df.Mpo_LAG)
ol_df[species[z] + "_LAG"] = pd.to_numeric(ol_df[species[z] + "_LAG"])
# calculating smallest lag diff
ol_df["LAG_diff"] = ol_df.apply(lambda row: lag_diff(row[species[z] + "_LAG"], row.Mpo_LAG), axis=1)
diff_ser = ol_df.groupby("LAG_diff").count().Mpo_LAG
max_diff = diff_ser[diff_ser == diff_ser.max()].index.to_list()
# Plot LAG diff
axd[f_axes[z]].set_xticks(np.arange(-.5,len(ol_df.LAG_diff.unique())-1))
diff_ser_index = diff_ser.index.to_list()
diff_xticks = []
if len(diff_ser_index) > 13:
for i in range(len(diff_ser_index)):
if i%2 == 0:
diff_xticks.append(str(diff_ser_index[i]))
else:
diff_xticks.append("")
else:
diff_xticks = diff_ser_index
sns.histplot(ol_df.LAG_diff,
#x=diff_xticks,
bins=len(ol_df.LAG_diff.unique()),
kde=True,
ax=axd[d_axes[z]],)
if z != 0:
axd[d_axes[z]].set_ylabel("")
elif z == 0:
axd[d_axes[z]].set_ylabel("Count", fontsize=14)
axd[d_axes[z]].set_xlabel("")
# Plot for 1 to 1 ortho
mpo_tp = list(range(0,24,2)) # x-axis
full_other_tp = list(ol_df[species[z] + "_LAG"].unique())
other_tp = list(range(0,24,freq[z])) # y-axis
sum_dict = {}
for o in other_tp:
o_col = []
for m in mpo_tp:
o_col.append(sum((ol_df.Mpo_LAG == m) & (ol_df[species[z] + "_LAG"] == o)))
sum_dict["ZT" + str(o)] = o_col
sum_df = pd.DataFrame(sum_dict, columns = ["ZT" + str(y) for y in other_tp], index = ["ZT" + str(x) for x in mpo_tp])
axd[f_axes[z]].imshow(sum_df, cmap="Blues", aspect="auto")
axd[f_axes[z]].set_xticks(np.arange(-.5,len(other_tp)-1))
axd[f_axes[z]].set_yticks(np.arange(-.5,len(mpo_tp)-1))
xticklist = [other_tp[0]] + ["" for x in range(0,daylength[z]-1)] + [other_tp[daylength[z]]] + ["" for x in range(0,night[z]-2)] + [other_tp[-1]]
yticklist = [mpo_tp[0]] + ["" for x in range(0,6-1)] + [mpo_tp[6]] + ["" for x in range(0,6-2)] + [mpo_tp[-1]]
axd[f_axes[z]].set_xticklabels(xticklist)
axd[f_axes[z]].set_yticklabels(yticklist)
axd[f_axes[z]].axvline(daylength[z]-0.5, color="k")
axd[f_axes[z]].axhline(6-0.5, color="k")
axd[f_axes[z]].set_xlabel(species[z], fontsize=14)
if z != 0:
axd[f_axes[z]].set_yticks([])
axd[f_axes[z]].set_yticklabels([])
elif z == 0:
axd[f_axes[z]].set_ylabel("Mpo", fontsize=14)
plt.savefig(dir_path + 'figures/Fig6E_F.png', dpi = 600)
# + [markdown] id="RdmYRmdc7oA4"
# ### Supp. Fig 2: QC of RNA-seq data
# + id="ZUjfF_uuYTGW"
# adapted from QC_scaled_updated.py
from sklearn.preprocessing import StandardScaler
from scipy.stats import pearsonr
o_dir = dir_path + 'figures/'
sumdir = dir_path + 'summary_files/'
expdesc = ['all_stress', 'diurnal_exp', 'single_stress']
targetexp = expdesc[0]
targetp = sumdir + targetexp + '.txt'
expmatp = dir_path + 'prep_files/' + targetexp + '.tsv'
exps = [x.split("\t")[0] for x in open(targetp, "r").readlines()]
labels = [x.strip().split("\t")[1] + '_' + x.split("\t")[0].split('_')[1] for x in open(targetp, "r").readlines()]
df = pd.read_csv(expmatp, index_col = 0, sep = "\t", header = 0)
df.columns = labels
# Standard Scaling
scaled_features = StandardScaler().fit_transform(df.values)
df_scaled = pd.DataFrame(scaled_features, index = df.index, columns = df.columns)
# plot cluster map
sns.set(font_scale=1.6)
methods = "average"
g1 = sns.clustermap(df_scaled.corr(),
method = methods,
figsize=(20,20),
xticklabels=True,
yticklabels=True)
plt.title("All stress (scaled): " + methods)
plt.savefig(o_dir + "SuppFig2" + '.png')
# PCC of experiments
pcc_out = open(dir_path + "prep_files/mpo/all_stress_PCC.txt", "w+")
pcc_out.write("exp1\texp2\tpcc_val\tp_value\n")
exps = list(df_scaled.columns)
for exp1 in range(len(exps)):
for exp2 in range(exp1):
if exps[exp1].split("_")[0] == exps[exp2].split("_")[0]:
pcc_val, p_value = pearsonr(df_scaled[exps[exp1]], df_scaled[exps[exp2]])
pcc_out.write(exps[exp1] + "\t" + exps[exp2] + "\t" + str(pcc_val) + "\t" + str(p_value) + "\n")
pcc_out.close()
# + [markdown] id="cTKSAfIj7rcq"
# ### Supp. Fig 3: Volcano plots (DESeq2)
# + id="5Cp2Q3EKrCD3"
# adapted from deseq_volcano.py
wdir = dir_path + 'prep_files/mpo/deseq/'
odir = wdir + 'volcano/'
deseqouts = [x for x in os.listdir(wdir) if "res.tsv" in x] # controlD2controlH2_res.tsv
control = 'controlD2controlH2_res.tsv'
deseqouts.pop(deseqouts.index(control))
all_stress = list(set([x.split('control')[0] for i, x in enumerate(deseqouts)]))
all_stress.sort()
s_stress = [x for x in all_stress if len(x) == 1]
c_stress = [x for x in all_stress if len(x) == 2]
all_stress = s_stress + c_stress
# plot control
controls = pd.read_csv(wdir + control,
sep = "\t", header = 0, index_col = 0)
sns.scatterplot(x = controls['log2FoldChange'],
y = -np.log10(controls["padj"]),
#ax = axs[axcord[0][0], axcord[0][1]],
alpha = 0.2,
marker = '.',
legend = False,
edgecolor = "none",
hue = np.logical_and(abs(controls['log2FoldChange']) > 1,
-np.log10(controls["padj"]) > -np.log10(0.05)))
plt.title("control " + control.split("control")[1] + " vs control H2")
# plot everything else
xlen = 5
ylen = math.ceil(len(deseqouts)/5)
fig, axs = plt.subplots(ylen, xlen, figsize=(30, 37.5), sharex='col', sharey='row')
#sns.set(font_scale=1.6)
axcord = []
for a in range(ylen):
for b in range(xlen):
axcord.append([a, b])
for i, z in enumerate(all_stress):
files = [x for x in deseqouts if x.startswith(z+'control')]
files.sort()
fileD2 = pd.read_csv(wdir + files[0],
sep = "\t", header = 0, index_col = 0)
fileH2 = pd.read_csv(wdir + files[1],
sep = "\t", header = 0, index_col = 0)
D2ax = int(((i*2)//10)*10 + ((i*2)%10)/2)
H2ax = int(D2ax + 5)
# Volcano plots
# against control D2
sns.scatterplot(x = fileD2['log2FoldChange'],
y = -np.log10(fileD2["padj"]),
ax = axs[axcord[D2ax][0], axcord[D2ax][1]],
alpha = 0.2,
marker = '.',
legend = False,
edgecolor = "none",
hue = np.logical_and(abs(fileD2['log2FoldChange']) > 1,
-np.log10(fileD2["padj"]) > -np.log10(0.05)))
axs[axcord[D2ax][0], axcord[D2ax][1]].set_title(z + " vs control D2")
# against control H2
sns.scatterplot(x= fileH2['log2FoldChange'],
y = -np.log10(fileH2["padj"]),
ax = axs[axcord[H2ax][0], axcord[H2ax][1]],
alpha = 0.2,
marker = '.',
legend = False,
edgecolor = "none",
hue = np.logical_and(abs(fileH2['log2FoldChange']) > 1,
-np.log10(fileH2["padj"]) > -np.log10(0.05)))
axs[axcord[H2ax][0], axcord[H2ax][1]].set_title(z + " vs control H2")
for ax in axs.flat:
ax.set(xlabel='log2FoldChange', ylabel='-log10 padj')
# Hide x labels and tick labels for top plots and y ticks for right plots.
for ax in axs.flat:
ax.label_outer()
plt.savefig(dir_path + "figures/SuppFig3.png", dpi = 600)
# + [markdown] id="Ag4VOlpF72Dh"
# ### Supp. Fig 8: Overview of diurnal data
# + id="JJ8VHomNbC3R"
# adapted from QC_scaled_updated.py
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
o_dir = dir_path + 'figures/'
sumdir = dir_path + 'summary_files/'
expdesc = ['all_stress', 'diurnal_exp', 'single_stress']
targetexp = expdesc[1]
targetp = sumdir + targetexp + '.txt'
expmatp = dir_path + 'prep_files/' + targetexp + '.tsv'
exps = [x.split("\t")[0] for x in open(targetp, "r").readlines()]
labels = [x.strip().split("\t")[1] + '_' + x.split("\t")[0].split('_')[1] for x in open(targetp, "r").readlines()]
df = pd.read_csv(expmatp, index_col = 0, sep = "\t", header = 0)
df.columns = labels
# =============================================================================
#
# PCA for diurnal by genes (Panel A)
#
# =============================================================================
pca = PCA(n_components=2)
diurnal_transformed = StandardScaler().fit_transform(df.values)
pcomp = pca.fit_transform(diurnal_transformed.T)
p_df = pd.DataFrame(data = pcomp, columns = ['PC1', 'PC2'])
pc1, pc2 = pca.explained_variance_ratio_
finalDf = p_df.copy()
finalDf['target'] = [x.split('_')[0] for x in df.columns.to_list()]
#plot PCA figure
PCA_plot = o_dir + 'SuppFig8A.png'
fig = plt.figure(figsize = (8,5))
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('Principal Component 1: ' + str(round(pc1, 2)), fontsize = 15)
ax.set_ylabel('Principal Component 2: ' + str(round(pc2, 2)), fontsize = 15)
targets = [x.split('_')[0] for i, x in enumerate(df.columns.to_list()) if i%3 == 0]
colors = ['r', 'y', 'g', 'b', 'c', 'm']
for target, color in zip(targets,colors):
indicesToKeep = finalDf['target'] == target
ax.scatter(finalDf.loc[indicesToKeep, 'PC1']
, finalDf.loc[indicesToKeep, 'PC2']
, c = color
, s = 50
, alpha = 0.5
, edgecolors = 'k')
ax.legend(targets, loc = 'upper right', fontsize='xx-small')
ax.grid()
plt.savefig(PCA_plot, dpi=600)
# =============================================================================
#
# PCA for diurnal by genes (Panel B)
#
# =============================================================================
# JTK_output info
wdir = dir_path + "diurnal/"
Mpodf = pd.read_csv(wdir + "Mpo_supp.txt", sep = "\t", index_col = 0)
#PCA part
df['target'] = [Mpodf.loc[x].LAG for x in df.index.to_list()]
diurnal_filt = df[df.target != 'NE']
diurnal_scaled = StandardScaler().fit_transform(diurnal_filt.iloc[:,:-1].T.values)
pca = PCA(n_components=2)
pcomp = pca.fit_transform(diurnal_scaled.T)
p_df = pd.DataFrame(data = pcomp, columns = ['PC1', 'PC2'])
pc1, pc2 = pca.explained_variance_ratio_
finalDf = p_df.copy()
finalDf['target'] = diurnal_filt.target.to_list()
#plot PCA figure
PCA_plot = o_dir + 'SuppFig8B.png'
fig = plt.figure(figsize = (8,5))
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('Principal Component 1: ' + str(round(pc1, 2)), fontsize = 15)
ax.set_ylabel('Principal Component 2: ' + str(round(pc2, 2)), fontsize = 15)
targets = list(finalDf.target.unique())
targets.sort()
num_only = [int(x) for x in targets[:-1]]
num_only.sort()
new_targets = [str(x) for x in num_only] + [targets[-1]]
colors = [(1,1,0), (1,0.75,0), (1,0.5,0),
(1,0.25,0), (1,0,0.25), (1,0,0.5),
(1,0,0.75), (1,0,1), (0.75,0,1),
(0.5,0,1), (0.25,0,1), (0,0,1),
(0.75,0.75,0.75)]
for target, color in zip(new_targets,colors):
indicesToKeep = finalDf['target'] == target
ax.scatter(finalDf.loc[indicesToKeep, 'PC1']
, finalDf.loc[indicesToKeep, 'PC2']
, color = color
, s = 50
, alpha = 0.5
, edgecolors = None)
ax.legend(new_targets, bbox_to_anchor=(1, 1), fontsize='x-small')
ax.grid()
plt.savefig(PCA_plot, dpi=600)
# + [markdown] id="HWapwnXyoItc"
# # 4. Experimental
# + [markdown] id="2DT-AxvEZtOw"
# ### 2.1 Download RNA-seq experiments !experimental
# + id="2hVebO7OZsP3"
kal_dir = dir_path + 'kal_out/'
def kal_index():
def get_ftp_links(RunID):
'''(str)->(lst,str)
Return ftp link in the paired and unpaired format for the RunID specified
'''
dir2 = ""
if 9 < len(RunID) <= 12:
dir2 = "0"*(12 - len(RunID)) + RunID[-(len(RunID)-9):] + "/"
dirs = RunID[:6] + "/" + dir2 + RunID
ftp_link_paired = [dirs + "/" + RunID + "_1.fastq.gz",
dirs + "/" + RunID + "_2.fastq.gz"]
ftp_link_unpaired = dirs + "/" + RunID + ".fastq.gz"
elif len(RunID) == 9:
dirs = RunID[:6] + "/" + RunID
ftp_link_paired = [dirs + "/" + RunID + "_1.fastq.gz",
dirs + "/" + RunID + "_2.fastq.gz"]
ftp_link_unpaired = dirs + "/" + RunID + ".fastq.gz"
return ftp_link_paired, ftp_link_unpaired
def kal_single(outname, index, SpotLen, flink):
# !kallisto quant -i $index -o $outname --single -l $SpotLen -s 20 -t 2 <(curl $flink)
def kal_paired(outname, index, flink1, flink2):
# !kallisto quant -i $index -o $outname -t 2 <(curl $flink1 $flink2)
# Download Rice experiments
kal_osa = kal_dir + 'osa/'
if not os.path.exists(kal_osa):
# !mkdir $kal_osa
RunTable = pd.read_csv(sum_dir + "selected_Osa.txt",
sep = "\t", header = 0)
for i in range(len(RunTable)):
runid = RunTable["Run"][i]
study = RunTable["Study"][i]
liblay = RunTable["Layout"][i]
spotlen = RunTable["Spot_length"][i]
if study + "_" + runid not in completed:
path_paired, path_single = get_ftp_links(runid)
print(str(i) + "\t" + path_single.split("/")[-1].split(".fastq.gz")[0] + "\t" + liblay + "\n")
if liblay == "SINGLE":
                kal_single(kal_osa + study+'_'+runid, osa_idx, spotlen, path_single)
elif liblay == "PAIRED":
kal_paired(kal_osa + study+'_'+runid, osa_idx, path_paired[0], path_paired[1])
# + [markdown] id="PbX3ba-a2uNC"
# ### 2.2 Generate expression matrix !experimental
# + id="Eko_SXvA2mOn"
# Generation of gene expression matrix and kallisto statistics
def kal_extract(kout, exps):
    '''(str,list)->(dict,dict,str)
    Return dictionaries containing tpm and raw expression values, plus the matrix header
'''
dicto = {}
dicto_raw = {}
output_header = 'gene\t'
output_content = ''
for folder in exps:
filep = kout + folder + '/abundance.tsv'
if os.path.exists(filep):
print('In directory ' + folder)
output_header += folder + '\t'
content = open(filep, 'r')
content.readline()
for item in content:
                values = item.rstrip().split('\t')
                item, tpm = values[0], values[-1]
                raw = str(round(float(values[-2])))
if item in dicto:
dicto[item].append(tpm)
else:
dicto[item] = [tpm]
if item in dicto_raw:
dicto_raw[item].append(raw)
else:
dicto_raw[item] = [raw]
if '' in dicto:
dicto.pop('')
if '' in dicto_raw:
        dicto_raw.pop('')
    return dicto, dicto_raw, output_header
def write_expmat(filepath, dicttouse, output_header):
    '''(str, dict, str)->(None)
    Writes expression matrix to file from dictionary
'''
with open(filepath, "w+") as output_file:
output_file.write(output_header[:-1] + "\n")
for key, value in dicttouse.items():
line = ''
line += key + '\t'
for item in value:
line += item + '\t'
output_file.write(line[:-1] + "\n")
def kal_stats(kout):
'''(str)->(None)
Writes summary of mapping statistics of kallisto runs to file
'''
kal_dirs = [x for x in os.listdir(kout)]
with open(kout + "kallisto_stats.txt", "w+") as output_file:
output_file.write("experiment\tn_processed\tn_pseudoaligned\tn_unique\tp_pseudoaligned\tp_unique\n")
for folder in kal_dirs:
kallisto_json = ast.literal_eval(open(kout + folder + '/run_info.json', 'r').read())
processed = kallisto_json["n_processed"]
pseudoaligned = kallisto_json["n_pseudoaligned"]
unique = kallisto_json["n_unique"]
ppseudoaligned = kallisto_json["p_pseudoaligned"]
punique = kallisto_json["p_unique"]
output_file.write(folder + "\t" +
str(processed) + "\t" +
str(pseudoaligned) + "\t" +
str(unique) + "\t" +
str(ppseudoaligned) + "\t" +
str(punique) + "\n")
# Marchantia
sum_dir = dir_path + 'summary_files/'
expdesc = ['all_stress', 'diurnal_exp', 'single_stress', 'cross_stress']
for targetexp in expdesc:
targetp = sum_dir + targetexp + '.txt'
expmatp = dir_path + 'prep_files/' + targetexp + '.tsv'
expmatrawp = dir_path + 'prep_files/' + targetexp + '_raw.tsv'
mpo_exps = [x.split("\t")[0] for x in open(targetp, "r").readlines()]
    mpo_tpm, mpo_raw, mpo_header = kal_extract(kal_dir + 'mpo/', mpo_exps)
    write_expmat(expmatp, mpo_tpm, mpo_header)
    write_expmat(expmatrawp, mpo_raw, mpo_header)
kal_stats(kal_dir + 'mpo/')
# Rice
RunTable = pd.read_csv(sum_dir + "selected_Osa.txt",
sep = "\t", header = 0)
osa_runs = RunTable.Run.to_list()
osa_study = RunTable.Study.to_list()
osa_exps = [osa_study[i] + '_' + x for i, x in enumerate(osa_runs)]
expmatp = dir_path + 'prep_files/' + 'expmat_Osa.tsv'
expmatrawp = dir_path + 'prep_files/' + 'expmat_Osa_raw.tsv'
osa_tpm, osa_raw, osa_header = kal_extract(kal_dir + 'osa/', osa_exps)
write_expmat(expmatp, osa_tpm, osa_header)
write_expmat(expmatrawp, osa_raw, osa_header)
kal_stats(kal_dir + 'osa/')
| marchantia_stress.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Setup
# +
import os
import scanpy as sc
import numpy as np
import pandas as pd
from numpy.random.mtrand import RandomState
from sklearn.utils import check_random_state, check_array
from sklearn.decomposition import NMF
import gensim
from gensim import corpora, models, similarities
# -
# ## Preprocessing
# 1. Use raw count data
# 2. Use normalized data
# 3. Use tfidf (typical preprocessing for NLP)
# 4. Oana's preprocessing
# ### Raw Count Data
# ### Normalized Data
# ### TFIDF
# ### Oana's Preprocessing
#
#
# **slack convo**
#
# __oana 11:34 AM__
# Hi Kirk, I have a naive question about topic models.
# I know that the typical way people run the topic models is to take either the normalized counts, or the log1p rounded to an integer. I wonder how ok it would be to take the scaled values, set everything that's negative to 0, and then round the positive ones. I've done that and gotten results that make sense, but it doesn't feel mathematically sound. I wonder if you might have some insights about this.
#
# __kirkgosik 2:49 PM__
# are you basically getting 0s, and 1s then?
# 2:50
# they way I would think about that would be like asking the question, is this gene expressed above average in the dataset?
# 2:52
# I guess you would be getting the count you feed in to be the number of standard deviations you would be above the average expression for each gene
# 2:54
# I would think that would do a similiar thing as it does in PCA, if you don't scale highly expressed genes dominant if you do scale they get more evenly represented
# 2:55
# i would think it doesn't mathematically affect the algorithm of LDA but it would just impact how you interpret the results and what might come out as meaningful (edited)
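#
# Below is a minimal sketch (added for illustration) of the "scale, clip negatives, round" transformation discussed above, using `scanpy`. The `adata` object and the `counts.h5ad` file are hypothetical placeholders, not files from this project.
# +
import numpy as np
import scanpy as sc

adata = sc.read_h5ad("counts.h5ad")  # hypothetical input (normalized, log1p counts assumed)
sc.pp.scale(adata)                   # z-score each gene -> the "scaled values"
X = np.asarray(adata.X)
X[X < 0] = 0                         # set everything negative to 0
X = np.rint(X).astype(int)           # round the remaining positive values to integers
# -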
| notebooks/preprocessing-explore.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # NLP 2 : Neural Embeddings, Text Classification, Text Generation
#
#
# To use statistical classifiers with text, it is first necessary to vectorize the text. In the first practical session we explored the **bag of words** model.
#
# Modern **state of the art** methods uses embeddings to vectorize the text before classification in order to avoid feature engineering.
#
# ## Dataset
# https://github.com/cedias/practicalNLP/tree/master/dataset
#
# ## "Modern" NLP pipeline
#
# In opposition to the **bag of words** model, in the modern NLP pipeline everything is **embeddings**. Instead of encoding a text as a **sparse vector** of length $D$ (the size of the feature dictionary), the goal is to encode the text as a meaningful dense vector of a small size $|e| <<< |D|$.
#
#
# The raw classification pipeline is then the following:
#
# ```
# raw text ---|embedding table|--> vectors --|Neural Net|--> class
# ```
#
#
# ### Using a language model:
#
# How to tokenize the text and extract a feature dictionary is still a manual task. To directly obtain meaningful embeddings, it is common to use a pre-trained language model such as `word2vec`, which we explore in this practical.
#
# In this setting, the pipeline becomes the following:
# ```
#
# raw text ---|(pre-trained) Language Model|--> vectors --|classifier (or fine-tuning)|--> class
# ```
#
#
# - #### Classic word embeddings
#
# - [Word2Vec](https://arxiv.org/abs/1301.3781)
# - [Glove](https://nlp.stanford.edu/projects/glove/)
#
#
# - #### bleeding edge language models techniques (only here for reference)
#
# - [UMLFIT](https://arxiv.org/abs/1801.06146)
# - [ELMO](https://arxiv.org/abs/1802.05365)
# - [GPT](https://blog.openai.com/language-unsupervised/)
# - [BERT](https://arxiv.org/abs/1810.04805)
#
#
#
#
#
#
# ### Goal of this session:
#
# 1. Train word embeddings on training dataset
# 2. Tinker with the learnt embeddings and see learnt relations
# 3. Tinker with pre-trained embeddings.
# 4. Use those embeddings for classification
# 5. Compare different embedding models
# 6. Pytorch first look: learn to generate text.
#
#
#
#
#
# ## Loading data (same as in nlp 1)
# +
import json
from collections import Counter
#### /!\ YOU NEED TO UNZIP dataset/json_pol.zip first /!\
# Loading json
with open("data/json_pol",encoding="utf-8") as f:
data = f.readlines()
json_data = json.loads(data[0])
train = json_data["train"]
test = json_data["test"]
# Quick Check
counter_train = Counter((x[1] for x in train))
counter_test = Counter((x[1] for x in test))
print("Number of train reviews : ", len(train))
print("----> # of positive : ", counter_train[1])
print("----> # of negative : ", counter_train[0])
print("")
print(train[0])
print("")
print("Number of test reviews : ",len(test))
print("----> # of positive : ", counter_test[1])
print("----> # of negative : ", counter_test[0])
print("")
print(test[0])
print("")
# -
# ## Word2Vec: Quick Recap
#
# **[Word2Vec](https://arxiv.org/abs/1301.3781) is composed of two distinct language models (CBOW and SG), optimized to quickly learn word vectors**
#
#
# given a random text: `i'm taking the dog out for a walk`
#
#
#
# ### (a) Continuous Bag of Word (CBOW)
# - predicts a word given a context
#
# maximizing `p(dog | i'm taking the ___ out for a walk)`
#
# ### (b) Skip-Gram (SG)
# - predicts a context given a word
#
# maximizing `p(i'm taking the ___ out for a walk | dog)`
#
#
#
#
# ## Step 1: train (or load) a language model (word2vec)
#
# Gensim has one of [Word2Vec](https://radimrehurek.com/gensim/models/word2vec.html) fastest implementation.
#
#
# ### Train:
# +
import gensim
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
text = [t.split() for t,p in train]
# the following configuration is mostly gensim's default configuration (note that sg=1 below selects the skip-gram model)
w2v = gensim.models.word2vec.Word2Vec(sentences=text,
                                      vector_size=100, window=5,   ### embedding size and context window
min_count=5,
sample=0.001, workers=3,
sg=1, hs=0, negative=5, ### set sg to 1 to train a sg model
cbow_mean=1,
epochs=5)
# -
# ### Load pre-trained embeddings:
# +
# It's for later
from gensim.models import KeyedVectors
from gensim.test.utils import datapath
# w2v = KeyedVectors.load_word2vec_format(datapath('downloaded_vectors_path'), binary=False)
word_vectors = w2v.wv
word_vectors.save("downloaded_vectors_path")
# Load back with memory-mapping = read-only, shared across processes.
wv = KeyedVectors.load("downloaded_vectors_path", mmap='r')
vector = wv['car'] # Get numpy vector of a word
# -
# In Gensim, embeddings are loaded and can be used via the ["KeyedVectors"](https://radimrehurek.com/gensim/models/keyedvectors.html) class
#
# > Since trained word vectors are independent from the way they were trained (Word2Vec, FastText, WordRank, VarEmbed etc), they can be represented by a standalone structure, as implemented in this module.
#
# >The structure is called “KeyedVectors” and is essentially a mapping between entities and vectors. Each entity is identified by its string id, so this is a mapping between {str => 1D numpy array}.
#
# >The entity typically corresponds to a word (so the mapping maps words to 1D vectors), but for some models, they key can also correspond to a document, a graph node etc. To generalize over different use-cases, this module calls the keys entities. Each entity is always represented by its string id, no matter whether the entity is a word, a document or a graph node.
# ## STEP 2: Test learnt embeddings
#
# The word embedding space directly encodes similarities between words: the vector coding for the word "great" will be closer to the vector coding for "good" than to the one coding for "bad". Generally, [cosine similarity](https://en.wikipedia.org/wiki/Cosine_similarity) is the distance used when considering distance between vectors.
#
# KeyedVectors have a built in [similarity](https://radimrehurek.com/gensim/models/keyedvectors.html#gensim.models.keyedvectors.BaseKeyedVectors.similarity) method to compute the cosine similarity between words
# is great really closer to good than to bad ?
print("great and good:",w2v.wv.similarity("great","good"))
print("great and bad:",w2v.wv.similarity("great","bad"))
# Since cosine distance encodes similarity, neighboring words are supposed to be similar. The [most_similar](https://radimrehurek.com/gensim/models/keyedvectors.html#gensim.models.keyedvectors.BaseKeyedVectors.most_similar) method returns the `topn` words given a query.
# The query can be as simple as a word, such as "movie"
# Try changing the word
print(w2v.wv.most_similar("movie",topn=5),"\n") # 5 most similar words
print(w2v.wv.most_similar("awesome",topn=5),"\n")
print(w2v.wv.most_similar("actor",topn=5),"\n")
# But it can be a more complicated query
# Word embedding spaces tend to encode much more.
#
# The most famous example is: `vec(king) - vec(man) + vec(woman) => vec(queen)`
# +
# What is awesome - good + bad ?
print(w2v.wv.most_similar(positive=["awesome","bad"],negative=["good"],topn=3),"\n")
print(w2v.wv.most_similar(positive=["actor","woman"],negative=["man"],topn=3),"\n") # do the famous exemple works for actor ?
# Try other things like plurals for example.
print(w2v.wv.most_similar(positive=["men","man"],negative=["women"],topn=3),"\n")
# -
# To test learnt "syntactic" and "semantic" similarities, Mikolov et al. introduced a special dataset containing a wide variety of three-way similarities.
out = w2v.wv.evaluate_word_analogies("data/questions-words.txt",case_insensitive=True) # original semantic syntactic dataset.
# When the w2v model is trained on the review dataset only, it hasn't seen a lot of data, so it does not perform very well on this analogy benchmark.
#
# ## STEP 3: sentiment classification
#
# In the previous practical session, we used a bag of words approach to transform text into vectors.
# Here, we propose to try to use word vectors (previously learnt or loaded).
#
#
# ### <font color='green'> Since we only have word vectors and sentences are made of multiple words, we need to aggregate them. </font>
#
#
# ### (1) Vectorize reviews using word vectors:
#
# Word aggregation can be done in different ways (a per-dimension averaging sketch follows the pointers below):
#
# - Sum
# - Average
# - Min/feature
# - Max/feature
#
# #### a few pointers:
#
# - `w2v.wv.key_to_index` is a dict keyed by the vocabulary (all existing words in your model)
# - `np.minimum(a,b) and np.maximum(a,b)` respectively return element-wise min/max
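#
# For reference, here is a minimal per-dimension (axis=0) averaging sketch, assuming the `wv` KeyedVectors loaded above and 100-dimensional vectors; out-of-vocabulary words are simply skipped. It is only one possible aggregation, not the expected answer to the exercise.
# +
import numpy as np

def vectorize_mean(text, dim=100):
    """Average the word vectors of a review along the embedding dimensions."""
    vecs = [wv[w] for w in text.split() if w in wv.key_to_index]
    if not vecs:
        return np.zeros(dim)
    return np.mean(vecs, axis=0)  # one fixed-size vector per review
# -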
# +
import numpy as np
# We first need to vectorize text:
# First we propose to a sum of them
def vectorize(text,types="max"):
"""
This function should vectorize one review
input: str
output: np.array(float)
"""
vec = []
for i in text.split():
try :
vec.append(wv[i])
except :
vec.append(np.zeros(100))
if types == "mean" :
return np.array(vec).mean(axis=1)
if types == "sum":
return np.array(vec).sum(axis=1)
if types == "max" :
return np.array(vec).max(axis=1)
if types == "min" :
return np.array(vec).min(axis=1)
classes = np.array([pol for text,pol in train])
X = np.array([vectorize(text) for text,pol in train])
X_test = np.array([vectorize(text) for text,pol in test])
true = np.array([pol for text,pol in test])
#let's see what a review vector looks like.
print(X[0])
# -
print(len(X[2]))
# ### (2) Train a classifier
# as in the previous practical session, train a logistic regression to do sentiment classification with word vectors
#
#
# +
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report, f1_score, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn import svm
# X_train, X_test, Y_train, Y_test = train_test_split(X, Y, train_size=0.8)
# print(X_train.shape)
# print(X_test.shape)
# print(Y_train.shape)
# print(Y_test.shape)
maxi = 0
for i in range(X.shape[0]):
if len(X[i]) > maxi:
maxi = len(X[i])
print(maxi)
padded_array = np.zeros((X.shape[0], maxi))
n,m = padded_array.shape
print(n, m)
for i in range(n):
for j in range(m):
if j < len(X[i]):
padded_array[i][j] = X[i][j]
print(padded_array.shape)
# -
padded_array_test = np.zeros((X_test.shape[0], maxi))
n,m = padded_array_test.shape
print(n, m)
for i in range(n):
for j in range(m):
if j < len(X_test[i]):
padded_array_test[i][j] = X_test[i][j]
print(padded_array_test.shape)
clf = svm.SVC(max_iter=1000)
clf.fit(padded_array, classes)
preds = clf.predict(padded_array_test)
print(accuracy_score(preds, true))
print(classification_report(preds, true))
print(f1_score(preds, true))
# performance should be worse than with bag of words (~80%). Sum/mean aggregation does not work well on long reviews (especially with many frequent words): it adds a lot of noise.
#
# ## **Todo** : Try answering the following questions:
#
# - Which word2vec model works better: skip-gram or CBOW?
# - Do pretrained vectors work better than those learnt on the train dataset?
#
#
# **(Bonus)** To have a better accuracy, we could try two things:
# - Better aggregation methods (weight by tf-idf? a sketch is given below)
# - Another word vectorizing method such as [fasttext](https://radimrehurek.com/gensim/models/fasttext.html)
# - A document vectorizing method such as [Doc2Vec](https://radimrehurek.com/gensim/models/doc2vec.html)
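#
# A minimal sketch of the tf-idf weighting idea, assuming the `wv` KeyedVectors and the `train` list of `(text, polarity)` couples defined above; the helper name `tfidf_vectorize` is illustrative only, not part of the practical's required API.
# +
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

tfidf = TfidfVectorizer()
tfidf.fit(t for t, p in train)
idf = dict(zip(tfidf.get_feature_names_out(), tfidf.idf_))

def tfidf_vectorize(text, dim=100):
    """Weight each word vector by its idf before averaging."""
    pairs = [(wv[w], idf.get(w.lower(), 1.0)) for w in text.split() if w in wv.key_to_index]
    if not pairs:
        return np.zeros(dim)
    vecs, weights = zip(*pairs)
    return np.average(np.array(vecs), axis=0, weights=np.array(weights))
# -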
# ## --- Generate text with a recurrent neural network (Pytorch) ---
# ### (Mostly Read & Run)
#
# The goal is to replicate the (famous) experiment from [Karpathy's blog](http://karpathy.github.io/2015/05/21/rnn-effectiveness/)
#
# To learn to generate text, we train a recurrent neural network to do the following task:
#
# Given a "chunk" of text: `this is random text`
#
# the goal of the network is to predict each character in **`his is random text`** sequentially given the following sequential input **`this is random tex`**:
#
#
# + active=""
# Input -> Output
# --------------
# T -> H
# H -> I
# I -> S
# S -> " "
# " " -> I
# I -> S
# S -> " "
# [...]
# -
#
# ## Load text (dataset/input.txt)
#
# Before building training batch, we load the full text in RAM
# +
import unidecode
import string
import random
import re
import torch
import torch.nn as nn
all_characters = string.printable
n_characters = len(all_characters)
file = unidecode.unidecode(open('data/input.txt').read()) #clean text => only ascii
file_len = len(file)
print('file_len =', file_len)
# -
# ## 2: Helper functions:
#
# We have a text and we want to feed batch of chunks to a neural network:
#
# one chunk A,B,C,D,E
# [input] A,B,C,D -> B,C,D,E [output]
#
# Note: we will use an embedding layer instead of a one-hot encoding scheme.
#
# for this, we have 3 functions:
#
# - One to get a random str chunk of size `chunk_len` : `random_chunk`
# - One to turn a chunk into a tensor of size `(1,chunk_len)` coding for each characters : `char_tensor`
# - One to return random input and output chunks of size `(batch_size,chunk_len)` : `random_training_set`
#
#
#
# +
import time, math
#Get a piece of text
def random_chunk(chunk_len):
start_index = random.randint(0, file_len - chunk_len)
end_index = start_index + chunk_len + 1
return file[start_index:end_index]
# Turn string into list of longs
def char_tensor(string):
tensor = torch.zeros(1,len(string)).long()
for c in range(len(string)):
tensor[0,c] = all_characters.index(string[c])
return tensor
#Turn a piece of text in train/test
def random_training_set(chunk_len=200, batch_size=8):
chunks = [random_chunk(chunk_len) for _ in range(batch_size)]
inp = torch.cat([char_tensor(chunk[:-1]) for chunk in chunks],dim=0)
target = torch.cat([char_tensor(chunk[1:]) for chunk in chunks],dim=0)
return inp, target
print(random_training_set(10,4)) ## should return 4 chunks of 10 letters.
# -
# ## The actual RNN model (only thing to complete):
#
# It should be composed of three distinct modules:
#
# - an [embedding layer](https://pytorch.org/docs/stable/nn.html#embedding) (n_characters, hidden_size)
#
# ```
# nn.Embedding(len_dic,size_vec)
# ```
# - a [recurrent](https://pytorch.org/docs/stable/nn.html#recurrent-layers) layer (hidden_size, hidden_size)
# ```
# nn.RNN(in_size,out_size) or nn.GRU() or nn.LSTM() => rnn_cell parameter
# ```
# - a [prediction](https://pytorch.org/docs/stable/nn.html#linear) layer (hidden_size, output_size)
#
# ```
# nn.Linear(in_size,out_size)
# ```
# => Complete the `init` function code
# +
import torch.nn.functional as f
class RNN(nn.Module):
def __init__(self, n_char, hidden_size, output_size, n_layers=1,rnn_cell=nn.RNN):
"""
Create the network
"""
super(RNN, self).__init__()
self.n_char = n_char
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
# (batch,chunk_len) -> (batch, chunk_len, hidden_size)
self.embed = nn.Embedding(n_char, hidden_size)
# (batch, chunk_len, hidden_size) -> (batch, chunk_len, hidden_size)
        self.rnn = rnn_cell(hidden_size, hidden_size, batch_first=True)  # keep the batch dimension first, matching the shapes in the comments
#(batch, chunk_len, hidden_size) -> (batch, chunk_len, output_size)
self.predict = nn.Linear(hidden_size, output_size)
def forward(self, input):
"""
batched forward: input is (batch > 1,chunk_len)
"""
input = self.embed(input)
output,_ = self.rnn(input)
output = self.predict(f.tanh(output))
return output
def forward_seq(self, input,hidden=None):
"""
not batched forward: input is (1,chunk_len)
"""
input = self.embed(input)
output,hidden = self.rnn(input.unsqueeze(0),hidden)
output = self.predict(f.tanh(output))
return output,hidden
# -
# ## Text generation function
#
# Sample text from the model
def generate(model,prime_str='A', predict_len=100, temperature=0.8):
prime_input = char_tensor(prime_str).squeeze(0)
hidden = None
predicted = prime_str+""
# Use priming string to "build up" hidden state
for p in range(len(prime_str)-1):
_,hidden = model.forward_seq(prime_input[p].unsqueeze(0),hidden)
#print(hidden.size())
for p in range(predict_len):
output, hidden = model.forward_seq(prime_input[-1].unsqueeze(0), hidden)
# Sample from the network as a multinomial distribution
output_dist = output.data.view(-1).div(temperature).exp()
#print(output_dist)
top_i = torch.multinomial(output_dist, 1)[0]
#print(top_i)
# Add predicted character to string and use as next input
predicted_char = all_characters[top_i]
predicted += predicted_char
prime_input = torch.cat([prime_input,char_tensor(predicted_char).squeeze(0)])
return predicted
# ## Training loop for net
# +
def time_since(since):
s = time.time() - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
###Parameters
n_epochs = 10000
print_every = 100
plot_every = 10
hidden_size = 256
n_layers = 2
lr = 0.005
batch_size = 16
chunk_len = 20
####
model = RNN(n_characters, hidden_size, n_characters, n_layers, nn.LSTM) #create model
model_optimizer = torch.optim.Adam(model.parameters(), lr=lr) #create Adam optimizer
criterion = nn.CrossEntropyLoss() #chose criterion
start = time.time()
all_losses = []
loss_avg = 0
def train(inp, target):
"""
Train sequence for one chunk:
"""
#reset gradients
model_optimizer.zero_grad()
# predict output
output = model(inp)
#compute loss
loss = criterion(output.view(batch_size*chunk_len,-1), target.view(-1))
#compute gradients and backpropagate
loss.backward()
model_optimizer.step()
return loss.data.item()
for epoch in range(1, n_epochs + 1):
loss = train(*random_training_set(chunk_len,batch_size)) #train on one chunk
loss_avg += loss
if epoch % print_every == 0:
print('[%s (%d %d%%) %.4f]' % (time_since(start), epoch, epoch / n_epochs * 100, loss))
print(generate(model,'Wh', 100), '\n')
if epoch % plot_every == 0:
all_losses.append(loss_avg / plot_every)
loss_avg = 0
# -
# ## Visualize loss
# +
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# %matplotlib inline
plt.figure()
plt.plot(all_losses)
plt.show()
# -
# ## Try different temperatures
#
# Changing the distribution sharpness has an impact on character sampling:
#
# more or less probable things are sampled
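#
# A small illustration with made-up logits of how temperature reshapes the sampling distribution: a low temperature sharpens it, a high temperature flattens it.
# +
import torch

logits = torch.tensor([2.0, 1.0, 0.5])
for t in (0.1, 0.8, 1.5):
    probs = (logits / t).exp()
    probs = probs / probs.sum()
    print(t, [round(p, 3) for p in probs.tolist()])
# -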
print(generate(model,'T', 200, temperature=1))
print("----")
print(generate(model,'Th', 200, temperature=0.8))
print("----")
print(generate(model,'Th', 200, temperature=0.5))
print("----")
print(generate(model,'Th', 200, temperature=0.3))
print("----")
print(generate(model,'Th', 200, temperature=0.1))
# ### Improving this code:
#
# (a) Tinker with parameters:
#
# - Is it really necessary to have 100 dims character embeddings
# - Chunk length can be gradually increased
# - Try changing RNN cell type (GRUs - LSTMs)
#
# (b) Add GPU support to go faster (a minimal sketch follows below)
#
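#
# A minimal sketch of point (b), assuming a CUDA-capable machine and reusing `model`, `criterion` and `model_optimizer` from above; the model and every batch must live on the same device, and `train_on_device` is an illustrative name, not part of the practical.
# +
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

def train_on_device(inp, target):
    inp, target = inp.to(device), target.to(device)
    model_optimizer.zero_grad()
    output = model(inp)
    loss = criterion(output.view(batch_size * chunk_len, -1), target.view(-1))
    loss.backward()
    model_optimizer.step()
    return loss.item()
# -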
# ## ------ End of practical
#
# #### Legacy loading code
# +
# import glob
# from os.path import split as pathsplit
# dir_train = "data/aclImdb/train/"
# dir_test = "data/aclImdb/test/"
# train_files = glob.glob(dir_train+'pos/*.txt') + glob.glob(dir_train+'neg/*.txt')
# test_files = glob.glob(dir_test+'pos/*.txt') + glob.glob(dir_test+'neg/*.txt')
# def get_polarity(f):
# """
# Extracts polarity from filename:
# 0 is negative (< 5)
# 1 is positive (> 5)
# """
# _,name = pathsplit(f)
# if int(name.split('_')[1].split('.')[0]) < 5:
# return 0
# else:
# return 1
# def open_one(f):
# polarity = get_polarity(f)
# with open(f,"r") as review:
# text = " ".join(review.readlines()).strip()
# return (text,polarity)
# print(open_one(train_files[0]))
# train = [open_one(x) for x in train_files] #contains (text,pol) couples
# test = [open_one(x) for x in test_files] #contains (text,pol) couples
# -
| S2/RITAL/TAL/TME/TME2/Dan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: river
# language: python
# name: python3
# ---
# # Content personalization
# ## Without context
# This example takes inspiration from Vowpal Wabbit's [excellent tutorial](https://vowpalwabbit.org/tutorials/cb_simulation.html).
#
# Content personalization is about taking into account user preferences. It's a special case of recommender systems. Ideally, side-information should be taken into account in addition to the user. But we'll start with something simpler. We'll assume that each user has stable preferences that are independent of the context. We capture this by implementing a "reward" function.
# +
users = ['Tom', 'Anna']
items = {'politics', 'sports', 'music', 'food', 'finance', 'health', 'camping'}
def get_reward(user, item) -> bool:
if user == 'Tom':
return item in {'music', 'politics'}
if user == 'Anna':
return item in {'politics', 'sports'}
# -
# Measuring the performance of a recommendation is not straightforward, mostly because of the interactive aspect of recommender systems. In a real situation, recommendations are presented to a user, and the user gives feedback indicating whether they like what they have been recommended or not. This feedback loop can't be captured entirely by a historical dataset. Some kind of simulator is required to generate recommendations and capture feedback. We already have a reward function. Now let's implement a simulation function.
# +
import random
import matplotlib.pyplot as plt
def plot_ctr(ctr):
plt.plot(range(1, len(ctr) + 1), ctr)
plt.xlabel('n_iterations', fontsize=14)
plt.ylabel('CTR', fontsize=14)
plt.ylim([0, 1])
plt.title(f'final CTR: {ctr[-1]:.2%}', fontsize=14)
plt.grid()
def simulate(n, reward_func, model, seed):
rng = random.Random(seed)
n_clicks = 0
ctr = [] # click-through rate along time
for i in range(n):
# Pick a user at random
user = rng.choice(users)
# Make a single recommendation
item = model.rank(user, items=items)[0]
# Measure the reward
clicked = reward_func(user, item)
n_clicks += clicked
ctr.append(n_clicks / (i + 1))
# Update the model
model.learn_one(user, item, clicked)
plot_ctr(ctr)
# -
# This simulation function does quite a few things. It can be seen as a simple reinforcement learning simulation. It samples a user, and then asks the model to provide a single recommendation. The user then gives feedback as to whether they liked the recommendation or not. Crucially, the user doesn't tell us what item they would have liked. We could model this as a multi-class classification problem if that were the case.
#
# The strategy parameter determines the mechanism used to generate the recommendations. The `'best'` strategy means that the items are each scored by the model, and are then ranked from the most preferred to the least preferred. Here the most preferred item is the one which gets recommended. But you could imagine all sorts of alternative ways to proceed.
#
# We can first evaluate a recommender which acts completely at random. It assigns a random preference to each item, regardless of the user.
# +
from river import reco
model = reco.RandomNormal(seed=10)
simulate(5_000, get_reward, model, seed=42)
# -
# We can see that the click-through rate (CTR) oscillates around 28.74%. In fact, this model is expected to be correct `100 * (2 / 7)% = 28.57%` of the time. Indeed, each user likes two items, and there are seven items in total.
#
# Let's now use the `Baseline` recommender. This one models each preference as the following sum:
#
# $$preference = \bar{y} + b_{u} + b_{i}$$
#
# where
#
# - $\bar{y}$ is the average CTR overall
# - $b_{u}$ is the average CTR per user minus $\bar{y}$ -- it's therefore called a *bias*
# - $b_{i}$ is the average CTR per item minus $\bar{y}$
#
# This model is considered to be a baseline because it doesn't actually learn what items are preferred by each user. Instead it models each user and item separately. We shouldn't expect it to be a strong model. It should however do better than the random model used above.
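#
# As a small worked example with made-up numbers (not taken from the simulation above): if the overall CTR is $\bar{y} = 0.30$, Tom's average CTR is $0.40$ and the politics item's average CTR is $0.50$, then the baseline preference for (Tom, politics) is $0.30 + (0.40 - 0.30) + (0.50 - 0.30) = 0.60$.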
model = reco.Baseline(seed=10)
simulate(5_000, get_reward, model, seed=42)
# This baseline model seems perfect, which is surprising. The reason why it works so well is because both users have in common that they both like politics. The model therefore learns that the `'politics'` is a good item to recommend.
model.i_biases
# The model is not as performant if we use a reward function where both users have different preferences.
simulate(
5_000,
reward_func=lambda user, item: (
item in {'music', 'politics'} if user == "Tom" else
item in {'food', 'sports'}
),
model=model,
seed=42
)
# A good recommender model should at the very least understand what kind of items each user prefers. One of the simplest and yet most performant ways to do this is Simon Funk's SGD method, which he developed for the Netflix challenge and wrote about [here](https://sifter.org/simon/journal/20061211.html). It models each user and each item as latent vectors. The dot product of these two vectors is the expected preference of the user for the item.
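#
# As a toy illustration of the dot product (with made-up latent vectors, not the ones `FunkMF` actually learns): a user vector $(0.9, 0.1)$ and an item vector $(0.8, 0.2)$ give an expected preference of $0.9 \times 0.8 + 0.1 \times 0.2 = 0.74$.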
model = reco.FunkMF(seed=10)
simulate(5_000, get_reward, model, seed=42)
# We can see that this model learns what items each user enjoys very well. Of course, there are some caveats. In our simulation, we ask the model to recommend the item most likely to be preferred for each user. Indeed, we rank all the items and pick the item at the top of the list. We do this many times for only two users.
#
# This is of course not realistic. Users will get fed up with recommendations if they're always shown the same item. It's important to include diversity into recommendations, and to let the model explore other options instead of always focusing on the item with the highest score. This is where evaluating recommender systems gets tricky: the reward function itself is difficult to model.
#
# We will keep ignoring these caveats in this notebook. Instead we will focus on a different concern: making recommendations when context is involved.
# ## With context
# We'll add some context by making it so that user preferences change depending on the time the day. Very simply, preferences might change from morning to afternoon. This is captured by the following reward function.
# +
times_of_day = ['morning', 'afternoon']
def get_reward(user, item, context):
if user == 'Tom':
if context['time_of_day'] == 'morning':
return item == 'politics'
if context['time_of_day'] == 'afternoon':
return item == 'music'
if user == 'Anna':
if context['time_of_day'] == 'morning':
return item == 'sports'
if context['time_of_day'] == 'afternoon':
return item == 'politics'
# -
# We have to update our simulation function to generate a random context at each step. We also want our model to use it for recommending items as well as learning.
def simulate(n, reward_func, model, seed):
rng = random.Random(seed)
n_clicks = 0
ctr = []
for i in range(n):
user = rng.choice(users)
# New: pass a context
context = {'time_of_day': rng.choice(times_of_day)}
item = model.rank(user, items, context)[0]
clicked = reward_func(user, item, context)
n_clicks += clicked
ctr.append(n_clicks / (i + 1))
# New: pass a context
model.learn_one(user, item, clicked, context)
plot_ctr(ctr)
# Not all models are capable of taking into account context. For instance, the `FunkMF` model only models users and items. It completely ignores the context, even when we provide one. All recommender models inherit from the base `Recommender` class. They also have a property which indicates whether or not they are able to handle context:
model = reco.FunkMF(seed=10)
model.is_contextual
# Let's see how well it performs.
simulate(5_000, get_reward, model, seed=42)
# The performance has roughly been divided by half. This is most likely because there are now two times of day, and if the model has learnt preferences for one time of the day, then it's expected to be wrong half of the time.
#
# Before delving into recsys models that can handle context, a simple hack is to notice that we can append the time of day to the user. This effectively results in new users which our model can distinguish between. We could apply this trick during the simulation, but we can also override the behavior of the `learn_one` and `rank` methods of our model.
# +
class FunkMFWithHack(reco.FunkMF):
def learn_one(self, user, item, reward, context):
user = f"{user}@{context['time_of_day']}"
return super().learn_one(user, item, reward, context)
def rank(self, user, items, context):
user = f"{user}@{context['time_of_day']}"
return super().rank(user, items, context)
model = FunkMFWithHack(seed=29)
simulate(5_000, get_reward, model, seed=42)
# -
# We can verify that the model has learnt the correct preferences by looking at the expected preference for each `(user, item)` pair.
# +
import pandas as pd
(
pd.DataFrame(
{
'user': user,
'item': item,
'preference': model.predict_one(user, item)
}
for user in model.u_latents
for item in model.i_latents
)
.pivot('user', 'item')
.style.highlight_max(color='lightgreen', axis='columns')
)
| docs/examples/content-personalization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Implement an image classification application in one minute
#
# In this tutorial, we demonstrate how to build a LeNet5 model with the TinyMS 0.3.1 API, download the dataset, train the model, start the server and run inference.
#
# ## Environment requirements
# - Ubuntu: `18.04`
# - docker: `v18.06.1-ce`
# - Jupyter: use `TinyMS 0.3.1-Jupyter`; see [Install TinyMS](https://tinyms.readthedocs.io/zh_CN/latest/quickstart/install.html) to set up this environment.
#
# ## Introduction
#
# TinyMS is a high-level API that aims to make deep learning easier to pick up for new users. TinyMS effectively reduces the number of operations needed to build, train, validate and run inference with a model. TinyMS also provides tutorials and documentation to help developers get started.
#
# This tutorial consists of six parts: `build the model`, `download the dataset`, `train`, `define the servable json`, `start the server` and `run inference`, in which the server is started in a subprocess.
# +
import os
import json
from PIL import Image
from tinyms import context
from tinyms.data import MnistDataset, download_dataset
from tinyms.vision import mnist_transform, ImageViewer
from tinyms.model import Model, lenet5
import tinyms.optimizers as opt
from tinyms.serving import Server, Client
from tinyms.metrics import Accuracy
from tinyms.losses import SoftmaxCrossEntropyWithLogits
from tinyms.callbacks import ModelCheckpoint, CheckpointConfig, LossMonitor
# -
# ### 1. Build the model
#
# TinyMS encapsulates the init and construct functions of the MindSpore LeNet5 model, so the number of lines of code is greatly reduced and the originally large code blocks are compressed to a minimum:
# Build the network
net = lenet5(class_num=10)
model = Model(net)
# ### 2. Download the dataset
#
# If no `mnist` folder exists under `/home/jovyan`, the MNIST dataset is automatically downloaded and stored under `/home/jovyan`; if the `mnist` folder already exists there, this step is skipped.
# Download the dataset
mnist_path = '/home/jovyan/mnist'
if not os.path.exists(mnist_path):
download_dataset('mnist', '/home/jovyan')
print('************Download complete*************')
else:
print('************Dataset already exists.**************')
# ### 3. Train the model
#
# The training and evaluation splits of the dataset are defined in this step, together with the training parameters. The ckpt file generated by training is saved to the `/home/jovyan/tinyms/serving/lenet5` folder for later use; after training, evaluation is run and the `Accuracy` metric is printed.
# +
# Create the checkpoint folder
ckpt_folder = '/home/jovyan/tinyms/serving/lenet5'
ckpt_path = '/home/jovyan/tinyms/serving/lenet5/lenet5.ckpt'
if not os.path.exists(ckpt_folder):
# !mkdir -p /home/jovyan/tinyms/serving/lenet5
else:
print('lenet5 ckpt folder already exists')
# Set the environment parameters
device_target = "CPU"
context.set_context(mode=context.GRAPH_MODE, device_target=device_target)
dataset_sink_mode = False
# Create the datasets
train_dataset = MnistDataset(os.path.join(mnist_path, "train"), shuffle=True)
train_dataset = mnist_transform.apply_ds(train_dataset)
eval_dataset = MnistDataset(os.path.join(mnist_path, "test"), shuffle=True)
eval_dataset = mnist_transform.apply_ds(eval_dataset)
# Set the training parameters
lr = 0.01
momentum = 0.9
epoch_size = 1
batch_size = 32
# Define the loss function
net_loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
# Define the optimizer
net_opt = opt.Momentum(net.trainable_params(), lr, momentum)
net_metrics={"Accuracy": Accuracy()}
model.compile(loss_fn=net_loss, optimizer=net_opt, metrics=net_metrics)
print('************************Start training*************************')
ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=CheckpointConfig(save_checkpoint_steps=1875, keep_checkpoint_max=10))
model.train(epoch_size, train_dataset, callbacks=[ckpoint_cb, LossMonitor()],dataset_sink_mode=dataset_sink_mode)
print('************************Finished training*************************')
model.save_checkpoint(ckpt_path)
model.load_checkpoint(ckpt_path)
print('************************Start evaluation*************************')
acc = model.eval(eval_dataset, dataset_sink_mode=dataset_sink_mode)
print("============== Accuracy:{} ==============".format(acc))
# -
# ### 4. Define servable.json
#
# Define the lenet5 servable json file: it specifies the servable name, the model name, the model format and the number of classes, so that it can be used for inference later.
# +
servable_json = [{'name': 'lenet5',
'description': 'This servable hosts a lenet5 model predicting numbers',
'model': {
"name": "lenet5",
"format": "ckpt",
"class_num": 10}}]
os.chdir("/home/jovyan/tinyms/serving")
json_data = json.dumps(servable_json, indent=4)
with open('servable.json', 'w') as json_file:
json_file.write(json_data)
# -
# ### 5. Start the server
#
# #### 5.1 Introduction
# TinyMS inference uses a client/server (C/S) architecture. TinyMS uses [Flask](https://flask.palletsprojects.com/en/1.1.x/), a lightweight web server framework, as the basis of the C/S communication. In order to run inference on a model, the user must first start the server. If it starts successfully, the server runs in a subprocess, listens for POST requests sent to address 127.0.0.1 on port 5000, and uses MindSpore as the backend to handle these requests. The backend builds the model, runs inference and returns the result to the client.
#
# #### 5.2 Start the server
#
# Run the following code to start the server:
server = Server(serving_path="/home/jovyan/tinyms/serving/")
server.start_server()
# The message above indicates that the server has started and is running.
# After the server is started, go to the menu bar and click `File`=>`new Notebook` to create a new Jupyter notebook, then continue with the client-side steps.
# +
import os
from PIL import Image
from tinyms.vision import ImageViewer
from tinyms.serving import Server, Client
# -
# ### 6. Inference
#
# #### 6.1 Upload the image
#
# The user needs to upload an image of a digit between 0 and 9 as input. From a command-line terminal, the image can be fetched with `scp` or `wget`; with Jupyter, click the `Upload` button at the top right of the menu and select the image to upload. The image used in this tutorial can be downloaded [here](https://ascend-tutorials.obs.cn-north-4.myhuaweicloud.com/tinyms-test-pics/numbers/7.png); save it under the `/home/jovyan` folder and rename it '7.png' (or any name the user likes).
#
# Alternatively, run the following code to download the image:
if not os.path.exists('/home/jovyan/7.png'):
# !wget -P /home/jovyan https://ascend-tutorials.obs.cn-north-4.myhuaweicloud.com/tinyms-test-pics/numbers/7.png
else:
print('7.png already exists')
# #### 6.2 List servables
#
# Use the `list_servables` function to check which model the backend is currently serving.
client=Client()
client.list_servables()
# If the `description` field of the output shows that this is a `lenet5` model, you can move on to the next step and send an inference request.
# #### 6.3 Send an inference request
#
# Run the `predict` function to send an inference request; for the fourth parameter, choose `TOP1_CLASS` or `TOP5_CLASS` output:
# +
# Set the image path and the output strategy (choose between TOP1 and TOP5)
image_path = "/home/jovyan/7.png"
strategy = "TOP1_CLASS"
# predict(image_path, servable_name, dataset='mnist', strategy='TOP1_CLASS')
# The four parameters of the predict method are the image path, the servable name, the dataset name (MNIST by default) and the output strategy (TOP1 by default, TOP5 optional)
img_viewer = ImageViewer(Image.open(image_path), image_path)
img_viewer.show()
print(client.predict(image_path,'lenet5', 'mnist', strategy))
# -
# If the user sees output similar to the following:
# ```
# TOP1: 7, score: 0.99934917688369750977
# ```
# then a successful inference has been performed.
# ## Shut down the server
#
# Run the following code to shut down the server:
server.shutdown()
| docs/zh_CN/source/quickstart/quickstart_in_one_minute.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This is how `data/mongo-novice-materials.json` was built.
#
# See https://github.com/materialsproject/mapidoc for details.
#
# This notebook uses the `pymatgen` and `funcy` third-party packages.
from pymatgen import MPRester
mpr = MPRester()
materials = mpr.query({},[
'material_id',
'spacegroup',
'elements',
'nelements',
'elasticity',
'pretty_formula',
'chemsys'
])
# +
import json
with open('data/mongo-novice-materials.json', 'w') as f:
json.dump(materials, f)
# -
# !du -h data/mongo-novice-materials.json
# ## Post-processing
# +
import json
with open('data/mongo-novice-materials.json') as f:
materials = json.load(f)
# -
# JSON keys specified by doi:10.1038/sdata.2015.9
elasticity_blessed_keys = [
'elastic_tensor',
'K_Voigt',
'K_Reuss',
'G_Voigt',
'G_Reuss',
'K_VRH',
'G_VRH',
'elastic_anisotropy',
'poisson_ratio',
'calculations', # not in doi:10.1038/sdata.2015.9, but include it
]
# +
for m in materials:
if not m.get('elasticity'):
continue
elasticity = m['elasticity']
for k in list(elasticity.keys()):
if k not in elasticity_blessed_keys:
del elasticity[k]
for m in materials:
if m.get('elasticity'):
assert 'homogeneous_poisson' not in m['elasticity'].keys()
# +
import json
with open('data/mongo-novice-materials.json', 'w') as f:
json.dump(materials, f)
# -
# !du -h data/mongo-novice-materials.json
| get_source_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Convolutional Neural Network (CNN)
# loading libraries
import keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Flatten, Dropout, Conv2D, MaxPooling2D
import matplotlib as mpl
import seaborn as sns
np.random.seed(1367)
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
sns.set_style("ticks", {"xtick.direction": u"in", "ytick.direction": u"in"})
mpl.rcParams["axes.linewidth"] = 2
mpl.rcParams["lines.linewidth"] = 3
(X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data()
_, img_rows, img_cols = X_train.shape
NUM_CLASSES = len(np.unique(y_train))
NUM_INPUT_NODES = img_rows * img_cols
print(F"Number of training samples: {X_train.shape[0]}")
print(F"Number of test samples: {X_test.shape[0]}")
print(F"Image rows: {X_train.shape[1]}")
print(F"Image columns: {X_train.shape[2]}")
print(F"Number of classes: {NUM_CLASSES}")
X_train.shape
#reshaping
#this assumes our data format
#For 3D data, "channels_last" assumes (conv_dim1, conv_dim2, conv_dim3, channels) while
#"channels_first" assumes (channels, conv_dim1, conv_dim2, conv_dim3).
if tf.keras.backend.image_data_format() == "channels_first":
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
#more reshaping
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255
print("X_train shape:", X_train.shape) #X_train shape: (60000, 28, 28, 1)
# +
SAVE_FIG = True
fig = plt.figure(figsize=(10,5))
for i in range(NUM_CLASSES):
ax = fig.add_subplot(2, 5, 1 + i, xticks=[], yticks=[])
idx = X_train[y_train[:]==i,:]
ax.set_title("Class: " + str(i) , fontsize = 20)
plt.imshow(idx[1], cmap="gray")
if SAVE_FIG:
plt.savefig("../assets/header.png" , bbox_inches="tight")
plt.show()
# -
def cnn():
"""
Define a cnn model structure
"""
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation="relu",
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES, activation="softmax"))
return model
def plot_results(results):
"""
Plot accuracy/loss through epochs
"""
fig, axs = plt.subplots(1, 2, figsize=(15, 5))
# summarize history for accuracy
axs[0].plot(
range(1, len(results.history["accuracy"]) + 1),
results.history["accuracy"],
color="navy",
ls="--",
label="Training",
)
axs[0].plot(
range(1, len(results.history["val_accuracy"]) + 1),
results.history["val_accuracy"],
color="cyan",
ls="--",
label="Validation",
)
axs[0].set_ylabel("Accuracy", fontsize=15)
axs[0].set_xlabel("Epoch", fontsize=15)
axs[0].legend(prop={"size": 13}, loc=0, framealpha=0.0)
# summarize history for loss
axs[1].plot(
range(1, len(results.history["loss"]) + 1),
results.history["loss"],
color="navy",
ls="--",
label="Training",
)
axs[1].plot(
range(1, len(results.history["val_loss"]) + 1),
results.history["val_loss"],
color="cyan",
ls="--",
label="Validation",
)
axs[1].set_ylabel("Loss", fontsize=15)
axs[1].set_xlabel("Epoch", fontsize=15)
axs[1].legend(prop={"size": 13}, loc=0, framealpha=0.0)
plt.savefig("../assets/performance_cnn.png" , bbox_inches="tight")
plt.show()
def accuracy(results, X_test, y_test):
"""
Accuracy metric
"""
y_pred_proba = results.model.predict(X_test)
y_pred = np.argmax(y_pred_proba, axis=1)
num_correct = np.sum(y_pred == y_test)
accuracy = float(num_correct) / y_pred_proba.shape[0]
return accuracy * 100
# ## Training
# +
# define model
model = cnn()
# compile
model.compile(optimizer=tf.keras.optimizers.Adam(0.0005),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
start = time.time()
# fit
results = model.fit(X_train, y_train,
batch_size=32,
epochs=25,
verbose=1,
validation_data=(X_test , y_test))
end = time.time()
time_elapsed = end - start
# -
results.model.summary()
# +
# plot model history
plot_results(results)
print(F"Model took {time_elapsed:.3f} seconds to train")
# compute test accuracy
print(F"Accuracy on test data is: {accuracy(results, X_test, y_test):.2f}%")
# -
# saving model
results.model.save("../assets/model_cnn.h5")
m = tf.keras.models.load_model("../assets/model_cnn.h5")
m.summary()
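# As a quick sanity check (assuming the cells above have been run), the reloaded model can be used to predict a few test digits:
# +
sample_probs = m.predict(X_test[:5])          # class probabilities, shape (5, 10)
sample_preds = np.argmax(sample_probs, axis=1)  # predicted digit per image
print("Predicted labels:", sample_preds)
print("True labels:     ", y_test[:5])
# -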
| apps/handwritten-digits-recognizer/notebooks/cnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.0 (Programa)
# language: julia
# name: julia-0.6-programa
# ---
# [FProfile.jl](https://github.com/cstjean/FProfile.jl) provides an alternative interface for Julia's sampling profiler (`@profile`). If you've never used a sampling profiler before, please read [the introduction of this document](https://docs.julialang.org/en/latest/manual/profile/) before proceeding.
# # Profiling
#
# You can build a profile by calling `@fprofile(code, delay=0.001, n_samples=1000000)`:
# +
using FProfile, Calculus
pd = @fprofile second_derivative(sin, 1.0)
# -
# `@fprofile(N, ...)` is shorthand for `@fprofile(for _ in 1:N ... end)`:
pd = @fprofile 1000000 second_derivative(sin, 1.0)
# Do not forget that Julia compiles code the first time a function is run; if you do not want to measure compilation time, execute your code once before profiling.
# # Flat view
#
# FProfile's `flat` report is a [dataframe](http://juliadata.github.io/DataFrames.jl/stable/man/getting_started/#Getting-Started-1). No particular knowledge of dataframes is necessary. I'll provide a few common operations below.
# +
using DataFrames
df = flat(pd)
head(df, 15) # show only the first 15 rows (the 15 rows with the highest counts)
# -
# (<i>REPL note</i>: if the output of `flat` is [incomplete](https://github.com/JuliaData/DataFrames.jl/issues/1272), try `showall(flat(pd))` or `Matrix(flat(pd))`)
#
# The first column shows what fraction of backtraces (in %) go through the `method at file:line_number` in the `stackframe` column. It's the same quantity as in `Base.Profile.print()`, except for recursive calls: if `factorial(2)` calls `factorial(1)`, that's 2 counts in Base's report, but only 1 count in FProfile.
#
# You can select a subset of the dataframe by using one of the five accessors: `get_specialization, get_method, get_file, get_function` and `get_module`.
df[get_function.(df[:stackframe]) .== derivative, :] # select the `derivative` rows
# It is common to focus optimization efforts on one or more modules at a time (... the ones you're developing). `flat(pd, MyModule)` filters out other modules and adds a useful column: `self_pct` measures how much `MyModule`-specific work is done on that line.
#
# For instance, in the code below, while the `do_computation()` call takes a long time (it has a high `count_percent`), it merely calls another `Main` function, so it has a low `self_pct`. `sum_of_sin` has `self_pct = 100%` because while it calls `sum` and `sin`, those are defined in another module (`Base`), and counted as external to `Main`.
#
# `flat(pd, (Module1, Module2, ...))` is also accepted.
@noinline do_computation(n) = sum_of_sin(n)
@noinline sum_of_sin(n) = sum(sin, 1:n)
pd2 = @fprofile do_computation(10000000)
flat(pd2, Main)
# It pays to make sure that functions with a high `self_pct` are [well optimized](https://docs.julialang.org/en/latest/manual/performance-tips/).
#
# Another way to reduce the level of detail is to aggregate by `:specialization, :method, :file, :function`, or `:module`.
df_by_fn = flat(pd, combineby=:function)
# You can see the context (caller/called functions) around each of these rows by passing it to `tree`:
tree(pd, df_by_fn, 9) # show the context of the 9th row of `df_by_fn`
# Other useful dataframe commands:
#
# ```julia
# sort(df, :self_pct, rev=true) # sort by self_pct
# showall(df) # show the whole dataframe
# ```
#
# See `?flat` for more options.
# #### Comparing results
#
# Pass two `ProfileData` objects to `flat` to compare them. The results are sorted with the biggest regressions (in absolute terms) at the top and the biggest improvements at the bottom (see `?DataFrames.tail`).
pd2 = @fprofile 1000000 second_derivative(sin, 1.0)
flat(pd, pd2, combineby=:function)
# Of course, this is most useful when comparing different algorithms or commits (use `reload` or [Revise.jl](https://github.com/timholy/Revise.jl) to update your code). The differences in the above table are just noise.
# # Tree view
#
# FProfile's tree view looks the same as `Base.Profile.print(format=:tree)`. The numbers represent raw counts. (If some branches seem out of place, see [this issue](https://github.com/JuliaLang/julia/issues/9689))
tr = tree(pd)
# Like `flat` reports, trees can be aggregated by `:specialization, :method, :file, :function`, or `:module`:
tree(pd, combineby=:module)
# If you're only interested in a particular module/file/method/function, you can pass it to `tree`, along with an optional _neighborhood range_.
tr_deriv = tree(pd, second_derivative, -1:1) # -1:1 = show one level of callers and one level of called functions
# Trees are an indexable, prunable (use `prune(tree, depth)`) and filterable datastructure. Use the accessors (see above) and `is_inline/is_C_call` in your `filter` predicate.
# # ProfileView integration
#
# `ProfileData` objects can be passed to `ProfileView.view`. This is purely a convenience; it's equivalent to normal ProfileView usage. See [ProfileView.jl](https://github.com/timholy/ProfileView.jl) for details.
#
# ```julia
# using ProfileView
# pd = @fprofile ...
# ProfileView.view(pd)
# ```
# # Backtraces
#
# (if you want to build your own analysis)
#
# The raw profiler data is available either through `Base.Profile.retrieve()`, or through `pd.data` and `pd.lidict`. However, you might find `FProfile.backtraces(::ProfileData)` more immediately useful.
count, trace = backtraces(pd)[1] # get the first unique backtrace
@show count # the number of times that trace occurs in the raw data
trace
# Use the `get_method, get_file, ...` functions on `StackFrame` objects (see above). `tree(pd::ProfileData)` is defined as `tree(backtraces(pd))`, and similarly for `flat`, so you can modify the backtraces and get a tree/flat view of the results.
| Manual.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "-"}
# ## Course Announcements
#
# **Due Friday**:
# - D4
# - Q4
# - A2
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Inference: Linear Regression
#
# - **simple linear regression**
# - effect size
# - p-value
# - interpretation
# - **multiple linear regression**
# - confounding
# - working with **transformed data**
# + slideshow={"slide_type": "slide"}
import pandas as pd
import numpy as np
# Import seaborn and apply its plotting styles
import seaborn as sns
sns.set(style="white", font_scale=2)
# import matplotlib
import matplotlib as mpl
import matplotlib.pyplot as plt
# set plotting size parameter
plt.rcParams['figure.figsize'] = (17, 7)
# Statmodels & patsy
import patsy
import statsmodels.api as sm
import warnings
warnings.filterwarnings('ignore')
from scipy.stats import skewnorm
#improve resolution
#comment this line if erroring on your machine/screen
# %config InlineBackend.figure_format ='retina'
# + [markdown] slideshow={"slide_type": "slide"}
# ## The Question
#
# Does Poverty Percentage affect Teen Birth Rate?
# + [markdown] slideshow={"slide_type": "fragment"}
# $H_o$: There is no relationship between poverty percentage and teen birth rate ($\beta = 0$)
#
# $H_a$: There is a relationship between poverty percentage and teen birth rate ($\beta \ne 0$)
# + [markdown] slideshow={"slide_type": "slide"}
# ## The Data
# + slideshow={"slide_type": "fragment"}
# read in file; specify that it is tab-separated file
df = pd.read_csv('https://raw.githubusercontent.com/shanellis/datasets/master/index.txt', sep='\t')
df.head()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Assumptions of Linear Regression
#
# 1. Linear relationship
# 2. No multicollinearity
# 3. No auto-correlation
# 4. Homoscedasticity
# + [markdown] slideshow={"slide_type": "slide"}
# ## Exploratory Plot
# + slideshow={"slide_type": "fragment"}
sns.scatterplot(x='PovPct', y='TeenBrth', data=df, s=100)
plt.xlabel('Poverty Percentage')
plt.ylabel('Teen Birth Rate');
# + [markdown] slideshow={"slide_type": "fragment"}
# We see there is some sort of relationship here, but we want to *measure that relationship*. Given that the relationship appears linear (Assumption #1), linear regression allows us to do that.
# + slideshow={"slide_type": "fragment"}
plt.subplot(1, 2, 1)
sns.distplot(df['PovPct'])
plt.xlabel('Poverty Percentage')
plt.subplot(1, 2, 2)
sns.distplot(df['TeenBrth'])
plt.xlabel('Teen Birth Rate');
# + [markdown] slideshow={"slide_type": "slide"}
# ## Linear Regression
# + [markdown] slideshow={"slide_type": "fragment"}
# $$outcome = \beta_0 + \beta_1*predictor$$
# + [markdown] slideshow={"slide_type": "fragment"}
# First, let's be sure we're on the same page about what our outcome is. Here, we're interested in whether **Poverty Percentage (predictor)** impacts **Teen Birth Rate (outcome)**.
# + slideshow={"slide_type": "fragment"}
# We can specify our model matrix using `patsy`.
outcome, predictors = patsy.dmatrices('TeenBrth ~ PovPct', df)
model = sm.OLS(outcome, predictors)
# + [markdown] slideshow={"slide_type": "fragment"}
# And, then we just have to fit the model and look at the results.
# + slideshow={"slide_type": "slide"}
## fit the model
results = model.fit()
## look at the results
print(results.summary())
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Clicker Question #1
#
# What is the effect size of the relationship between Poverty Percentage and Teen Birth Rate?
#
# - A) 15.67
# - B) 2.03
# - C) 4.032
# - D) 0.495
# + [markdown] slideshow={"slide_type": "fragment"}
# There is a lot of information in there. Let's focus on the three pieces we discussed last lecture:
#
# - `coef` : $\beta_1$ estimate explaining the effect size
# - `std err` : standard error
# - `P>|t|` : the p-value
# + [markdown] slideshow={"slide_type": "slide"}
# ## Interpretation
# + [markdown] slideshow={"slide_type": "fragment"}
# $$outcome = \beta_0 + \beta_1*predictor$$
# + [markdown] slideshow={"slide_type": "fragment"}
# $$ Teen Birth = 15.67 + 2.03 * Poverty Percentage $$
# + [markdown] slideshow={"slide_type": "fragment"}
# If the Poverty Percentage were 0, the Teen Birth Rate would be **15.67** (The Intercept, $\beta_0$)
# + [markdown] slideshow={"slide_type": "fragment"}
# For every 1 unit increase in Poverty Percentage, you expect to see a **2.03** unit increase in Teen Birth Rate (The effect size, $\beta_1$)
# + slideshow={"slide_type": "slide"}
## look at the results
print(results.summary())
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Clicker Question #2
#
# Which value represents the expected Teen Birth Rate if the Poverty Percentage were 0?
#
# - A) 15.67
# - B) 2.03
# - C) 4.032
# - D) 0.495
# + slideshow={"slide_type": "fragment"}
# Plot the original data (as before)
sns.scatterplot(x='PovPct', y='TeenBrth', alpha=0.3, data=df, s=100)
# Generate and plot the model fit line
xs = np.arange(df['PovPct'].min(), df['PovPct'].max())
ys = 2.0255 * xs + 15.67
plt.plot(xs, ys, '--k', linewidth=4, label='Model')
plt.xlabel('Poverty Percentage')
plt.ylabel('Teen Birth Rate')
plt.legend();
# + [markdown] slideshow={"slide_type": "fragment"}
# The model (the line) mathematically describes the relationship between the data points, but it doesn't explain the relationship *perfectly*. (All models are wrong!)
# + [markdown] slideshow={"slide_type": "notes"}
# Note that this line is drawn in the following way:
# - if you were to draw a vertical line from each point to the fitted line (the residual) and square that distance
# - and then sum those squared distances across all points
# - this line is the one that minimizes that sum (ordinary least squares)
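# + [markdown] slideshow={"slide_type": "notes"}
# As a quick sanity check of this idea (assuming the cells above have been run, and using the rounded coefficients from above), the fitted line should have a smaller sum of squared residuals than any other line we might draw, for example a flat line at the mean:
# + slideshow={"slide_type": "notes"}
# sum of squared residuals for the fitted line vs. a flat line at the mean
resid_fit = df['TeenBrth'] - (15.67 + 2.03 * df['PovPct'])
resid_flat = df['TeenBrth'] - df['TeenBrth'].mean()
print('SSE, fitted line:', round((resid_fit ** 2).sum(), 1))
print('SSE, flat line:  ', round((resid_flat ** 2).sum(), 1))
# -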
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Clicker Question #3
#
# If I told you the Poverty Percentage of a state was 15, what would you estimate would be its Teen Birth Rate?
# - A) ~ 0
# - B) ~ 30
# - C) ~ 40
# - D) ~ 50
# - E) ~ 60
# + slideshow={"slide_type": "slide"}
## look at the results
print(results.summary())
# + [markdown] slideshow={"slide_type": "fragment"}
# $$ Teen Birth = 15.67 + 2.03 * Poverty Percentage $$
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Clicker Question #4
#
# Which of the following is a reasonable estimate (guess) for a state with a Poverty Percentage of 20?
#
# - A) 55.99
# - B) 56.27
# - C) 56.5
# - D) A - C
# - E) None of the above
# + [markdown] slideshow={"slide_type": "slide"}
# ## Estimates
#
# If I told you a location's Poverty Percentage, what would you guess its Teen Birth Rate would be?
# + [markdown] slideshow={"slide_type": "fragment"}
# $$ Teen Birth = 15.67 + 2.03 * Poverty Percentage $$
# + slideshow={"slide_type": "fragment"}
## if Poverty Percentage were 18
birth_rate = 15.67 + 2.03 * 18
birth_rate
# + slideshow={"slide_type": "fragment"}
## if Poverty Percentage were 12
birth_rate = 15.67 + 2.03 * 12
birth_rate
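# + [markdown] slideshow={"slide_type": "notes"}
# Essentially the same estimates can be obtained from the fitted results object directly (assuming the cells above have been run); the design-matrix columns are [Intercept, PovPct], and any small differences come from rounding the coefficients above.
# + slideshow={"slide_type": "notes"}
# model predictions for Poverty Percentages of 18 and 12
print(results.predict(np.array([[1, 18], [1, 12]])))
# -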
# + [markdown] slideshow={"slide_type": "slide"}
# ### Clicker Question #5
#
# What is our conclusion from this analysis? (Question: Does Poverty Percentage affect Teen Birth Rate?)
#
# - A) Reject the null; There is no relationship between Poverty Percentage and Teen Birth Rate
# - B) Reject the null; There is a relationship between Poverty Percentage and Teen Birth Rate
# - C) Fail to reject the null; There is no relationship between Poverty Percentage and Teen Birth Rate
# - D) Fail to reject the null; There is a relationship between Poverty Percentage and Teen Birth Rate
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Multiple Linear Regression
#
# So far, we've only been looking at the effect of one measure (Poverty Percentage) on Teen Birth Rate.
#
# *But*, Poverty Percentage is likely *not* the only thing that affects Teen Birth Rate.
#
# We could imagine that Violent Crime rates in a location may affect both Poverty Percentage and could possibly affect Teen Birth Rate. (A confounder!)
# + [markdown] slideshow={"slide_type": "fragment"}
# This is where **multiple linear regression** is incredibly helpful. Multiple linear regression allows you to measure the effect of multiple predictors on an outcome.
# + slideshow={"slide_type": "fragment"}
outcome, predictors = patsy.dmatrices('TeenBrth ~ PovPct + ViolCrime', df)
mod = sm.OLS(outcome, predictors)
res = mod.fit()
print(res.summary())
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Clicker Question #6
#
# Which has a larger effect on Teen Birth Rate?
#
# - A) Poverty Percentage
# - B) Violent Crime
# - C) Effect is equal across all predictors
# - D) No predictors have an effect on outcome
# + [markdown] slideshow={"slide_type": "fragment"}
# $$ Teen Birth = 17.68 + (1.63 * Poverty Percentage) + (0.4 * Violent Crime) $$
# + [markdown] slideshow={"slide_type": "fragment"}
# If the Poverty Percentage *and* Violent Crime were both 0, the Teen Birth Rate would be **17.68** (The Intercept)
# + [markdown] slideshow={"slide_type": "fragment"}
# Holding Violent crime constant, for every 1 unit increase in Poverty Percentage, you expect to see a **1.63** unit increase in Teen Birth Rate (The effect size, $\beta_1$)
# + [markdown] slideshow={"slide_type": "fragment"}
# Holding Poverty Percentage constant, for every 1 unit increase in Violent crime, you'd expect to see a **0.4** unit increase in Teen Birth Rate (The effect size, $\beta_2$)
# + [markdown] slideshow={"slide_type": "fragment"}
# ## No Multicollinearity
#
# If we have two predictors, we'll want to consider how similar the values are between predictors (typically before you generate the model, but we'll do it after for the sake of argument today).
# + slideshow={"slide_type": "fragment"}
# relationship between predictors
sns.scatterplot(x='PovPct', y='ViolCrime', data=df, s=100)
plt.xlabel('Poverty Percentage')
plt.ylabel('Violent Crime');
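# + [markdown] slideshow={"slide_type": "notes"}
# To put a number on this (assuming the cells above have been run), we can also compute the Pearson correlation between the two predictors; values near 1 or -1 would signal multicollinearity:
# + slideshow={"slide_type": "notes"}
# correlation matrix of the two predictors
print(df[['PovPct', 'ViolCrime']].corr())
# -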
# + slideshow={"slide_type": "fragment"}
df[df['ViolCrime'] > 50]
# + [markdown] slideshow={"slide_type": "slide"}
# ### Outlier handling
# + slideshow={"slide_type": "fragment"}
# removing DC
df = df[df['ViolCrime'] < 50]
# + slideshow={"slide_type": "fragment"}
# relationship between predictors
# after removing outlier
sns.scatterplot(x='PovPct', y='ViolCrime', data=df, s=100)
plt.xlabel('Poverty Percentage')
plt.ylabel('Violent Crime');
# + slideshow={"slide_type": "fragment"}
# run model
outcome, predictors = patsy.dmatrices('TeenBrth ~ PovPct + ViolCrime', df)
mod = sm.OLS(outcome, predictors)
res = mod.fit()
print(res.summary())
# + [markdown] slideshow={"slide_type": "fragment"}
# Holding Violent crime constant, for every 1 unit increase in Poverty Percentage, you expect to see a **1.19** unit increase in Teen Birth Rate (The effect size, $\beta_1$)
# + [markdown] slideshow={"slide_type": "fragment"}
# Holding Poverty Percentage constant, for every 1 unit increase in Violent crime, you'd expect to see a **1.63** unit increase in Teen Birth Rate (The effect size, $\beta_2$)
# + [markdown] slideshow={"slide_type": "fragment"}
# ### Clicker Question #7
#
# Which has a larger effect on Teen Birth Rate?
#
# - A) Poverty Percentage
# - B) Violent Crime
# - C) Effect is equal across all predictors
# - D) No predictors have an effect on outcome
# + [markdown] slideshow={"slide_type": "fragment"}
# Removing the DC outlier led to a different estimate for the effect of violent crime in these data. The inclusion or removal of data in your analyses can affect the results, so it's always important to carefully consider what question you're asking and what data you have.
# + [markdown] slideshow={"slide_type": "slide"}
# # Regression with transformed data
#
# So far, we've been working with data that were approximately Normal and didn't require transformation. But that won't always be the case...
# + slideshow={"slide_type": "fragment"}
sleep = pd.read_csv('https://raw.githubusercontent.com/shanellis/datasets/master/msleep.csv')
sleep.head()
# + [markdown] slideshow={"slide_type": "fragment"}
# What if we were interested in quantifying the **effect that REM sleep has on total sleep**?
#
# We may hypothesize that the more REM sleep an animal gets the less total sleep it needs.
# + slideshow={"slide_type": "fragment"}
sns.distplot(sleep['sleep_rem'][sleep['sleep_rem'].notnull()],hist=True)
plt.xlabel('REM Sleep');
# -
# We saw previously that these data are skewed right, and discussed that transforming these data could help us use them in analysis...
# + slideshow={"slide_type": "fragment"}
sns.scatterplot(x='sleep_rem', y='sleep_total', data=sleep, s=100)
plt.xlabel('REM Sleep')
plt.ylabel('Total Sleep');
# + [markdown] slideshow={"slide_type": "subslide"}
# This relationship is not linear...so linear regression would not be appropriate
# + [markdown] slideshow={"slide_type": "slide"}
# ### Log Transformation
# + slideshow={"slide_type": "fragment"}
# add log-transformed column for sleep_rem
sleep['sleep_rem10'] = np.log10(sleep['sleep_rem'])
# + slideshow={"slide_type": "fragment"}
sns.distplot(sleep['sleep_rem10'][sleep['sleep_rem'].notnull()], hist=True)
plt.xlabel('log10(REM Sleep)');
# + slideshow={"slide_type": "fragment"}
sns.scatterplot(x='sleep_rem10', y='sleep_total', data=sleep, s=100)
plt.xlabel('log10(REM Sleep)')
plt.ylabel('Total Sleep');
# + [markdown] slideshow={"slide_type": "slide"}
# ### Linear regression with transformed data
# + slideshow={"slide_type": "fragment"}
# carry out regression using log-transformed predictor
outcome, predictors = patsy.dmatrices('sleep_total ~ sleep_rem10', sleep)
mod = sm.OLS(outcome, predictors)
res = mod.fit()
print(res.summary())
# + [markdown] slideshow={"slide_type": "fragment"}
# Here, we see that the $\beta_1$ estimate for `sleep_rem10` is 10.89, but remember that this coefficient applies to the log-transformed (base-10) predictor.
# + [markdown] slideshow={"slide_type": "fragment"}
# $$ Total Sleep = 8.57 + 10.89 * log_{10}(REM Sleep) $$
# + [markdown] slideshow={"slide_type": "fragment"}
# To interpret this coefficient, we would say that
#
# for a 1% increase in REM sleep, Total Sleep increases by $\approx$ $10.89/100$ units.
# + slideshow={"slide_type": "fragment"}
# interpreting a log transformed coefficient
10.89 / 100
# + [markdown] slideshow={"slide_type": "fragment"}
# So, for each 1% increase in REM Sleep, Total Sleep increases by roughly 0.11 hours.
#
# That's not a large effect, but it is different than what we hypothesized at the beginning!
# + [markdown] slideshow={"slide_type": "notes"}
# More on interpretation of log transformed variables can be read [here](https://www.cscu.cornell.edu/news/statnews/stnews83.pdf)
# + [markdown] slideshow={"slide_type": "notes"}
# ### Why is log-transformation not math trickery?
#
# First, some reading on data transformation can be found [here](http://fmwww.bc.edu/repec/bocode/t/transint.html). It's not beautifully formatted, but I think it is written in a more-straightforward manner than some [other](https://stats.stackexchange.com/questions/4831/regression-transforming-variables/4833#4833) [places](https://stats.stackexchange.com/questions/298/in-linear-regression-when-is-it-appropriate-to-use-the-log-of-an-independent-va) [online](https://www.researchgate.net/post/Why_do_we_do_transformation_before_data_analysis).
# + [markdown] slideshow={"slide_type": "notes"}
# The first thing to remember is there is (in most cases) nothing special about how the data are originally expressed.
#
# In our example above, there is nothing about "hours" as the unit that was chosen that makes these data "correct".
#
# So, while it _feels_ like data transformation is trickery, our initial unit of hours is...in some ways arbitrary and something that we chose.
#
# This is where we'll start with our argument that it's ok to transform (or think of it as *re-express*) our data
# so that it can be (still-accurately) used with well-studied models.
# + [markdown] slideshow={"slide_type": "notes"}
# To make the point a little more concretely, pH (measurement of acidity) is measured on the log scale. It _could_ be measured (transformed) off of the log scale. Those measurements would still explain a compound's acidity...it would just be on a different scale.
# + [markdown] slideshow={"slide_type": "notes"}
# In other words:
#
# > "Transformations are needed because there is no guarantee that the world works on the scales it happens to be measured on."
# + [markdown] slideshow={"slide_type": "notes"}
# What *does* differ, however, is the interpretation. Linear scales tell us absolute change, while logarithmic scales tell us relative change.
# + slideshow={"slide_type": "notes"}
dat = skewnorm.rvs(5, 1, size=1000)
sns.distplot(dat, kde=False, bins=20);
# + slideshow={"slide_type": "notes"}
dat_log = np.log10(dat)
sns.distplot(dat_log, kde=False, bins=20);
# + slideshow={"slide_type": "notes"}
#original value
dat[0]
# + slideshow={"slide_type": "notes"}
# log 10 transformed value
dat_log[0]
# + slideshow={"slide_type": "notes"}
# math that is actually happening:
# raising 10 to the log10-transformed value recovers the original value
10.0**dat_log[0]
# + slideshow={"slide_type": "notes"}
# linear scale tells you the absolute
# difference between two points
dat[1] - dat[0]
# + slideshow={"slide_type": "notes"}
# log scale tells you relative
# difference between two points
dat_log[1] - dat_log[0]
# -
| 05_inference/05_02_inference.ipynb |
# # In this notebook....
#
# Using HypnosPy, we first classified sleep using the most common heuristic algorithms, such as Cole-Kripke, Oakley, Sadeh, Sazonov and the Scripps Clinic algorithm.
#
# Once sleep was classified, we derived the well-established sleep metrics, such as arousal, awakening, sleep efficiency, total sleep time and total wake time.
#
# All this was done by simply using the _SleepWakeAnalysis_ and _SleepMetrics_ modules of HypnosPy.
#
# Next, we calculated the Pearson correlation between the sleep metrics and the ones derived from ground truth PSG.
#
# Finally, we produced a compelling visualization of these results using the software.
# This visualization extends the findings of [previous research](https://www.nature.com/articles/s41746-019-0126-9) and allows us to understand, for the first time, that although all algorithms perform similarly at detecting total wake time, they all perform better at detecting total sleep time than total wake time, and that in particular Oakley and Sazonov are better at awakening and arousal detection than the rest of the algorithms.
#
# These findings are particularly informative for large population studies where quantifying awakenings/arousals during the night period is of particular interest for the research question that the study is trying to address.
from glob import glob
from hypnospy import Wearable, Experiment
from hypnospy.data import RawProcessing
from hypnospy.analysis import SleepWakeAnalysis, Viewer, SleepMetrics
# In the MESA Sleep study, PSG was conducted during one single night and actigraphy during 7 whole days.
# Actigraph and PSG were aligned elsewhere using this overlap dataset: https://sleepdata.org/datasets/mesa/files/overlap
# Hypnospy can easily process the generic data created with the ``RawProcessing'' module shown below.
# Note that all this data (PSG, actigraph and overlap) can be obtained from https://sleepdata.org upon request
file_path = "../data/examples_mesa/collection_mesa_psg/*.csv"
# Configure an Experiment
exp = Experiment()
for inputfile in glob(file_path):
# This is how a preprocessed actigraph + PSG file looks like:
#mesaid,linetime,offwrist,activity,marker,white,red,green,blue,wake,interval,dayofweek,daybymidnight,daybynoon,stage,gt
#1,1900-01-01 20:30:00,0,0.0,0.0,0.07,0.0292,0.0,0.0059,0.0,ACTIVE,5,1,1,0,0
#1,1900-01-01 20:30:30,0,0.0,0.0,0.07,0.0292,0.0,0.0059,0.0,ACTIVE,5,1,1,0,0
preprocessed = RawProcessing(inputfile,
cols_for_activity=["activity"], # Activity information
col_for_datetime="linetime", strftime="%Y-%m-%d %H:%M:%S", # Datetime information
col_for_pid="mesaid") # Participant information
w = Wearable(preprocessed)
    w.change_start_hour_for_experiment_day(15) # We define an experiment day as running from 3pm (15:00) to 3pm the next day.
exp.add_wearable(w)
exp.set_freq_in_secs(30) # This is not a required step, but shows how easily one can change the data sampling frequency
sw = SleepWakeAnalysis(exp) # This module allows the use of sleep/wake algorithms for the night that PSG was conducted
for sleep_alg in ["Cole-Kripke", "ScrippsClinic", "Oakley", "Sadeh", "Sazonov"]:
sw.run_sleep_algorithm(sleep_alg, inplace=True)
# We can at any time visualize the signals and annotations for one (or all) participants
v = Viewer(exp.get_wearable(pid="1"))
v.view_signals_multipanel(signals=["activity", "gt"], signals_as_area=["ScrippsClinic", "Oakley", "Cole-Kripke", "Sadeh", "Sazonov"],
select_day=0, zoom=["20:00:00", "09:00:00"], colors={"signal": "black", "area": ["green", "blue", "red", "purple", "orange"]},
alphas={"area": 0.85}, labels={"signal": ["Activity", "PSG"], "area": ["Scripps Clinic", "Oakley", "Cole-Kripke", "Sadeh", "Sazonov"]})
# Now, we extract sleep metrics and compare the performance of different algorithms
sm = SleepMetrics(exp)
results = []
sleep_metrics = ["sleepEfficiency", "awakening", "arousal", "totalSleepTime", "totalWakeTime"]
for sleep_alg in ["Cole-Kripke", "ScrippsClinic", "Oakley", "Sadeh", "Sazonov"]:
results.extend(sm.compare_sleep_metrics(ground_truth="gt", wake_sleep_alg=sleep_alg, sleep_metrics=sleep_metrics,
how="pearson"))
fig = Viewer.plot_sleep_wake_by_metrics_metrics(results, figname='heatmap_sleepalg_metrics.pdf')
| mdpi_sensors/hypnospy_sleepmetrics_by_sleepwakealgs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Forecasting, updating datasets, and the "news"
#
# In this notebook, we describe how to use Statsmodels to compute the impacts of updated or revised datasets on out-of-sample forecasts or in-sample estimates of missing data. We follow the approach of the "Nowcasting" literature (see references at the end), by using a state space model to compute the "news" and impacts of incoming data.
#
# **Note**: this notebook applies to Statsmodels v0.12+. In addition, it only applies to the state space models or related classes, which are: `sm.tsa.statespace.ExponentialSmoothing`, `sm.tsa.arima.ARIMA`, `sm.tsa.SARIMAX`, `sm.tsa.UnobservedComponents`, `sm.tsa.VARMAX`, and `sm.tsa.DynamicFactor`.
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
macrodata = sm.datasets.macrodata.load_pandas().data
macrodata.index = pd.period_range('1959Q1', '2009Q3', freq='Q')
# -
# Forecasting exercises often start with a fixed set of historical data that is used for model selection and parameter estimation. Then, the fitted selected model (or models) can be used to create out-of-sample forecasts. Most of the time, this is not the end of the story. As new data comes in, you may need to evaluate your forecast errors, possibly update your models, and create updated out-of-sample forecasts. This is sometimes called a "real-time" forecasting exercise (by contrast, a pseudo real-time exercise is one in which you simulate this procedure).
#
# If all that matters is minimizing some loss function based on forecast errors (like MSE), then when new data comes in you may just want to completely redo model selection, parameter estimation and out-of-sample forecasting, using the updated datapoints. If you do this, your new forecasts will have changed for two reasons:
#
# 1. You have received new data that gives you new information
# 2. Your forecasting model or the estimated parameters are different
#
# In this notebook, we focus on methods for isolating the first effect. The way we do this comes from the so-called "nowcasting" literature, and in particular Bańbura, Giannone, and Reichlin (2011), Bańbura and Modugno (2014), and Bańbura et al. (2014). They describe this exercise as computing the "**news**", and we follow them in using this language in Statsmodels.
#
# These methods are perhaps most useful with multivariate models, since there multiple variables may update at the same time, and it is not immediately obvious what forecast change was created by what updated variable. However, they can still be useful for thinking about forecast revisions in univariate models. We will therefore start with the simpler univariate case to explain how things work, and then move to the multivariate case afterwards.
# **Note on revisions**: the framework that we are using is designed to decompose changes to forecasts from newly observed datapoints. It can also take into account *revisions* to previously published datapoints, but it does not decompose them separately. Instead, it only shows the aggregate effect of "revisions".
# **Note on `exog` data**: the framework that we are using only decomposes changes to forecasts from newly observed datapoints for *modeled* variables. These are the "left-hand-side" variables that in Statsmodels are given in the `endog` arguments. This framework does not decompose or account for changes to unmodeled "right-hand-side" variables, like those included in the `exog` argument.
# ### Simple univariate example: AR(1)
#
# We will begin with a simple autoregressive model, an AR(1):
#
# $$y_t = \phi y_{t-1} + \varepsilon_t$$
#
# - The parameter $\phi$ captures the persistence of the series
#
# We will use this model to forecast inflation.
#
# To make it simpler to describe the forecast updates in this notebook, we will work with inflation data that has been de-meaned, but it is straightforward in practice to augment the model with a mean term.
#
# De-mean the inflation series
y = macrodata['infl'] - macrodata['infl'].mean()
# #### Step 1: fitting the model on the available dataset
# Here, we'll simulate an out-of-sample exercise, by constructing and fitting our model using all of the data except the last five observations. We'll assume that we haven't observed these values yet, and then in subsequent steps we'll add them back into the analysis.
y_pre = y.iloc[:-5]
y_pre.plot(figsize=(15, 3), title='Inflation');
# To construct forecasts, we first estimate the parameters of the model. This returns a results object that we will be able to use produce forecasts.
mod_pre = sm.tsa.arima.ARIMA(y_pre, order=(1, 0, 0), trend='n')
res_pre = mod_pre.fit()
print(res_pre.summary())
# Creating the forecasts from the results object `res` is easy - you can just call the `forecast` method with the number of forecasts you want to construct. In this case, we'll construct four out-of-sample forecasts.
# +
# Compute the forecasts
forecasts_pre = res_pre.forecast(4)
# Plot the last 3 years of data and the four out-of-sample forecasts
y_pre.iloc[-12:].plot(figsize=(15, 3), label='Data', legend=True)
forecasts_pre.plot(label='Forecast', legend=True);
# -
# For the AR(1) model, it is also easy to manually construct the forecasts. Denoting the last observed variable as $y_T$ and the $h$-step-ahead forecast as $y_{T+h|T}$, we have:
#
# $$y_{T+h|T} = \hat \phi^h y_T$$
#
# Where $\hat \phi$ is our estimated value for the AR(1) coefficient. From the summary output above, we can see that this is the first parameter of the model, which we can access from the `params` attribute of the results object.
# +
# Get the estimated AR(1) coefficient
phi_hat = res_pre.params[0]
# Get the last observed value of the variable
y_T = y_pre.iloc[-1]
# Directly compute the forecasts at the horizons h=1,2,3,4
manual_forecasts = pd.Series([phi_hat * y_T, phi_hat**2 * y_T,
phi_hat**3 * y_T, phi_hat**4 * y_T],
index=forecasts_pre.index)
# We'll print the two to double-check that they're the same
print(pd.concat([forecasts_pre, manual_forecasts], axis=1))
# -
# #### Step 2: computing the "news" from a new observation
#
# Suppose that time has passed, and we have now received another observation. Our dataset is now larger, and we can evaluate our forecast error and produce updated forecasts for the subsequent quarters.
# +
# Get the next observation after the "pre" dataset
y_update = y.iloc[-5:-4]
# Print the forecast error
print('Forecast error: %.2f' % (y_update.iloc[0] - forecasts_pre.iloc[0]))
# -
# To compute forecasts based on our updated dataset, we will create an updated results object `res_post` using the `append` method, to append on our new observation to the previous dataset.
#
# Note that by default, the `append` method does not re-estimate the parameters of the model. This is exactly what we want here, since we want to isolate the effect on the forecasts of the new information only.
# +
# Create a new results object by passing the new observations to the `append` method
res_post = res_pre.append(y_update)
# Since we now know the value for 2008Q3, we will only use `res_post` to
# produce forecasts for 2008Q4 through 2009Q2
forecasts_post = pd.concat([y_update, res_post.forecast('2009Q2')])
print(forecasts_post)
# -
# In this case, the forecast error is quite large - inflation was more than 10 percentage points below the AR(1) models' forecast. (This was largely because of large swings in oil prices around the global financial crisis).
# To analyse this in more depth, we can use Statsmodels to isolate the effect of the new information - or the "**news**" - on our forecasts. This means that we do not yet want to change our model or re-estimate the parameters. Instead, we will use the `news` method that is available in the results objects of state space models.
#
# Computing the news in Statsmodels always requires a *previous* results object or dataset, and an *updated* results object or dataset. Here we will use the original results object `res_pre` as the previous results and the `res_post` results object that we just created as the updated results.
# Once we have previous and updated results objects or datasets, we can compute the news by calling the `news` method. Here, we will call `res_pre.news`, and the first argument will be the updated results, `res_post` (however, if you have two results objects, the `news` method can be called on either one).
#
# In addition to specifying the comparison object or dataset as the first argument, there are a variety of other arguments that are accepted. The most important specify the "impact periods" that you want to consider. These "impact periods" correspond to the forecasted periods of interest; i.e. these dates specify which periods will have their forecast revisions decomposed.
#
# To specify the impact periods, you must pass two of `start`, `end`, and `periods` (similar to the Pandas `date_range` method). If your time series was a Pandas object with an associated date or period index, then you can pass dates as values for `start` and `end`, as we do below.
# Compute the impact of the news on the four periods that we previously
# forecasted: 2008Q3 through 2009Q2
news = res_pre.news(res_post, start='2008Q3', end='2009Q2')
# Note: one alternative way to specify these impact dates is
# `start='2008Q3', periods=4`
# The variable `news` is an object of the class `NewsResults`, and it contains details about the updates to the data in `res_post` compared to `res_pre`, the new information in the updated dataset, and the impact that the new information had on the forecasts in the period between `start` and `end`.
#
# One easy way to summarize the results is with the `summary` method.
news.summary()
# **Summary output**: the default summary for this news results object printed four tables:
#
# 1. Summary of the model and datasets
# 2. Details of the news from updated data
# 3. Summary of the impacts of the new information on the forecasts between `start='2008Q3'` and `end='2009Q2'`
# 4. Details of how the updated data led to the impacts on the forecasts between `start='2008Q3'` and `end='2009Q2'`
#
# These are described in more detail below.
#
# *Notes*:
#
# - There are a number of arguments that can be passed to the `summary` method to control this output. Check the documentation / docstring for details.
# - Table (4), showing details of the updates and impacts, can become quite large if the model is multivariate, there are multiple updates, or a large number of impact dates are selected. It is only shown by default for univariate models.
# **First table: summary of the model and datasets**
#
# The first table, above, shows:
#
# - The type of model from which the forecasts were made. Here this is an ARIMA model, since an AR(1) is a special case of an ARIMA(p,d,q) model.
# - The date and time at which the analysis was computed.
# - The original sample period, which here corresponds to `y_pre`
# - The endpoint of the updated sample period, which here is the last date in `y_update`
# **Second table: the news from updated data**
#
# This table simply shows the forecasts from the previous results for observations that were updated in the updated sample.
#
# *Notes*:
#
# - Our updated dataset `y_update` did not contain any *revisions* to previously observed datapoints. If it had, there would be an additional table showing the previous and updated values of each such revision.
# **Third table: summary of the impacts of the new information**
#
# *Columns*:
#
# The third table, above, shows:
#
# - The previous forecast for each of the impact dates, in the "estimate (prev)" column
# - The impact that the new information (the "news") had on the forecasts for each of the impact dates, in the "impact of news" column
# - The updated forecast for each of the impact dates, in the "estimate (new)" column
#
# *Notes*:
#
# - In multivariate models, this table contains additional columns describing the relevant impacted variable for each row.
# - Our updated dataset `y_update` did not contain any *revisions* to previously observed datapoints. If it had, there would be additional columns in this table showing the impact of those revisions on the forecasts for the impact dates.
# - Note that `estimate (new) = estimate (prev) + impact of news`
# - This table can be accessed independently using the `summary_impacts` method.
#
# *In our example*:
#
# Notice that in our example, the table shows the values that we computed earlier:
#
# - The "estimate (prev)" column is identical to the forecasts from our previous model, contained in the `forecasts_pre` variable.
# - The "estimate (new)" column is identical to our `forecasts_post` variable, which contains the observed value for 2008Q3 and the forecasts from the updated model for 2008Q4 - 2009Q2.
# **Fourth table: details of updates and their impacts**
#
# The fourth table, above, shows how each new observation translated into specific impacts at each impact date.
#
# *Columns*:
#
# The first three columns of the table describe the relevant **update** (an "update" is a new observation):
#
# - The first column ("update date") shows the date of the variable that was updated.
# - The second column ("forecast (prev)") shows the value that would have been forecasted for the update variable at the update date based on the previous results / dataset.
# - The third column ("observed") shows the actual observed value of that updated variable / update date in the updated results / dataset.
#
# The last four columns describe the **impact** of a given update (an impact is a changed forecast within the "impact periods").
#
# - The fourth column ("impact date") gives the date at which the given update made an impact.
# - The fifth column ("news") shows the "news" associated with the given update (this is the same for each impact of a given update, but is just not sparsified by default)
# - The sixth column ("weight") describes the weight that the "news" from the given update has on the impacted variable at the impact date. In general, weights will be different between each "updated variable" / "update date" / "impacted variable" / "impact date" combination.
# - The seventh column ("impact") shows the impact that the given update had on the given "impacted variable" / "impact date".
#
# *Notes*:
#
# - In multivariate models, this table contains additional columns to show the relevant variable that was updated and variable that was impacted for each row. Here, there is only one variable ("infl"), so those columns are suppressed to save space.
# - By default, the updates in this table are "sparsified" with blanks, to avoid repeating the same values for "update date", "forecast (prev)", and "observed" for each row of the table. This behavior can be overridden using the `sparsify` argument.
# - Note that `impact = news * weight`.
# - This table can be accessed independently using the `summary_details` method.
#
# *In our example*:
#
# - For the update to 2008Q3 and impact date 2008Q3, the weight is equal to 1. This is because we only have one variable, and once we have incorporated the data for 2008Q3, there is no remaining ambiguity about the "forecast" for this date. Thus all of the "news" about this variable at 2008Q3 passes through to the "forecast" directly.
# #### Addendum: manually computing the news, weights, and impacts
#
# For this simple example with a univariate model, it is straightforward to compute all of the values shown above by hand. First, recall the formula for forecasting $y_{T+h|T} = \phi^h y_T$, and note that it follows that we also have $y_{T+h|T+1} = \phi^h y_{T+1}$. Finally, note that $y_{T|T+1} = y_T$, because if we know the value of the observations through $T+1$, we know the value of $y_T$.
#
# **News**: The "news" is nothing more than the forecast error associated with one of the new observations. So the news associated with observation $T+1$ is:
#
# $$n_{T+1} = y_{T+1} - y_{T+1|T} = y_{T+1} - \phi y_T$$
#
# **Impacts**: The impact of the news is the difference between the updated and previous forecasts, $i_h \equiv y_{T+h|T+1} - y_{T+h|T}$.
#
# - The previous forecasts for $h=1, \dots, 4$ are: $\begin{pmatrix} \phi y_T & \phi^2 y_T & \phi^3 y_T & \phi^4 y_T \end{pmatrix}'$.
# - The updated forecasts for $h=1, \dots, 4$ are: $\begin{pmatrix} y_{T+1} & \phi y_{T+1} & \phi^2 y_{T+1} & \phi^3 y_{T+1} \end{pmatrix}'$.
#
# The impacts are therefore:
#
# $$\{ i_h \}_{h=1}^4 = \begin{pmatrix} y_{T+1} - \phi y_T \\ \phi (y_{T+1} - \phi y_T) \\ \phi^2 (y_{T+1} - \phi y_T) \\ \phi^3 (y_{T+1} - \phi y_T) \end{pmatrix}$$
#
# **Weights**: To compute the weights, we just need to note that it is immediate that we can rewrite the impacts in terms of the forecast errors, $n_{T+1}$.
#
# $$\{ i_h \}_{h=1}^4 = \begin{pmatrix} 1 \\ \phi \\ \phi^2 \\ \phi^3 \end{pmatrix} n_{T+1}$$
#
# The weights are then simply $w = \begin{pmatrix} 1 \\ \phi \\ \phi^2 \\ \phi^3 \end{pmatrix}$
# We can check that this is what the `news` method has computed.
# +
# Print the news, computed by the `news` method
print(news.news)
# Manually compute the news
print()
print((y_update.iloc[0] - phi_hat * y_pre.iloc[-1]).round(6))
# +
# Print the total impacts, computed by the `news` method
# (Note: news.total_impacts = news.revision_impacts + news.update_impacts, but
# here there are no data revisions, so total and update impacts are the same)
print(news.total_impacts)
# Manually compute the impacts
print()
print(forecasts_post - forecasts_pre)
# +
# Print the weights, computed by the `news` method
print(news.weights)
# Manually compute the weights
print()
print(np.array([1, phi_hat, phi_hat**2, phi_hat**3]).round(6))
# -
# ### Multivariate example: dynamic factor
#
# In this example, we'll consider forecasting monthly core price inflation based on the Personal Consumption Expenditures (PCE) price index and the Consumer Price Index (CPI), using a Dynamic Factor model. Both of these measures track prices in the US economy and are based on similar source data, but they have a number of definitional differences. Nonetheless, they track each other relatively well, so modeling them jointly using a single dynamic factor seems reasonable.
#
# One reason that this kind of approach can be useful is that the CPI is released earlier in the month than the PCE. Once the CPI is released, therefore, we can update our dynamic factor model with that additional datapoint, and obtain an improved forecast for that month's PCE release. A more involved version of this kind of analysis is available in Knotek and Zaman (2017).
# We start by downloading the core CPI and PCE price index data from [FRED](https://fred.stlouisfed.org/), converting them to annualized monthly inflation rates, and removing two outliers. (The series are not de-meaned here; instead, as discussed below, a constant term is included in the model via the `exog` argument, since the dynamic factor model does not include an intercept by default.)
# +
import pandas_datareader as pdr
levels = pdr.get_data_fred(['PCEPILFE', 'CPILFESL'], start='1999', end='2019').to_period('M')
infl = np.log(levels).diff().iloc[1:] * 1200
infl.columns = ['PCE', 'CPI']
# Remove two outliers (set the PCE values for 2001-09 and 2001-10 to missing)
infl['PCE'].loc['2001-09':'2001-10'] = np.nan
# -
# To show how this works, we'll imagine that it is April 14, 2017, which is the date of the March 2017 CPI release. So that we can show the effect of multiple updates at once, we'll assume that we haven't updated our data since the end of January, so that:
#
# - Our **previous dataset** will consist of all values for the PCE and CPI through January 2017
# - Our **updated dataset** will additionally incorporate the CPI for February and March 2017 and the PCE data for February 2017. But it will not yet include the PCE for March 2017 (the March 2017 PCE price index wasn't released until May 1, 2017).
# Previous dataset runs through 2017-01
y_pre = infl.loc[:'2017-01'].copy()
const_pre = np.ones(len(y_pre))
print(y_pre.tail())
# +
# For the updated dataset, we add the February 2017
# data and the CPI value for 2017-03
y_post = infl.loc[:'2017-03'].copy()
y_post.loc['2017-03', 'PCE'] = np.nan
const_post = np.ones(len(y_post))
# Notice the missing value for PCE in 2017-03
print(y_post.tail())
# -
# We chose this particular example because in March 2017, core CPI prices fell for the first time since 2010, and this information may be useful in forecasting core PCE prices for that month. The graph below shows the CPI and PCE price data as it would have been observed on April 14th$^\dagger$.
#
# -----
#
# $\dagger$ This statement is not entirely true, because both the CPI and PCE price indexes can be revised to a certain extent after the fact. As a result, the series that we're pulling are not exactly like those observed on April 14, 2017. This could be fixed by pulling the archived data from [ALFRED](https://alfred.stlouisfed.org/) instead of [FRED](https://fred.stlouisfed.org/), but the data we have is good enough for this tutorial.
# Plot the updated dataset
fig, ax = plt.subplots(figsize=(15, 3))
y_post.plot(ax=ax)
ax.hlines(0, '2009', '2017-06', linewidth=1.0)
ax.set_xlim('2009', '2017-06');
# To perform the exercise, we first construct and fit a `DynamicFactor` model. Specifically:
#
# - We are using a single dynamic factor (`k_factors=1`)
# - We are modeling the factor's dynamics with an AR(6) model (`factor_order=6`)
# - We have included a vector of ones as an exogenous variable (`exog=const_pre`), because the inflation series we are working with are not mean-zero.
mod_pre = sm.tsa.DynamicFactor(y_pre, exog=const_pre, k_factors=1, factor_order=6)
res_pre = mod_pre.fit()
print(res_pre.summary())
# With the fitted model in hand, we now construct the news and impacts associated with observing the CPI for March 2017. The updated data is for February 2017 and part of March 2017, and we'll examine the impacts on both March and April.
#
# In the univariate example, we first created an updated results object, and then passed that to the `news` method. Here, we're creating the news by directly passing the updated dataset.
#
# Notice that:
#
# 1. `y_post` contains the entire updated dataset (not just the new datapoints)
# 2. We also had to pass an updated `exog` array. This array must cover **both**:
# - The entire period associated with `y_post`
# - Any additional datapoints after the end of `y_post` through the last impact date, specified by `end`
#
# Here, `y_post` ends in March 2017, so we needed our `exog` to extend one more period, to April 2017.
# Create the news results
# Note: the `exog` array must extend one period past the end of `y_post`, through the last impact date (April 2017)
const_post_plus1 = np.ones(len(y_post) + 1)
news = res_pre.news(y_post, exog=const_post_plus1, start='2017-03', end='2017-04')
# > **Note**:
# >
# > In the univariate example, above, we first constructed a new results object, and then passed that to the `news` method. We could have done that here too, although there is an extra step required. Since we are requesting an impact for a period beyond the end of `y_post`, we would still need to pass the additional value for the `exog` variable during that period to `news`:
# >
# > ```python
# res_post = res_pre.apply(y_post, exog=const_post)
# news = res_pre.news(res_post, exog=[1.], start='2017-03', end='2017-04')
# ```
# Now that we have computed the `news`, printing `summary` is a convenient way to see the results.
# Show the summary of the news results
news.summary()
# Because we have multiple variables, by default the summary only shows the news from the updated data and the total impacts.
#
# From the first table, we can see that our updated dataset contains three new data points, with most of the "news" from these data coming from the very low reading in March 2017.
#
# The second table shows that these three datapoints substantially impacted the estimate for PCE in March 2017 (which was not yet observed). This estimate was revised down by nearly 1.5 percentage points.
#
# The updated data also impacted the forecasts in the first out-of-sample month, April 2017. After incorporating the new data, the model's forecasts for CPI and PCE inflation in that month were revised down by 0.29 and 0.17 percentage points, respectively.
# While these tables show the "news" and the total impacts, they do not show how much of each impact was caused by each updated datapoint. To see that information, we need to look at the details tables.
#
# One way to see the details tables is to pass `include_details=True` to the `summary` method. To avoid repeating the tables above, however, we'll just call the `summary_details` method directly.
news.summary_details()
# This table shows that most of the revisions to the estimate of PCE in April 2017, described above, came from the news associated with the CPI release in March 2017. By contrast, the CPI release in February had only a little effect on the April forecast, and the PCE release in February had essentially no effect.
# ### Bibliography
#
# Bańbura, Marta, Domenico Giannone, and Lucrezia Reichlin. "Nowcasting." The Oxford Handbook of Economic Forecasting. July 8, 2011.
#
# Bańbura, Marta, Domenico Giannone, Michele Modugno, and Lucrezia Reichlin. "Now-casting and the real-time data flow." In Handbook of Economic Forecasting, vol. 2, pp. 195-237. Elsevier, 2013.
#
# Bańbura, Marta, and Michele Modugno. "Maximum likelihood estimation of factor models on datasets with arbitrary pattern of missing data." Journal of Applied Econometrics 29, no. 1 (2014): 133-160.
#
# Knotek, Edward S., II, and Saeed Zaman. "Nowcasting US headline and core inflation." Journal of Money, Credit and Banking 49, no. 5 (2017): 931-968.
| examples/notebooks/statespace_news.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Introduction to TensorFlow
#
# TensorFlow (TF) is Google's API for developing Deep Learning models. We will interact with TF from Python, but other languages can be used, at least at a low level. A typical TF application works according to the scheme shown below:
#
# <img src="https://pic1.zhimg.com/80/v2-8b46a7f55b77f0febfa3ad5084e25c3c_1440w.jpg" alt="TF architecture" width="50%" />
#
# - Keras is a well-known library for specifying the model's layers at a high level; it also works with other back-ends
# - The Data API is TF's `tf.data` module, used to specify the operations to perform on the dataset for training and testing
# - The TF Execution Engine (which can be local or distributed across several nodes of a compute cluster) is the actual core of TF. At this level, APIs are exposed in several languages.
#
# Within the execution engine, the working _session_ `tf.Session(...)` is defined; this is where the atomic operations (`tf.Operation`) on data in tensor form (`tf.Tensor`) are carried out.
#
# The session manages the execution of a _*computation graph*_, i.e. an _abstract_ specification of the sequence of operations as a graph in which the nodes are operations that return tensors, which in turn feed into other nodes according to the structure defined by the edges.
#
# <img src="https://miro.medium.com/max/2994/1*vPb9E0Yd1QUAD0oFmAgaOw.png" alt="Example computation graph" width="50%" />
#
# Execution is triggered through the `tf.Operation.run()` method, which is also inherited by the session; alternatively, a single tensor can be evaluated with `tf.Tensor.eval()`. The session must be closed explicitly with the `close()` method.
#
#
# +
import tensorflow as tf
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
# Replaces the old tf.Session() call,
# which has been deprecated since TF v2 was released
sess = tf.compat.v1.Session()
# ask for the list of available compute devices
# those of type XLA_GPU or XLA_CPU support accelerated linear algebra
for d in sess.list_devices():
print(d.name)
# Define a simple computation that creates a constant string element
# (more precisely, a **node**) and runs it, printing the result
bye_bye = tf.constant('Hello World')
result = sess.run(bye_bye)
print(result)
print(f'The session is closed? {sess._closed}')
# close the session
sess.close()
# -
# The session can be invoked with several options and, above all, with the possibility of defining a _context manager_ inside which the operations are specified; this no longer requires an explicit close.
# +
with tf.compat.v1.Session() as sess:
    # define two constant integer values
n1=tf.constant(2)
n2=tf.constant(3)
print(n1.eval())
    #n3 = n1 * n2 # this is **not** integer multiplication,
    # but the tf.Operation for tensor multiplication
n3 = tf.multiply(n1,n2)
print(n1,n2,n3,sep='\n')
#print(sess.run(n3))
print(n3.eval(), n3.dtype, n3.shape)
print(f'Session closed: {sess._closed}')
# -
# The full signature of the `tf.compat.v1.Session` constructor is:
#
# ```python
# tf.compat.v1.Session(target='',\ # computation engine to use, local or distributed
#                      graph=None,\ # computation graph to use
#                      config=None) # object holding any specific configuration settings
# ```
#
# Let's open the session with a default configuration for dynamically allocating the computation across the available devices, and run the computation graph from the previous figure
# +
import tensorflow as tf
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
# Use the configuration object explicitly
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
    allow_soft_placement=True, # dynamic handling of device allocation
    log_device_placement=True)) # log where operations are placed on devices
with sess.as_default():
    assert tf.compat.v1.get_default_session() is sess # make our configured session the default one
    # Our computation is: res = (a*b) / (a+b)
    # inputs
a = tf.constant(5)
b = tf.constant(3)
    # intermediate operations
prod = tf.multiply(a,b)
sum = tf.add(a,b)
    # output
#res = tf.div(prod,sum)
res = tf.math.divide(prod,sum)
#print(res.eval())
print(sess.run(res))
# +
# Create tensors with different functions
mat = tf.constant([[1., 2., 3.], [4., 5., 6.]])
print(mat.shape, mat.dtype)
# +
mat_randn = tf.random.normal((3,3), mean=0, stddev=1.0) # A 3x3 random normal matrix.
mat_randu = tf.random.uniform((4,4), minval=0, maxval=1.0)
print(mat_randn)
with sess.as_default():
assert tf.compat.v1.get_default_session() is sess
print(mat_randn.eval())
print(mat_randu.eval())
# -
# Example of using variables
with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(allow_soft_placement=True)) as sess:
init_values = tf.random.uniform((4,4), minval=0, maxval=1.0)
t = tf.Variable(initial_value=init_values,name='myvar')
init = tf.compat.v1.global_variables_initializer()
print(sess.run(init))
print(sess.run(t))
# Define the placeholders for z = 2x^2 + 2xy
with tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(allow_soft_placement=True)) as sess:
two = tf.constant(2.0)
x = tf.compat.v1.placeholder(tf.float32,shape=(None, 3))
y = tf.compat.v1.placeholder(tf.float32,shape=(None, 3))
z = tf.add(tf.multiply(two, tf.multiply(x, x)),\
tf.multiply(two, tf.multiply(x, y)))
print(sess.run(z, feed_dict={x: [[1., 2., 3.],[4., 5., 6.]], y: [[3., 4., 5.],[7., 8., 9.]]}))
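
# As an aside, the same computation can be sketched without sessions or placeholders by relying on TF 2's eager execution. This is only an illustrative sketch and assumes a fresh TF 2 runtime with eager execution enabled; it is independent of the compat.v1 workflow used above.
# +
# Eager-mode sketch of z = 2x^2 + 2xy (no Session or placeholder required)
x_e = tf.constant([[1., 2., 3.], [4., 5., 6.]])
y_e = tf.constant([[3., 4., 5.], [7., 8., 9.]])
z_e = 2.0 * x_e * x_e + 2.0 * x_e * y_e
print(z_e.numpy())
# -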
| tensorflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: data
# language: python
# name: data
# ---
# # Notebook to try to get the CPI (Consumer Price Index) for USA and Uruguay from the World Bank API
# ## World Bank Explanation of the data
#
# ```
# Consumer price index (2010 = 100)
#
# Consumer price index reflects changes in the cost to the average consumer of acquiring a basket of goods and services that may be fixed or changed at specified intervals, such as yearly. The Laspeyres formula is generally used. Data are period averages.
#
# Source: International Monetary Fund, International Financial Statistics and data files.
# License: CC BY-4.0
# Base Period: 2010
#
# Development Relevance: A general and continuing increase in an economy’s price level is called inflation. The increase in the average prices of goods and services in the economy should be distinguished from a change in the relative prices of individual goods and services. Generally accompanying an overall increase in the price level is a change in the structure of relative prices, but it is only the average increase, not the relative price changes, that constitutes inflation. A commonly used measure of inflation is the consumer price index, which measures the prices of a representative basket of goods and services purchased by a typical household. The consumer price index is usually calculated on the basis of periodic surveys of consumer prices. Other price indices are derived implicitly from indexes of current and constant price series.
#
# Limitations and Exceptions: Consumer price indexes should be interpreted with caution. The definition of a household, the basket of goods, and the geographic (urban or rural) and income group coverage of consumer price surveys can vary widely by country. In addition, weights are derived from household expenditure surveys, which, for budgetary reasons, tend to be conducted infrequently in developing countries, impairing comparability over time. Although useful for measuring consumer price inflation within a country, consumer price indexes are of less value in comparing countries.
#
# Long Definition: Consumer price index reflects changes in the cost to the average consumer of acquiring a basket of goods and services that may be fixed or changed at specified intervals, such as yearly. The Laspeyres formula is generally used. Data are period averages.
#
# Periodicity: Annual
#
# Statistical Concept and Methodology: Consumer price indexes are constructed explicitly, using surveys of the cost of a defined basket of consumer goods and services.
# ```
import requests
import pandas as pd
from collections import defaultdict
import matplotlib.pyplot as plt
# %matplotlib inline
# Let's try to get some Consumer Price Indexes
CPI_CODE = 'FP.CPI.TOTL'
def download_index(country_code,
index_code,
start_date=1960,
end_date=2018):
"""
Get a JSON response for the index data of one country.
Args:
country_code(str): The two letter code for the World Bank webpage
        index_code(str): The code for the index to retrieve
        start_date(int): The initial year to retrieve
        end_date(int): The final year to retrieve
Returns:
str: a JSON string with the raw data
"""
payload = {'format': 'json',
'per_page': '500',
'date':'{}:{}'.format(str(start_date), str(end_date))
}
r = requests.get(
'http://api.worldbank.org/v2/countries/{}/indicators/{}'.format(
country_code, index_code), params=payload)
return r
def format_response(raw_res):
"""
Formats a raw JSON string, returned from the World Bank API into a
pandas DataFrame.
"""
result = defaultdict(dict)
for record in raw_res.json()[1]:
result[record['country']['value']].update(
{record['date']: record['value']})
return pd.DataFrame(result)
def download_cpi(country_code, **kwargs):
"""
Downloads the Consumer Price Index for one country, and returns the data
as a pandas DataFrame.
Args:
country_code(str): The two letter code for the World Bank webpage
**kwargs: Arguments for 'download_index', for example:
            start_date(int): The initial year to retrieve
            end_date(int): The final year to retrieve
"""
CPI_CODE = 'FP.CPI.TOTL'
raw_res = download_index(country_code, CPI_CODE, **kwargs)
return format_response(raw_res)
def download_cpis(country_codes, **kwargs):
"""
Download many countries CPIs and store them in a pandas DataFrame.
Args:
country_codes(list(str)): A list with the two letter country codes
**kwargs: Other keyword arguments, such as:
            start_date(int): The initial year to retrieve
            end_date(int): The final year to retrieve
Returns:
pd.DataFrame: A dataframe with the CPIs for all the countries in the
input list.
"""
cpi_list = [download_cpi(code, **kwargs) for code in country_codes]
return pd.concat(cpi_list, axis=1)
# ### Testing
cpi = download_cpis(['uy', 'us'])
print(cpi.shape)
cpi.head()
cpi.plot()
res = download_index('uy', 'FP.CPI.TOTL', 1960, 2017)
cpi = format_response(res)
print(cpi.shape)
cpi.head()
cpi.plot()
# +
END_DATE = 2018
cpi = download_cpi('uy', end_date=END_DATE).join(
download_cpi('us', end_date=END_DATE))
print(cpi.shape)
cpi.head()
# -
cpi.plot()
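
# Since the World Bank notes above describe the CPI as the usual basis for measuring inflation, a derived series can be sketched directly from the downloaded levels. This is only an illustrative sketch; it assumes the DataFrame index holds the years as strings, as returned by `format_response`.
# +
# Year-over-year inflation (%) implied by the CPI levels
inflation = cpi.sort_index().astype(float).pct_change() * 100
inflation.plot(title='Annual CPI inflation (%)')
# -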
| notebooks/002-mt-getting-the-cpi-from-the-world-bank.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Saying the same thing multiple ways
# What happens when someone comes across a file in our file format? How do they know what it means?
# If we can make the tag names in our model globally unique, then the meaning of the file can be made understandable
# not just to us, but to people and computers all over the world.
#
# Two file formats which give the same information, in different ways, are *syntactically* distinct,
# but so long as they are **semantically** compatible, I can convert from one to the other.
# This is the goal of the technologies introduced this lecture.
# ## The URI
# The key concept that underpins these tools is the URI: uniform resource **identifier**.
#
# These look like URLs:
#
# `www.turing.ac.uk/rsd-engineering/schema/reaction/element`
#
# But, if I load that as a web address, there's nothing there!
#
# That's fine.
#
# A UR**N** indicates a **name** for an entity, and, by using organisational web addresses as a prefix,
# is likely to be unambiguously unique.
#
# A URI might be a URL or a URN, or both.
# ## XML Namespaces
# It's cumbersome to use a full URI every time we want to put a tag in our XML file.
# XML defines *namespaces* to resolve this:
# %%writefile system.xml
<?xml version="1.0" encoding="UTF-8"?>
<system xmlns="http://www.turing.ac.uk/rsd-engineering/schema/reaction">
<reaction>
<reactants>
<molecule stoichiometry="2">
<element symbol="H" number="2"/>
</molecule>
<molecule stoichiometry="1">
<element symbol="O" number="2"/>
</molecule>
</reactants>
<products>
<molecule stoichiometry="2">
<element symbol="H" number="2"/>
<element symbol="O" number="1"/>
</molecule>
</products>
</reaction>
</system>
# +
from lxml import etree
with open('system.xml') as xmlfile:
tree = etree.parse(xmlfile)
# -
print(etree.tostring(tree, pretty_print=True, encoding=str))
# Note that our previous XPath query no longer finds anything.
tree.xpath('//molecule/element[@number="1"]/@symbol')
namespaces={'r': 'http://www.turing.ac.uk/rsd-engineering/schema/reaction'}
tree.xpath('//r:molecule/r:element[@number="1"]/@symbol', namespaces = namespaces)
# Note the prefix `r` used to bind the namespace in the query: any string will do - it's just a dummy variable.
# The above file specified our namespace as a default namespace: this is like doing `from numpy import *` in python.
#
# It's often better to bind the namespace to a prefix:
# %%writefile system.xml
<?xml version="1.0" encoding="UTF-8"?>
<r:system xmlns:r="http://www.turing.ac.uk/rsd-engineering/schema/reaction">
<r:reaction>
<r:reactants>
<r:molecule stoichiometry="2">
<r:element symbol="H" number="2"/>
</r:molecule>
<r:molecule stoichiometry="1">
<r:element symbol="O" number="2"/>
</r:molecule>
</r:reactants>
<r:products>
<r:molecule stoichiometry="2">
<r:element symbol="H" number="2"/>
<r:element symbol="O" number="1"/>
</r:molecule>
</r:products>
</r:reaction>
</r:system>
# ## Namespaces and Schema
# It's a good idea to serve the schema itself from the URI of the namespace treated as a URL, but it's *not a requirement*: it's a URN not necessarily a URL!
#
# +
# %%writefile reactions.xsd
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://www.turing.ac.uk/rsd-engineering/schema/reaction"
xmlns:r="http://www.turing.ac.uk/rsd-engineering/schema/reaction">
<xs:element name="element">
<xs:complexType>
<xs:attribute name="symbol" type="xs:string"/>
<xs:attribute name="number" type="xs:integer"/>
</xs:complexType>
</xs:element>
<xs:element name="molecule">
<xs:complexType>
<xs:sequence>
<xs:element ref="r:element" maxOccurs="unbounded"/>
</xs:sequence>
<xs:attribute name="stoichiometry" type="xs:integer"/>
</xs:complexType>
</xs:element>
<xs:element name="reactants">
<xs:complexType>
<xs:sequence>
<xs:element ref="r:molecule" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="products">
<xs:complexType>
<xs:sequence>
<xs:element ref="r:molecule" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="reaction">
<xs:complexType>
<xs:sequence>
<xs:element ref="r:reactants"/>
<xs:element ref="r:products"/>
</xs:sequence>
</xs:complexType>
</xs:element>
<xs:element name="system">
<xs:complexType>
<xs:sequence>
<xs:element ref="r:reaction" maxOccurs="unbounded"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
# -
# Note we're now defining the target namespace for our schema.
with open("reactions.xsd") as xsdfile:
schema_xsd = xsdfile.read()
schema = etree.XMLSchema(etree.XML(schema_xsd))
parser = etree.XMLParser(schema=schema)
with open("system.xml") as xmlfile:
tree = etree.parse(xmlfile, parser)
print(tree)
# Note the power of binding namespaces when using XML files addressing more than one namespace.
# Here, we can clearly see which variables are part of the schema defining XML schema itself (bound to `xs`)
# and the schema for our file format (bound to `r`)
# ## Using standard vocabularies
# The work we've done so far will enable someone who comes across our file format to track down something about its significance, by following the URI in the namespace. But it's still somewhat ambiguous. The word "element" means (at least) two things: an element tag in an XML document, and a chemical element. (It also means a heating element in a fire, and lots of other things.)
# To make it easier to not make mistakes as to the meaning of **found data**, it is helpful to use
# standardised namespaces that already exist for the concepts our file format refers to.
#
# So that when somebody else picks up one of our data files, the meaning of the stuff it describes is obvious. In this example, it would be hard to get it wrong, of course, but in general, defining file formats so that they are meaningful as found data should be desirable.
# For example, the concepts in our file format are already part of the "DBPedia ontology",
# among others. So, we could redesign our file format to exploit this, by referencing for example [http://dbpedia.org/ontology/ChemicalCompound](http://dbpedia.org/ontology/ChemicalCompound):
# %%writefile chemistry_template3.mko
<?xml version="1.0" encoding="UTF-8"?>
<system xmlns="http://www.turing.ac.uk/rsd-engineering/schema/reaction"
xmlns:dbo="http://dbpedia.org/ontology/">
# %for reaction in reactions:
<reaction>
<reactants>
# %for molecule in reaction.reactants.molecules:
<dbo:ChemicalCompound stoichiometry="${reaction.reactants.molecules[molecule]}">
% for element in molecule.elements:
<dbo:ChemicalElement symbol="${element.symbol}"
number="${molecule.elements[element]}"/>
% endfor
        </dbo:ChemicalCompound>
# %endfor
</reactants>
<products>
# %for molecule in reaction.products.molecules:
<dbo:ChemicalCompound stoichiometry="${reaction.products.molecules[molecule]}">
% for element in molecule.elements:
<dbo:ChemicalElement symbol="${element.symbol}" number="${molecule.elements[element]}"/>
% endfor
        </dbo:ChemicalCompound>
# %endfor
</products>
</reaction>
# %endfor
</system>
# However, this won't work properly, because it's not up to us to define the XML schema for somebody
# else's entity type: and an XML schema can only target one target namespace.
#
# Of course we should use somebody else's file format for chemical reaction networks: compare [SBML](http://sbml.org) for example. We already know not to reinvent the wheel - and this whole lecture series is just reinventing the wheel for pedagogical purposes. But what if we've already got a bunch of data in our own format. How can we lock down the meaning of our terms?
#
# So, we instead need to declare that our `r:element` *represents the same concept* as `dbo:ChemicalElement`. To do this formally we will need the concepts from the next lecture, specifically `rdf:sameAs`, but first, let's understand the idea of an ontology.
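# As a preview of what such a statement could look like in code, here is a minimal sketch using the `rdflib` package (an assumption: it is not otherwise used in this course) to assert that our term denotes the same concept as the DBPedia one.
# +
from rdflib import Graph, URIRef
from rdflib.namespace import OWL
g = Graph()
our_element = URIRef("http://www.turing.ac.uk/rsd-engineering/schema/reaction/element")
dbo_element = URIRef("http://dbpedia.org/ontology/ChemicalElement")
g.add((our_element, OWL.sameAs, dbo_element))  # a "sameAs" assertion linking the two vocabularies
print(g.serialize(format="turtle"))
# -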
# ## Taxonomies and ontologies
# An Ontology (in computer science terms) is two things: a **controlled vocabulary** of entities (a set of URIs in a namespace), the definitions thereof, and the relationships between them.
# People often casually use the word to mean any formalised taxonomy, but the relation of terms in the ontology to the concepts they represent, and the relationships between them, are also critical.
# Have a look at another example: [http://dublincore.org/documents/dcmi-terms/](http://dublincore.org/documents/dcmi-terms/#terms-creator)
# Note each concept is a URI, but some of these are also stated to be subclasses or superclasses of the others.
# Some are properties of other things, and the domain and range of these verbs are also stated.
# Why is this useful for us in discussing file formats?
# One of the goals of the **semantic web** is to create a way to make file formats which are universally meaningful
# as found data: if I have a file format defined using any formalised ontology, then by tracing statements
# through *rdf:sameAs* relationships, I should be able to reconstruct the information I need.
#
# That will be the goal of the next lecture.
#
| ch09fileformats/11ControlledVocabularies.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import random
import numpy as np
from math import erfc
import matplotlib.pyplot as plt
# %matplotlib inline
# +
def wiener(n, dt, t_init=0, w_init=0.0):
"""Returns one realization of a Wiener process with n steps of length dt.
The time and Wiener series can be initialized using t_init and w_init respectively."""
n+=1
t_series = np.arange(t_init,n*dt,dt)
h = t_series[1]-t_series[0]
z = np.random.normal(0.0,1.0,n)
dw = np.sqrt(h)*z
dw[0] = w_init
w_series = dw.cumsum()
return t_series, w_series
def gaussian(x, mu, sig):
return np.exp(-(x-mu)**2/2/sig**2)/np.sqrt(2*np.pi)/sig
# -
def get_fds(w,lim=None):
'''returns a finite difference series based on the input data
w = input data series
lim = returned differences are between +/-lim.'''
return [w[i+1]-w[i] for i in range(len(w)-1) if lim==None or abs(w[i+1]-w[i])<lim]
def raise_res(T, W, c, mu=0, sigma=1):
'''Increase the resolution of a wiener series by a factor of c.
T = the given Time series.
W = the associated Wiener series.
c = Scaling factor (integer greater than 1).
mu = Mean of W's underlying normal distribution.
sigma = Standard deviation of W's underlying normal distribution.
'''
dT = T[1]-T[0]
dt = float(T[1]-T[0])/c
t_series = []
w_series = []
for i in range(len(T)-1):
t = T[i]
w_t = W[i]
t_next = T[i+1]
w_next = W[i+1]
t_series.append(t)
w_series.append(w_t)
for j in range(c-1):
t+=dt
dW = (w_next-w_t)
if np.sqrt(2)*np.sqrt(t_next-t)*sigma*erfc(-2*random.random())<abs(dW):
w_t+=np.abs(random.gauss(0,np.sqrt(dt)*sigma))*float(dW)/abs(dW)
else:
w_t+=random.gauss(0,np.sqrt(dt)*sigma)
t_series.append(t)
w_series.append(w_t)
t_series.append(T[-1])
w_series.append(W[-1])
return t_series,w_series
# +
c=100
num = 3
dt = 1.
T1,W1 = wiener(num*c,dt/c)
T2 = [T1[i] for i in range(len(T1)) if i%c==0]
W2 = [W1[i] for i in range(len(W1)) if i%c==0]
Tn,Wn = raise_res(T2,W2,c)
plt.figure(figsize=(15,6))
plt.title('A True Wiener Series, Its Skeleton, and a Scaled Wiener Series Derived From the Skeleton',fontsize=15)
plt.plot(T1,W1,label='actual',color='b')
plt.plot(Tn,Wn,label='scaled',color='m')
plt.plot(T2,W2,label='skeleton',color='k',linewidth=2,marker='o',markersize=6)
plt.ylabel('$W(t)$',fontsize=15)
plt.xlabel('$t$',fontsize=18)
plt.legend()
plt.show()
dW1 = get_fds(W1)
bnum1 = (max(dW1)-min(dW1))*50
dWn = get_fds(Wn)
bnumn = (max(dWn)-min(dWn))*50
plt.figure(figsize=(15,4))
ax1 = plt.subplot(121)
ax1.set_title('Finite Differences From the Actual Wiener Series')
ax1.set_ylabel('count',fontsize=13)
ax1.set_xlabel('$dW$',fontsize=15)
plt.hist(dW1,bnum1,label='actual')
plt.legend()
ax2 = plt.subplot(122,sharey=ax1)
ax2.set_title('Finite Differences From the Scaled Wiener Series')
ax2.set_ylabel('count',fontsize=13)
ax2.set_xlabel('$dW$',fontsize=15)
plt.hist(dWn,bnumn,label='scaled',color='m')
plt.legend()
plt.show()
# +
c=1000
num = 5
dt = 1.
T1,W1 = wiener(num*c,dt/c)
T2 = [T1[i] for i in range(len(T1)) if i%c==0]
W2 = [W1[i] for i in range(len(W1)) if i%c==0]
Tn,Wn = raise_res(T2,W2,c)
plt.figure(figsize=(15,6))
plt.title('A True Wiener Series, Its Skeleton, and a Scaled Wiener Series Derived From the Skeleton',fontsize=15)
plt.plot(T1,W1,label='actual',color='b')
plt.plot(Tn,Wn,label='scaled',color='m')
plt.plot(T2,W2,label='skeleton',color='k',linewidth=2,marker='o',markersize=6)
plt.ylabel('$W(t)$',fontsize=15)
plt.xlabel('$t$',fontsize=18)
plt.legend()
plt.show()
dW1 = get_fds(W1)
bnum1 = (max(dW1)-min(dW1))*150
dWn = get_fds(Wn)
bnumn = (max(dWn)-min(dWn))*150
plt.figure(figsize=(15,4))
ax1 = plt.subplot(121)
ax1.set_title('Finite Differences From the Actual Wiener Series')
ax1.set_ylabel('count',fontsize=13)
ax1.set_xlabel('$dW$',fontsize=15)
plt.hist(dW1,bnum1,label='actual')
plt.legend()
ax2 = plt.subplot(122,sharey=ax1)
ax2.set_title('Finite Differences From the Scaled Wiener Series')
ax2.set_ylabel('count',fontsize=13)
ax2.set_xlabel('$dW$',fontsize=15)
plt.hist(dWn,bnumn,label='scaled',color='m')
plt.legend()
plt.show()
# +
c=1000
num = 50
dt = 1.
T1,W1 = wiener(num*c,dt/c)
T2 = [T1[i] for i in range(len(T1)) if i%c==0]
W2 = [W1[i] for i in range(len(W1)) if i%c==0]
Tn,Wn = raise_res(T2,W2,c)
plt.figure(figsize=(15,6))
plt.title('A True Wiener Series, Its Skeleton, and a Scaled Wiener Series Derived From the Skeleton',fontsize=15)
plt.plot(T1,W1,label='actual',color='b')
plt.plot(Tn,Wn,label='scaled',color='m')
plt.plot(T2,W2,label='skeleton',color='k',linewidth=2,marker='o',markersize=6)
plt.ylabel('$W(t)$',fontsize=15)
plt.xlabel('$t$',fontsize=18)
plt.legend()
plt.show()
dW1 = get_fds(W1)
bnum1 = (max(dW1)-min(dW1))*150
dWn = get_fds(Wn)
bnumn = (max(dWn)-min(dWn))*150
plt.figure(figsize=(15,4))
ax1 = plt.subplot(121)
ax1.set_title('Finite Differences From the Actual Wiener Series')
ax1.set_ylabel('count',fontsize=13)
ax1.set_xlabel('$dW$',fontsize=15)
plt.hist(dW1,bnum1,label='actual')
plt.legend()
ax2 = plt.subplot(122,sharey=ax1)
ax2.set_title('Finite Differences From the Scaled Wiener Series')
ax2.set_ylabel('count',fontsize=13)
ax2.set_xlabel('$dW$',fontsize=15)
plt.hist(dWn,bnumn,label='scaled',color='m')
plt.legend()
plt.show()
# -
from brownian.wiener import raise_res
# +
c=100
num = 3
dt = 1.
T1,W1 = wiener(num*c,dt/c)
T2 = [T1[i] for i in range(len(T1)) if i%c==0]
W2 = [W1[i] for i in range(len(W1)) if i%c==0]
Tn,Wn = raise_res(T2,W2,c)
plt.figure(figsize=(15,6))
plt.title('A True Wiener Series, Its Skeleton, and a Scaled Wiener Series Derived From the Skeleton',fontsize=15)
plt.plot(T1,W1,label='actual',color='b')
plt.plot(Tn,Wn,label='scaled',color='m')
plt.plot(T2,W2,label='skeleton',color='k',linewidth=2,marker='o',markersize=6)
plt.ylabel('$W(t)$',fontsize=15)
plt.xlabel('$t$',fontsize=18)
plt.legend()
plt.show()
# -
def autocorr(x):
result = np.correlate(x, x, mode='full')
    return result[result.size // 2:]
# +
a_T1 = autocorr(T1)
a_Tn = autocorr(Tn)
a_1ndif = a_T1-a_Tn
plt.figure(figsize=(5,10))
ax1 = plt.subplot(311)
ax1.set_title('acf plots',fontsize=15)
ax1.set_ylabel('acf actual',fontsize=12)
plt.plot(range(len(a_T1)),a_T1,label='actual')
plt.legend()
ax2 = plt.subplot(312,sharex=ax1)
ax2.set_ylabel('acf',fontsize=12)
plt.plot(range(len(a_Tn)),a_Tn,label='scaled')
plt.legend()
ax3 = plt.subplot(313,sharex=ax1)
ax3.set_ylabel('acf difference',fontsize=12)
ax3.set_xlabel('lag',fontsize=12)
plt.plot(range(len(a_1ndif)),a_1ndif,label='actual - scaled')
plt.legend()
plt.show()
# -
| brwnn-raise_res.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 2 - Bacterial Bomb
# +
# Packages
# %matplotlib inline
import random # used to randomise wind directions and settling rates
import time # used to measure model runtime
import csv # used to format loaded text files as csvs
import matplotlib.pyplot as plt # used to plot density map
import numpy as np # used to save results to text file
from ipywidgets import interact # used for scroll bars
#import pandas as pd # used for table?
#random.seed(100) # Used to maintain consistency for model testing - not needed for general use
# +
# Variables
#The model begins with the colony being released at a set height.
#the colony size and intial elevation are listed here:
Colony_Size = 5000 # Total number of Bacteria Released by Bomb
startheight = 75 # Elevation Bacteria are intially released at
#the below code adds slider bars allowing the user to easily alter the colony size and starting elevation
@interact(colony_size=(0,10000))
def set_cs(colony_size):
    global Colony_Size # update the module-level setting defined above
    Colony_Size = colony_size
@interact(elevation=(50,100))
def set_sh(elevation):
    global startheight # update the module-level setting defined above
    startheight = elevation
# Each second the wind carries each bacterium in one of 4 directions.
# The liklihood of moving in each compass direction are dictated below:
NorthW = 10 # percentage likelihood of wind carrying a bacterium north
EastW = 75 # percentage likelihood of wind carrying a bacterium east
SouthW = 10 # percentage likelihood of wind carrying a bacterium south
WestW = 5 # percentage likelihood of wind carrying a bacterium west
StrengthW = 1 # distance wind carries bacteria each second
# Any exposed bacteria (those above or at the start height) may be carried upwards by turbulence.
# The likelihood of this is dictated by the following variables:
Rise = 20 # percentage likelihood of wind turbulence lifting an exposed bacterium 1 unit
Fall = 70 # percentage likelihood of exposed bacterium dropping 1 unit
# The excess 10% is covered by cases where the bacterium neither rises nor falls
# The following variables are used at output to provide insight into the model outcome.
timer = 0 # counts how many 'seconds' would have passed before model completion
lost = 0 # counts how many bacteria leave the model area and disperse before settling
settled = 0 # counts how many bacteria from the original colony successfully settle in the town area
#note that by the model's completion 'lost' and 'settled' should sum to 'Colony_Size'
# The following two variables store the ground zero coordinates.
# The exact coordinates can only be identified once the file containing them is loaded in.
sx = None # stores ground zero x coordinate
sy = None # stores ground zero y coordinate
#Finally several empty lists are created to be later filled
bacteria = [] # This list will store the bacteria agent data
GZ = [] # This list will be used to load the file identifying the ground zero location
DensityMap = [] # This list will be used to track the locations of settled bacteria
# +
# Classes
class Bacterium: # Class to establish characteristics of model agents
def __init__(self, height, x, y, dm):
self.height = height # used to track elevation of bacterium
self.x = x # used to track x coordinate of bacterium
self.y = y # used to track y coordinate of bacterium
self.dm = dm # links bacterium to density map to allow for interaction
self.complete = 0 # used to track whether bacterium is settled/lost and can be removed
class net: # Class used for identifying ground zero
def __init__(self, x, y, location):
self.x = x # stores x coordinate of value in loaded file
self.y = y # stores y coordinates of value in loaded file
self.location = location # stores value from loaded file found at above coordinates
# +
# Load Start Conditions from File
# Here a fuction for loading text files into arrays is defined:
def loadup(env,data):
env.clear() # Empty Environment (blank slate)
txt = open(data, newline='') # Loads text file into model
file = csv.reader(txt, quoting=csv.QUOTE_NONNUMERIC) # Read text file as a CSV file
for rows in file: # For each row in the CSV file:
values = [] #Create a list to store this rows scores
env.append(values) #add this row to main environment list
for pv in rows: # for each pixel value:
values.append(pv) #Add pixel value to row
txt.close()
# Load Ground Zero Data
x = "wind" #Loads data from provided file storing the ground zero coordinates
loadup(GZ,x) #fills empty 'GZ' list with data from the "wind" file
# Load Empty Text File
base = "Blank.txt" #Loads a blank 300x300 text file
loadup(DensityMap, base) #fills empty 'DensityMap' list with blank values in 300x300 extent
# +
# Finding coordinates for ground zero
sp = 255 # From the assignment it is known that the ground zero coordinates are marked by the value '255'
scope = [] # create empty list for scanning through ground zero data
for x in range(300): # for every x coordinate:
for y in range(300): # at every y coordinate:
scope.append(net(x,y,GZ)) # store value from ground zero file at said coordinates
def impact(self, sp): # create function for checking if coordinates are ground zero
global sx
global sy
if self.location[self.x][self.y] == sp: # if value at coordinates is identified marker:
sx = self.x # starting x coordinate is set
sy = self.y # starting y coordinate is set
for r in range(90000): # for every point in array (300x300)
impact(scope[r], sp) # check if point is ground zero
# print(sy, sx) # Test to ensure coordinates have been identified
# the below values are used for determining the final plot extent.
# here they are set to the ground zero values as there has not yet been any spread of the colony
maxspread_x = sx
minspread_x = sx
maxspread_y = sy
minspread_y = sy
# -
# Create Bacteria
for r in range(Colony_Size): # For every bacterium in colony
bacteria.append(Bacterium(startheight, sx, sy, DensityMap)) # create a bacterium agent
# +
# Variable Warnings
# Checks to make sure defined percentage odds do not exceed 100%
#checks that wind direction percentages do not exceed 100%
if NorthW + EastW + SouthW + WestW > 100:
print("Warning! Wind direction probabilities exceed 100%!")
#checks that turbulence percentages do not exceed 100%
if Rise + Fall > 100:
print("Warning! Rise and Fall probabilities exceed 100%!")
# +
# Functions
# Here the main model functions are defined.
def wind(self, cs, N, E, S ,W, strength): # Controls Compass direction movements of agents
global Colony_Size
global lost
for x in range(cs): # for each bacterium in the colony:
w = random.randint(1,100) # randomise the wind direction
if w <= N: # if the wind is travelling north
self[x].y += strength # move bacterium northwards distance equal to wind speed
else:
if w > N and w <= N+E: # if the wind is travelling east
self[x].x += strength # move bacterium eastwards distance equal to wind speed
else:
if w > N+E and w <= N+E+S: # if the wind is travelling south
self[x].y -= strength # move bacterium southwards distance equal to wind speed
else:
if w > N+E+S and w <= N+E+S+W: # if the wind is travelling west
self[x].x -= strength # move bacterium westwards distance equal to wind speed
if self[x].x < 1 or self[x].x > 299: # if the bacterium is now out of bounds on the x axis:
Colony_Size -= 1 # reduce colony size
            self[x].complete = 1 # mark bacterium for removal
lost += 1 # increase lost counter by one
else: # if the bacterium has not already gone out of bounds on the x axis:
            if (self[x].y < 1 or self[x].y > 299) and self[x].complete == 0: # if the bacterium is now out of bounds on the y axis
Colony_Size -= 1 # reduce colony size
                self[x].complete = 1 # mark bacterium for removal
lost += 1 # increase lost counter by one
def settle(self, cs, rise, fall, startheight): # function for determining the impact of turbulence
for x in range(cs): # for every bacterium
w = random.randint(1,100) # randomise turbulence
if self[x].height < startheight: # if the bacterium is below start height
self[x].height -= 1 # elevation drops by 1 unit
else: # if bacterium is at or above start height:
if w <= fall: #if the wind does not carry the bacterium
self[x].height -= 1 # elevation drops by 1 unit
else:
if w > fall and w <= rise + fall: #if the wind uplifts the bacterium
self[x].height += 1 # elevation rises by 1 unit
# If neither occur, the wind keeps the bacterium height constant
def landing(self, cs, base): # function for adding bacterium that land to density map
global Colony_Size
global settled
global maxspread_x
global minspread_x
global maxspread_y
global minspread_y
for x in range(cs): # for every bacterium:
if self[x].height <= 0 and self[x].complete == 0: # if the agent has reached the ground and is not already processed
base[self[x].y][self[x].x] += 1 # mark landing on density map at agent's coordinates
            self[x].complete = 1 # mark agent for removal
settled += 1 # increase settled count by 1
Colony_Size -= 1 # reduce colony size by 1
# Establish bounds for plot to avoid showing a mostly empty grid based on location of landing agents
if self[x].x > maxspread_x: # if agents is further away than previous furthest on the x axis east of ground zero
maxspread_x = self[x].x # set as new furthest east distance on the x axis
if self[x].x < minspread_x: # if agents is further away than previous furthest on the x axis west of ground zero
minspread_x = self[x].x # set as new furthest west distance on the x axis
            if self[x].y > maxspread_y: # if the agent is further away than the previous furthest north of ground zero on the y axis
                maxspread_y = self[x].y # set as new furthest north distance on the y axis
            if self[x].y < minspread_y: # if the agent is further away than the previous furthest south of ground zero on the y axis
                minspread_y = self[x].y # set as new furthest south distance on the y axis
# as agents cannot be deleted within the iterating loops, a separate step is needed:
def removal(self, agents, cs): # copies list and removes landed bacteria
temp = [] # creates an empty list for temporary storage
    for x in range(len(self)): # for every bacterium still in the list
        if self[x].complete == 0: # if not marked for removal
            temp.append(self[x]) # add to temporary list
    self[:] = temp # update the main list in place, deleting marked agents
# +
# Define Model
def model(self): # new function condensing previous functions for easy looping
#time.sleep(1) # Technically the model should run each step every second. Replaced by representative count for efficiency
removal(self, bacteria, Colony_Size) # remove any settled bacteria before modelling
wind(self, Colony_Size, NorthW, EastW, SouthW, WestW, StrengthW) # move bacteria horizontally
removal(self, bacteria, Colony_Size) # remove any newly out of bounds bacteria
settle(self, Colony_Size, Rise, Fall, startheight) # move bacteria vertically
landing(self, Colony_Size, DensityMap) # check if any bacteria have now landed
# +
# Run Model
start_time = time.time() # mark beginning of model run (for testing and output purposes)
for runs in range(1000): # set control of max iterations
if Colony_Size > 0: # so long as there are active bacteria:
        timer += 1 # increase timer by one (simulates seconds passing)
model(bacteria) #complete one run through of model
end_time = time.time()# once completed mark end of model
run_time = end_time - start_time #calculate model runtime
# +
# Plot Extent
x_extent_high = maxspread_x + 5 # add buffer east direction
x_extent_low = minspread_x - 5 # add buffer west direction
y_extent_high = maxspread_y + 5 # add buffer north direction
y_extent_low = minspread_y - 5 # add buffer south direction
# Ensure that plot does not extend beyond the boundaries of the density map
if x_extent_high >= 300:
x_extent_high = 299
if y_extent_high >= 300:
y_extent_high = 299
if x_extent_low <= 0:
x_extent_low = 1
if y_extent_low <= 0:
y_extent_low = 1
# If no bacteria settle, show full map extent
if settled == 0:
x_extent_high = 300
y_extent_high = 300
x_extent_low = 0
y_extent_low = 0
# -
# save model ouput as a .txt file
np.savetxt("output.txt",DensityMap,delimiter=',');
# +
# Plot Density Map
plt.ylim(y_extent_low, y_extent_high) # set y axis to established extent of fallout
plt.xlim(x_extent_low, x_extent_high) # limit x axis to established extent of fallout
plt.imshow(DensityMap, cmap='seismic') # plot density map
plt.scatter(sx, sy, color = "red") # plot marker denoting ground zero as red dot
# -
# Final statistics
print(settled, "bacteria settled and", lost, "lost from a colony of", settled + lost,"bacteria, in", timer, "seconds from dispersal time (real time:", run_time, "seconds)")
stats = np.array([])
| A2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# # Convolutional Neural Network Implementation in GWU_NN
# Demo of simple CNN vs Dense network trained on MNIST handwritten digits dataset. Binary Classifier of 1's and 0's.
#
# ## Import libraries
# Only using sklearn and tensorflow for test_train_split and importing the mnist dataset.
# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tensorflow.keras.datasets import mnist
from gwu_nn.gwu_network import GWUNetwork
from gwu_nn.layers import Dense, Convolutional, Flatten, MaxPool
# -
# ## Setting up the data
# Load the MNIST dataset and split into training and testing sets. Only add images to training/testing that are of 0s or 1s (because it will be a binary classifier).
# +
# Load the MNIST dataset
(X_train, y_train), (X_test, y_test) = mnist.load_data()
num1 = 0
num2 = 1
x_train_sample = []
y_train_sample = []
train_samples = 200
for i in range(len(X_train)):
if y_train[i] == num1 or y_train[i] == num2:
x_train_sample.append(X_train[i])
y_train_sample.append(y_train[i])
if len(x_train_sample) >= train_samples:
break
x_test_sample = []
y_test_sample = []
i_test_sample = []
samples = 500
for i in range(len(X_test)):
if y_test[i] == num1 or y_test[i] == num2:
x_test_sample.append(X_test[i])
y_test_sample.append(y_test[i])
i_test_sample.append(i)
if len(x_test_sample) >= samples:
break
print("x_train_sample: " + str(np.array(x_train_sample).shape))
print("x_test_sample: " + str(np.array(x_test_sample).shape))
# + [markdown] tags=[]
# ## Training a Dense Network
# Setup and train a simple dense network to use as benchmark against the CNN model.
# +
np.random.seed(1)
np.random.RandomState(1)
dense = GWUNetwork()
dense.add(Flatten(28,input_channels=1)) # Flat layer so the image is in the right dimensions
dense.add(Dense(20, activation='relu'))
dense.add(Dense(1, add_bias=False, activation='sigmoid'))
# Finally to complete our model we need to compile it. This defines our loss function and learning_rate
dense.compile(loss='log_loss', lr=0.001)
print(dense)
dense.fit(x_train_sample, y_train_sample, epochs=1)
# + [markdown] tags=[]
# ## Evaluating the Dense Network
# Generate predictions using the test split.
# +
# Predict using the test set. Calculate the accuracy
dense_raw_predictions = dense.predict(x_test_sample)
dense_predictions = [round(x[0][0]) for x in dense_raw_predictions]
dense_actual = [y for y in y_test_sample]
count = 0
for p,a in zip(dense_predictions,dense_actual):
if p == a:
count += 1
print("Dense model accuracy: " + str(100 * count/len(dense_predictions)))
# + [markdown] tags=[]
# ## Training a Convolutional Neural Network
# Setup and train a simple CNN. Only using one convolutional layer to keep things fast.
# +
np.random.seed(1)
np.random.RandomState(1)
cnn = GWUNetwork()
cnn.add(Convolutional(input_size=28, input_channels=1, kernel_size=3, num_kernels=1, activation='relu'))
cnn.add(MaxPool(28,2))
cnn.add(Flatten(14,input_channels=1)) # input size = 28/2
cnn.add(Dense(40, activation='relu')) # gets double the neurons here since input is only 14 (vs dense's 28)
cnn.add(Dense(1, add_bias=False, activation='sigmoid'))
# Finally to complete our model we need to compile it. This defines our loss function and learning_rate
cnn.compile(loss='log_loss', lr=0.001)
print(cnn)
cnn.fit(x_train_sample, y_train_sample, epochs=1)
# + [markdown] tags=[]
# ## Evaluating the CNN
# Generate predictions using the test split.
# +
# Predict using the test set. Calculate the accuracy
cnn_raw_predictions = cnn.predict(x_test_sample)
# calculate accuracy and show incorrect classifications
cnn_predictions = [round(x[0][0]) for x in cnn_raw_predictions]
count = 0
for p,a,i in zip(cnn_predictions,y_test_sample,i_test_sample):
if p == a:
count += 1
print("CNN model accuracy: " + str(100 * count/len(cnn_predictions)))
#print(cnn_predictions)
#print(y_test_sample)
# -
# ## Show a random evaluation
# Visualize the predictions by showing the prediction from both networks against the actual image.
# +
show_idx = 3
print("Dense Prediction: " + str(dense_predictions[show_idx]))
print("CNN Prediction: " + str(cnn_predictions[show_idx]))
print("Actual: " + str(y_test_sample[show_idx]))
ax = plt.subplot()
plt.imshow(x_test_sample[show_idx], cmap='gray')
plt.show()
# -
# ## Visualize the Kernel Weights
# Lets see what the kernel weights look like...
# +
kernel = cnn.layers[0].kernels.reshape(3,3)
plt.imshow(kernel, cmap='gray')
# -
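# As a further sanity check, the learned kernel can be convolved over one of the test images to see the feature map it produces. This sketch assumes `scipy` is available, which the notebook does not otherwise require.
# +
from scipy.signal import convolve2d
feature_map = convolve2d(np.array(x_test_sample[show_idx]), kernel, mode='valid')
plt.imshow(feature_map, cmap='gray')
plt.title('Feature map from the learned kernel')
plt.show()
# -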
| demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Unsupervised outliers detection (mixture of data)
# +
# import drama as drm
# import numpy as np
# import matplotlib.pylab as plt
# from matplotlib import gridspec
# # %matplotlib inline
# ## Signal synthesis
# i_sig = 2
# n_ftrs = 100
# noise = 0.3
# scl = 0.0
# sft = 0.0
# x = np.linspace(0,1,n_ftrs)
# X, y = drm.synt_mix(i_sig,n_ftrs,x=x,sigma = noise,n1 = scl,n2 = sft,n3 = scl,n4 = sft)
# gs = gridspec.GridSpec(1, 2)
# plt.figure(figsize=(8,3))
# ax1 = plt.subplot(gs[0, 0])
# ax2 = plt.subplot(gs[0, 1])
# ax1.set_title('Inliers')
# ax2.set_title('Outliers')
# inliers = X[y==i_sig]
# outliers = X[y!=i_sig]
# outliers_y = y[y!=i_sig]
# for i in range(0,45,5):
# ax1.plot(inliers[i],'b')
# ax2.plot(outliers[i],drm.COLORS[outliers_y[i]])
# plt.subplots_adjust(hspace=0.3,left=0.1, right=0.9, top=0.9, bottom=0.1)
# -
| notebooks/old_set/new.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# +
import glob
import os
import librosa
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
# %matplotlib inline
plt.style.use('ggplot')
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 12
plt.rcParams['axes.labelsize'] = 11
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 12
plt.rcParams['xtick.labelsize'] = 9
plt.rcParams['ytick.labelsize'] = 9
plt.rcParams['legend.fontsize'] = 11
plt.rcParams['figure.titlesize'] = 13
# +
def windows(data, window_size):
start = 0
while start < len(data):
yield start, start + window_size
start += (window_size / 2)
def extract_features(parent_dir,sub_dirs,file_ext="*.wav",bands = 60, frames = 41):
window_size = 512 * (frames - 1)
log_specgrams = []
labels = []
for l, sub_dir in enumerate(sub_dirs):
for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):
sound_clip,s = librosa.load(fn)
label = fn.split('/')[2].split('-')[1]
for (start,end) in windows(sound_clip,window_size):
if(len(sound_clip[start:end]) == window_size):
signal = sound_clip[start:end]
melspec = librosa.feature.melspectrogram(signal, n_mels = bands)
logspec = librosa.logamplitude(melspec)
logspec = logspec.T.flatten()[:, np.newaxis].T
log_specgrams.append(logspec)
labels.append(label)
log_specgrams = np.asarray(log_specgrams).reshape(len(log_specgrams),bands,frames,1)
features = np.concatenate((log_specgrams, np.zeros(np.shape(log_specgrams))), axis = 3)
for i in range(len(features)):
features[i, :, :, 1] = librosa.feature.delta(features[i, :, :, 0])
return np.array(features), np.array(labels,dtype = np.int)
def one_hot_encode(labels):
n_labels = len(labels)
n_unique_labels = len(np.unique(labels))
one_hot_encode = np.zeros((n_labels,n_unique_labels))
one_hot_encode[np.arange(n_labels), labels] = 1
return one_hot_encode
# -
parent_dir = 'Sound-Data'
sub_dirs= ['fold1','fold2']
features,labels = extract_features(parent_dir,sub_dirs)
labels = one_hot_encode(labels)
# +
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev = 0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(1.0, shape = shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x,W,strides=[1,2,2,1], padding='SAME')
def apply_convolution(x,kernel_size,num_channels,depth):
weights = weight_variable([kernel_size, kernel_size, num_channels, depth])
biases = bias_variable([depth])
return tf.nn.relu(tf.add(conv2d(x, weights),biases))
def apply_max_pool(x,kernel_size,stride_size):
return tf.nn.max_pool(x, ksize=[1, kernel_size, kernel_size, 1],
strides=[1, stride_size, stride_size, 1], padding='SAME')
# +
rnd_indices = np.random.rand(len(labels)) < 0.70
train_x = features[rnd_indices]
train_y = labels[rnd_indices]
test_x = features[~rnd_indices]
test_y = labels[~rnd_indices]
# +
frames = 41
bands = 60
feature_size = 2460 #60x41
num_labels = 10
num_channels = 2
batch_size = 50
kernel_size = 30
depth = 20
num_hidden = 200
learning_rate = 0.01
training_iterations = 2000
# +
X = tf.placeholder(tf.float32, shape=[None,bands,frames,num_channels])
Y = tf.placeholder(tf.float32, shape=[None,num_labels])
cov = apply_convolution(X,kernel_size,num_channels,depth)
shape = cov.get_shape().as_list()
cov_flat = tf.reshape(cov, [-1, shape[1] * shape[2] * shape[3]])
f_weights = weight_variable([shape[1] * shape[2] * depth, num_hidden])
f_biases = bias_variable([num_hidden])
f = tf.nn.sigmoid(tf.add(tf.matmul(cov_flat, f_weights),f_biases))
out_weights = weight_variable([num_hidden, num_labels])
out_biases = bias_variable([num_labels])
y_ = tf.nn.softmax(tf.matmul(f, out_weights) + out_biases)
# -
cross_entropy = -tf.reduce_sum(Y * tf.log(y_))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
cost_history = np.empty(shape=[1],dtype=float)
with tf.Session() as session:
tf.initialize_all_variables().run()
for itr in range(training_iterations):
offset = (itr * batch_size) % (train_y.shape[0] - batch_size)
batch_x = train_x[offset:(offset + batch_size), :, :, :]
batch_y = train_y[offset:(offset + batch_size), :]
_, c = session.run([optimizer, cross_entropy],feed_dict={X: batch_x, Y : batch_y})
cost_history = np.append(cost_history,c)
print('Test accuracy: ',round(session.run(accuracy, feed_dict={X: test_x, Y: test_y}) , 3))
fig = plt.figure(figsize=(15,10))
plt.plot(cost_history)
plt.axis([0,training_iterations,0,np.max(cost_history)])
plt.show()
| Urban Sound Classification using CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Debugger
#
# You've probably used a variety of print statements to try to find errors in your code. A better way of doing this is by using Python's built-in debugger module (pdb). The pdb module implements an interactive debugging environment for Python programs. It includes features to let you pause your program, look at the values of variables, and watch program execution step-by-step, so you can understand what your program actually does and find bugs in the logic.
#
# This is a bit difficult to show since it requires creating an error on purpose, but hopefully this simple example illustrates the power of the pdb module. <br>*Note: Keep in mind it would be pretty unusual to use pdb in a Jupyter Notebook setting.*
#
# ___
# Here we will create an error on purpose, trying to add a list to an integer
# +
x = [1,3,4]
y = 2
z = 3
result = y + z
print(result)
result2 = y+x
print(result2)
# -
# Hmmm, looks like we get an error! Let's implement a set_trace() using the pdb module. This will allow us to basically pause the code at the point of the trace and check if anything is wrong.
# +
import pdb
x = [1,3,4]
y = 2
z = 3
result = y + z
print(result)
# Set a trace using Python Debugger
pdb.set_trace()
result2 = y+x
print(result2)
# -
# Great! Now we could check what the various variables were and check for errors. You can use 'q' to quit the debugger. For more information on general debugging techniques and more methods, check out the official documentation:
# https://docs.python.org/3/library/pdb.html
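# As a quick reference, these standard pdb commands cover most interactive sessions at the `(Pdb)` prompt:
#
# * `n` - execute the next line
# * `s` - step into a function call
# * `c` - continue execution until the next breakpoint
# * `p <expression>` - print the value of an expression (e.g. `p result2`)
# * `l` - list the source code around the current line
# * `q` - quit the debugger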
| 4-assets/BOOKS/Jupyter-Notebooks/03-Python_Debugger_(pdb)-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# First we load the required libraries.
# +
from sympy import *
from sympy.physics.quantum import *
from sympy.physics.quantum.qubit import Qubit, QubitBra, measure_all, measure_all_oneshot,measure_partial, matrix_to_qubit
from sympy.physics.quantum.gate import H,X,Y,Z,S,T,CPHASE,CNOT,SWAP,UGate,CGateS,gate_simp,OneQubitGate
from sympy.physics.quantum.gate import IdentityGate as _I
from sympy.physics.quantum.qft import *
from sympy.physics.quantum.matrixcache import matrix_cache
matrix_cache.cache_matrix('Tdg',Matrix([[1, 0], [0, exp(-I*pi/4)]]))
matrix_cache.cache_matrix('Sdg',Matrix([[1, 0], [0, -I]]))
matrix_cache.cache_matrix('V',(1/2)*Matrix([[1+I, 1-I], [1-I, 1+I]]))
matrix_cache.cache_matrix('Vdg',(1/2)*Matrix([[1-I, 1+I], [1+I, 1-I]]))
CZ = CPHASE
class Tdg(OneQubitGate): # T^{\dagger} gate
gate_name = u'Tdg'
gate_name_latex = u'T^{\dagger}'
def get_target_matrix(self, format='sympy'):
return matrix_cache.get_matrix('Tdg', format)
class Sdg(OneQubitGate): # S^{\dagger} gate
gate_name = u'Sdg'
gate_name_latex = u'S^{\dagger}'
def get_target_matrix(self, format='sympy'):
return matrix_cache.get_matrix('Sdg', format)
class V(OneQubitGate): # √X gate
gate_name = u'V'
gate_name_latex = u'V'
def get_target_matrix(self, format='sympy'):
return matrix_cache.get_matrix('V', format)
class Vdg(OneQubitGate): # √X^{\dagger} gate
gate_name = u'Vdg'
gate_name_latex = u'V^{\dagger}'
def get_target_matrix(self, format='sympy'):
return matrix_cache.get_matrix('Vdg', format)
def CV(c,t): return CGateS((c),V(t))
def CVdg(c,t): return CGateS((c),Vdg(t))
def CCX(c1,c2,t): return CGateS((c1,c2),X(t))
def Toffoli(c1,c2,t): return CGateS((c1,c2),X(t))
def CCZ(c1,c2,t): return (H(t)*CCX(c1,c2,t)*H(t)) # Define the CCZ operator.
def hadamard(s,n):
h = H(s)
for i in range(s+1,n+s): h = H(i)*h
return h
def disp1Q(u): print(u); display(represent(u,nqubits=1)); CircuitPlot(u,nqubits=1)
def disp2Q(u): print(u); display(represent(u,nqubits=2)); CircuitPlot(u,nqubits=2)
from sympy.printing.dot import dotprint
init_printing()
# %matplotlib inline
import matplotlib.pyplot as plt
from sympy.physics.quantum.circuitplot import CircuitPlot,labeller, Mz,CreateOneQubitGate
alpha, beta, psi, phi, theta, chi = Symbol(r'\alpha'), Symbol(r'\beta'), Symbol(r'\psi'), Symbol(r'\phi'), Symbol(r'\theta'), Symbol(r'\chi')
from qutip import *
import numpy as np
# -
# (Supplementary reading)
# * Nielsen & Chuang, "Quantum Computation and Quantum Information" (referred to here as N,C "QCQI")
# * Papers published on arXiv
# # Quantum Computer Basics
# ## 2.1 What is a qubit?
# ### 2.1.1 The Bloch sphere and bra-ket notation
# Ket
Ket(psi) , alpha * Ket('0') + beta * Ket('1')
# Bra
Bra(psi), Dagger(alpha * Ket('0') + beta * Ket('1') ) , Dagger(alpha) * Bra('0') + Dagger(beta) * Bra('1')
# Computing <ψ|ψ>
expand(Dagger(alpha * Ket('0') + beta * Ket('1') ) * (alpha * Ket('0') + beta * Ket('1') ))
# $\alpha \alpha^{\dagger} = 1, \beta \beta^{\dagger} = 1, \langle 0 \vert 0 \rangle = 1, \langle 1 \vert 1 \rangle = 1, \langle 1 \vert 0 \rangle = 0, \langle 0 \vert 1 \rangle = 0$
# *keywords*
#
# - Global phase
# - Bloch sphere
# - Z basis (computational basis)
# - X basis (Hadamard basis)
# - Y basis (circular basis)
b = Bloch()
zero_state = [0,0,1]
b.add_vectors(zero_state)
one_state = [0,0,-1]
b.add_vectors(one_state)
plus_state = [1,0,0]
b.add_vectors(plus_state)
minus_state = [-1,0,0]
b.add_vectors(minus_state)
b.show()
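
# The Y-basis (circular-basis) states listed among the keywords above can be shown the same way; in this Bloch-sphere convention they lie on the +y and -y axes (illustrative sketch).
# +
b2 = Bloch()
b2.add_vectors([0, 1, 0])   # |+i> state
b2.add_vectors([0, -1, 0])  # |-i> state
b2.show()
# -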
# ### 2.1.2 The strange properties of qubits
# *keywords*
# - Probability amplitude
# - Born rule
# - Projective measurement
# - Decoherence
# - Coherence time
# $ T_1 $ : the time for $\lvert 1 \rangle$ to relax to $\lvert 0 \rangle$
# $ T_2 $ : the time for a superposition to be destroyed
# ## 2.2 Computing with quantum gates
# ### 2.2.1 Single-qubit gates
# * X gate
disp1Q(X(0))
disp1Q(X(0)*Qubit('0'))
disp1Q(X(0)*Qubit('1'))
# * Y gate
disp1Q(Y(0))
# * Z gate
disp1Q(Z(0))
disp1Q(Z(0)*Qubit('0'))
disp1Q(Z(0)*Qubit('1'))
# * Hadamard gate (H)
disp1Q(H(0))
# * Phase gate (S)
# Rotation by $ \pi / 2 $ about the Z axis
disp1Q(S(0))
# * $\pi/8$ gate (T)
# Rotation by $ \pi / 4 $ about the Z axis
disp1Q(T(0))
# TT
disp1Q(T(0) *_I(0)* T(0)) # insert an identity (I) because CircuitPlot does not draw this well otherwise
# SS
disp1Q(S(0)*_I(0) *S(0)) # insert an identity (I) because CircuitPlot does not draw this well otherwise
# ### 2.2.2 Two-qubit gates
a0, a1, b0, b1 = Symbol('a0'), Symbol('a1'), Symbol('b0'), Symbol('b1')
Matrix([[a0],[a1]])
Matrix([b0,b1])
Matrix([[a0],[a1]]) .dot( Matrix([b0,b1]) )
TensorProduct(Matrix([a0,a1]),( Matrix([b0,b1]) ))
u11, u12, u21, u22 = Symbol('u11'), Symbol('u12'), Symbol('u21'), Symbol('u22')
v11, v12, v21, v22 = Symbol('v11'), Symbol('v12'), Symbol('v21'), Symbol('v22')
TensorProduct(Matrix([[u11, u12], [u21, u22]]), Matrix([[v11, v12], [v21, v22]]))
represent(CNOT(1,0),nqubits=2)
represent(SWAP(1,0), nqubits=2)
sw=CNOT(1,0)*CNOT(0,1)*CNOT(1,0)
represent(sw,nqubits=2)
CircuitPlot(sw, nqubits=2)
# ## 2.3 Let's draw quantum circuit diagrams
# ### 2.3.1 How to draw a quantum circuit diagram
q=H(2)*CZ(1,2)*CNOT(0,1)*H(0)*H(2)
CircuitPlot(q, nqubits=3)
qst=represent(q*Qubit('000'),nqubits=3)
qst
matrix_to_qubit(qst)
# ### 2.3.2 The quantum version of the NAND gate (the Toffoli gate)
represent(Toffoli(2,1,0),nqubits=3)
CircuitPlot(Toffoli(2,1,0),nqubits=3)
# (Reference)
#
# [Elementary gates for quantum computation (arXiv:quant-ph/9503016)](https://arxiv.org/abs/quant-ph/9503016) ← already shown in 1995
# Explained in N,C "QCQI", Section 4.3 "Controlled operations"
tof=CV(2,0)*CNOT(2,1)*CVdg(1,0)*CNOT(2,1)*CV(1,0)
CircuitPlot(tof,nqubits=3)
represent(tof, nqubits=3)
# $2(0.5-0.5i)(0.5+0.5i) = 1$
# $(0.5-0.5i)^2+(0.5+0.5i)^2 = 0$
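# These two identities can be verified directly with sympy (a small check, using only the symbols already imported above):
# +
simplify(2*(Rational(1,2) - I/2)*(Rational(1,2) + I/2)), simplify((Rational(1,2) - I/2)**2 + (Rational(1,2) + I/2)**2)
# -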
# +
def Toff(q0,q1,q2):
return T(q0)*S(q1)*CNOT(q0,q1)*Tdg(q1)*CNOT(q0,q1)\
*H(q2)*Tdg(q1)*T(q2)*CNOT(q0,q2)*Tdg(q2)*CNOT(q1,q2)\
*T(q2)*CNOT(q0,q2)*Tdg(q2)*CNOT(q1,q2)*H(q2)
represent(Toff(2,1,0), nqubits=3) # display the matrix representation
# -
CircuitPlot(Toff(2,1,0),nqubits=3)
# ### 2.3.3 The quantum version of an adder circuit
# * Half adder
# Adds two one-bit binary numbers
halfadder=CNOT(2,1)*Toffoli(2,1,0)
CircuitPlot(halfadder,nqubits=3)
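
# As a quick check in the same style as the circuits above, we can apply the half adder to a computational-basis input; with inputs on qubits 2 and 1 and the ancilla qubit 0 set to $\lvert 0 \rangle$, qubit 1 ends up holding the sum and qubit 0 the carry (based on the gate ordering above).
# +
matrix_to_qubit(represent(halfadder*Qubit('110'), nqubits=3))
# -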
# * Full adder
# Adds three one-bit binary numbers
fulladder=CNOT(3,1)*Toffoli(3,1,0)*CNOT(2,1)*Toffoli(2,1,0)
CircuitPlot(fulladder,nqubits=4)
# > In the sense that it can perform equivalent arithmetic operations, a quantum computer is upward compatible with a classical computer, but the benefit of this is not so obvious.
# ### 2.3.4 The quantum version of arithmetic and logic operations
# "Parallel computation using superposition" ⇔ by the Born rule, reading a value out (measurement) only succeeds with probability 1/N
# The superposition state used as input, $\displaystyle \lvert x \rangle = \frac{1}{\sqrt{N}} \sum_{i=1}^N \lvert x_{i} \rangle $, is obtained by applying a Hadamard to every qubit of $ \lvert 0 \dots 0 \rangle $
superposition=hadamard(0,4)*Qubit('0'*4)
superposition
transpose(represent(superposition,nqubits=4))
# ### 2.3.5 Universal quantum computation
# (Qiita article) [Why a quantum computer is a computer](https://qiita.com/kyamaz/items/9c99bbf66c45fca535e5)
# (Qiita article) [Computational accuracy of quantum computers and the Solovay-Kitaev theorem](https://qiita.com/kyamaz/items/6d2c2708cb3351e57be1)
# N,C "QCQI", Appendix C "The Solovay-Kitaev theorem"
# https://www.mathstat.dal.ca/~selinger/newsynth/ has Haskell-based examples (the commands can be run)
# ## 2.4 Copying and teleportation
# ### 2.4.1 What is quantum entanglement?
# Product state:
# $\begin{equation}
# \left.\begin{aligned}
# \big( \alpha \lvert 0 \rangle
# + \beta \lvert 1 \rangle \big)
# \otimes \big( \gamma \lvert 0 \rangle
# + \delta \lvert 1 \rangle \big)
# & = \alpha \gamma \lvert 00 \rangle
# + \alpha \delta \lvert 01 \rangle
# + \beta \gamma \lvert 10 \rangle
# + \beta \delta \lvert 11 \rangle
# \end{aligned} \right.
# \end{equation}$
# Entangled state: a state that cannot be written as a simple product
#
# Example) Bell states
#
# $\begin{equation}
# \left.\begin{aligned}
# \lvert \Phi_{+} \rangle =
# \frac{1}{\sqrt{2}} \lvert 00 \rangle
# + \frac{1}{\sqrt{2}} \lvert 11 \rangle
# \end{aligned} \right.
# \end{equation}$
#
# $\begin{equation}
# \left.\begin{aligned}
# \lvert \Phi_{-} \rangle =
# \frac{1}{\sqrt{2}} \lvert 00 \rangle
# - \frac{1}{\sqrt{2}} \lvert 11 \rangle
# \end{aligned} \right.
# \end{equation}$
#
# $\begin{equation}
# \left.\begin{aligned}
# \lvert \Psi_{+} \rangle =
# \frac{1}{\sqrt{2}} \lvert 01 \rangle
# + \frac{1}{\sqrt{2}} \lvert 10 \rangle
# \end{aligned} \right.
# \end{equation}$
#
# $\begin{equation}
# \left.\begin{aligned}
# \lvert \Psi_{-} \rangle =
# \frac{1}{\sqrt{2}} \lvert 01 \rangle
# - \frac{1}{\sqrt{2}} \lvert 10 \rangle
# \end{aligned} \right.
# \end{equation}$
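# As a quick check, $\lvert \Phi_{+} \rangle$ can be built from $\lvert 00 \rangle$ with a Hadamard followed by a CNOT; a minimal sketch using the same sympy.physics.quantum helpers as the rest of this notebook:
# +
from sympy.physics.quantum.gate import H, CNOT
from sympy.physics.quantum.qubit import Qubit, matrix_to_qubit
from sympy.physics.quantum.represent import represent
matrix_to_qubit(represent(CNOT(1,0)*H(1)*Qubit('00'), nqubits=2))
# -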
# Let us look at the Hadamard-basis form of the state $\lvert \Phi_{+} \rangle$, which is entangled in the computational basis
#
# $\begin{equation}
# \left.\begin{aligned}
# \lvert \Phi_{+} \rangle
# &= \frac{1}{\sqrt{2}} \lvert 00 \rangle
# + \frac{1}{\sqrt{2}} \lvert 11 \rangle \\
# &= \frac{1}{2 \sqrt{2}} \left\{ ( \lvert +\rangle + \lvert -\rangle ) \otimes ( \lvert +\rangle + \lvert -\rangle )
# + ( \lvert +\rangle - \lvert -\rangle ) \otimes ( \lvert +\rangle - \lvert -\rangle ) \right\} \\
# &= \frac{1}{2 \sqrt{2}} \left\{ \lvert ++ \rangle + \lvert +- \rangle + \lvert -+ \rangle + \lvert -- \rangle
# + \lvert ++ \rangle - \lvert +- \rangle - \lvert -+ \rangle + \lvert -- \rangle
# \right\} \\
# &= \frac{1}{\sqrt{2}} \lvert ++ \rangle
# + \frac{1}{\sqrt{2}} \lvert -- \rangle
# \end{aligned} \right.
# \end{equation}$
#
# (Reference)
# [Efficient evaluation of quantum observables using entangled measurements (arXiv:1909.09119)](https://arxiv.org/abs/1909.09119)
#
# In "Entanglion", a board game devised by IBM Research, the following linear combinations of Bell states appear.
#
# $\begin{equation}
# \left.\begin{aligned}
# \lvert \Omega_{0} \rangle
# &= \frac{1}{\sqrt{2}}
# \lvert \Phi_{+} \rangle
# + \frac{1}{\sqrt{2}}
# \lvert \Psi_{-} \rangle \\
# &=\frac{1}{2}(
# \lvert 00 \rangle
# + \lvert 01 \rangle
# - \lvert 10 \rangle
# + \lvert 11 \rangle
# )
# \end{aligned} \right.
# \end{equation}$
#
# $\begin{equation}
# \left.\begin{aligned}
# \lvert \Omega_{1} \rangle
# &= -\frac{1}{\sqrt{2}}
# \lvert \Phi_{-} \rangle
# + \frac{1}{\sqrt{2}}
# \lvert \Psi_{+} \rangle \\
# &=\frac{1}{2}(
# - \lvert 00 \rangle
# + \lvert 01 \rangle
# + \lvert 10 \rangle
# + \lvert 11 \rangle
# )
# \end{aligned} \right.
# \end{equation}$
#
# $\begin{equation}
# \left.\begin{aligned}
# \lvert \Omega_{2} \rangle
# &= \frac{1}{\sqrt{2}}
# \lvert \Phi_{-} \rangle
# + \frac{1}{\sqrt{2}}
# \lvert \Psi_{+} \rangle \\
# &=\frac{1}{2}(
# \lvert 00 \rangle
# + \lvert 01 \rangle
# + \lvert 10 \rangle
# - \lvert 11 \rangle
# )
# \end{aligned} \right.
# \end{equation}$
#
# $\begin{equation}
# \left.\begin{aligned}
# \lvert \Omega_{3} \rangle
# &= \frac{1}{\sqrt{2}}
# \lvert \Phi_{+} \rangle
# - \frac{1}{\sqrt{2}}
# \lvert \Psi_{-} \rangle \\
# &=\frac{1}{2}(
# \lvert 00 \rangle
# - \lvert 01 \rangle
# + \lvert 10 \rangle
# + \lvert 11 \rangle
# )
# \end{aligned} \right.
# \end{equation}$
# ### 2.4.2 Quantum data cannot be copied?
# * No-Cloning Theorem
#
# From a quantum state $A_{@x}$ it is impossible to create two copies of the same state ($A_{@x}$ and $A_{@y}$).
# A quantum state $A_{@x}$ can be moved to $A_{@y}$, but the original state at $A_{@x}$ is not preserved.
#
# * "It is impossible to transmit information faster than light."
#
#
# * (Supplement) Violation of Bell's inequality
#
# For details, see Akira Shimizu, "Foundations of Quantum Theory" (new edition), Chapter 8,
# which explains the violation of the CHSH inequality, one form of Bell's inequality.
#
#
# **Bell's inequality**
# ```
# For a correlation C of a suitable set of physical quantities, any sensible theory
# (one in which cause and effect are not reversed), i.e. any realist theory, satisfies
# −2 ≤ C ≤ 2
# ```
# <font color="red">However, quantum theory can violate this inequality: an example with $|C| = 2\sqrt{2}$ can be demonstrated.</font>
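# A small numerical sketch (not from the original text) of the CHSH value for $\lvert \Phi_{+} \rangle$, measuring each qubit along an axis $\cos\theta\, Z + \sin\theta\, X$; the angles below are the standard choice that reaches $2\sqrt{2}$:
# +
import numpy as np
Zm = np.array([[1, 0], [0, -1]])
Xm = np.array([[0, 1], [1, 0]])
phi_plus = np.array([1, 0, 0, 1]) / np.sqrt(2)        # (|00> + |11>)/sqrt(2)
def corr(theta_a, theta_b):
    A = np.cos(theta_a) * Zm + np.sin(theta_a) * Xm   # measurement axis on qubit a
    B = np.cos(theta_b) * Zm + np.sin(theta_b) * Xm   # measurement axis on qubit b
    return phi_plus @ np.kron(A, B) @ phi_plus        # expectation value of A (x) B
a, a2, b, b2 = 0, np.pi/2, np.pi/4, -np.pi/4          # angles that maximise the CHSH value
C = corr(a, b) + corr(a, b2) + corr(a2, b) - corr(a2, b2)
print(C, 2*np.sqrt(2))                                # both are approximately 2.828
# -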
#
# ### 2.4.3 Transferring quantum data
# * Quantum teleportation
qtelep = Mz(2)*Mz(1)*H(2)*CNOT(2,1)*CNOT(1,0)*H(1)
CircuitPlot(qtelep,nqubits=3, labels=["0_{b}","0_{a}","\psi_{a}"])
# $\begin{equation}
# \left.\begin{aligned}
# \Big[
# \lvert \psi \rangle _{a}
# \otimes
# \lvert 0 \rangle _{a}
# \Big]
# \lvert 0 \rangle _{b}
# \end{aligned} \right.
# \end{equation}$
# $\begin{equation}
# \left.\begin{aligned}
# \otimes H \cdots \Rightarrow
# \Big[
# \lvert \psi \rangle _{a}
# \otimes
# \frac{1}{\sqrt{2}} \left(
# \lvert 0 \rangle _{a} + \lvert 1 \rangle _{a}
# \right)
# \Big]
# \lvert 0 \rangle _{b}
# \end{aligned} \right.
# \end{equation}$
# $\begin{equation}
# \left.\begin{aligned}
# \otimes CNOT _{1} \cdots \Rightarrow
# \lvert \psi \rangle _{a}
# \otimes
# \frac{1}{\sqrt{2}} \left(
# \lvert 0 \rangle _{a} \lvert 0 \rangle _{b} + \lvert 1 \rangle _{a} \lvert 1 \rangle _{b}
# \right)
# \end{aligned} \right.
# \end{equation}$
# $\begin{equation}
# \left.\begin{aligned}
# \quad \quad \quad =
# \left( \alpha \lvert 0 \rangle _{a}
# + \beta \lvert 1 \rangle _{a} \right)
# \otimes
# \frac{1}{\sqrt{2}} \left(
# \lvert 0 \rangle _{a} \lvert 0 \rangle _{b} + \lvert 1 \rangle _{a} \lvert 1 \rangle _{b}
# \right)
# \end{aligned} \right.
# \end{equation}$
# $\begin{equation}
# \left.\begin{aligned}
# \otimes CNOT _{2} \cdots \Rightarrow
# \frac{1}{\sqrt{2}} \left(
# \alpha \lvert 0 \rangle _{a} \lvert 0 \rangle _{a} \lvert 0 \rangle _{b}
# + \alpha \lvert 0 \rangle _{a} \lvert 1 \rangle _{a} \lvert 1 \rangle _{b}
# +
# \beta \lvert 1 \rangle _{a} \lvert 1 \rangle _{a} \lvert 0 \rangle _{b}
# + \beta \lvert 1 \rangle _{a} \lvert 0 \rangle _{a} \lvert 1 \rangle _{b}
# \right)
# \end{aligned} \right.
# \end{equation}$
# $\begin{equation}
# \left.\begin{aligned}
# \otimes H \cdots \Rightarrow
# (1/2) (
# &+ \alpha \lvert 0 \rangle _{a} \lvert 0 \rangle _{a} \lvert 0 \rangle _{b}
# +
# \alpha \lvert 1 \rangle _{a} \lvert 0 \rangle _{a} \lvert 0 \rangle _{b} \\
# & +
# \alpha \lvert 0 \rangle _{a} \lvert 1 \rangle _{a} \lvert 1 \rangle _{b}
# +
# \alpha \lvert 1 \rangle _{a} \lvert 1 \rangle _{a} \lvert 1 \rangle _{b} \\
# & +
# \beta \lvert 0 \rangle _{a} \lvert 1 \rangle _{a} \lvert 0 \rangle _{b}
# -
# \beta \lvert 1 \rangle _{a} \lvert 1 \rangle _{a} \lvert 0 \rangle _{b} \\
# & +
# \beta \lvert 0 \rangle _{a} \lvert 0 \rangle _{a} \lvert 1 \rangle _{b}
# -
# \beta \lvert 1 \rangle _{a} \lvert 0 \rangle _{a} \lvert 1 \rangle _{b}
# )
# \end{aligned} \right.
# \end{equation}$
# $\begin{equation}
# \left.\begin{aligned}
# \quad \quad \quad =
# (1/2) (
# &+ \lvert 0 \rangle _{a} \lvert 0 \rangle _{a} ( \alpha \lvert 0 \rangle _{b} +\beta \lvert 1 \rangle _{b}) \\
# & +
# \lvert 0 \rangle _{a} \lvert 1 \rangle _{a} ( \alpha \lvert 1 \rangle _{b} + \beta \lvert 0 \rangle _{b}) \\
# &+
# \lvert 1 \rangle _{a} \lvert 0 \rangle _{a} ( \alpha \lvert 0 \rangle _{b} - \beta \lvert 1 \rangle _{b} )\\
# & +
# \lvert 1 \rangle _{a} \lvert 1 \rangle _{a} ( \alpha \lvert 1 \rangle _{b} - \beta \lvert 0 \rangle _{b}) )
# \end{aligned} \right.
# \end{equation}$
# ### 2.4.4 Transferring quantum gate operations
# * Gate teleportation
# * Measurement-based quantum computation
gate_qtelep=T(0)*CNOT(1,0)*Tdg(0)*T(0)*CNOT(0,1)
represent(gate_qtelep,nqubits=2)
CircuitPlot(gate_qtelep,nqubits=2)
matrix_to_qubit(represent(gate_qtelep*H(0)*Qubit('00'),nqubits=2))
matrix_to_qubit(represent(gate_qtelep*H(0)*Qubit('10'),nqubits=2))
# Commuting T with (the control side of) CNOT
t_cnot=CNOT(0,1)*T(0)
represent(t_cnot,nqubits=2)
cnot_t=T(0)*CNOT(0,1)
represent(cnot_t,nqubits=2)
# Computing $ TXT^{\dagger} $
TXTdg = T(0)*X(0)*Tdg(0)
represent(exp(I*pi/4)*TXTdg, nqubits=1)
represent(S(0)*X(0), nqubits=1)
# Controlled-SX
def CSX(c,t): return CNOT(c,t)*CGateS([c],S(t))
represent(CSX(1,0),nqubits=2)
CircuitPlot(CSX(1,0),nqubits=2)
gate_qtelep2 = CSX(1,0)*T(0)*CNOT(0,1)
represent(gate_qtelep2,nqubits=2)
gate_qtelep3 = CSX(1,0)*CNOT(0,1)*T(0)
represent(gate_qtelep3,nqubits=2)
# (Reference) 6.4.4 What is a magic state?
#
# $ \displaystyle \lvert T _{L} \rangle = T \lvert + _{L} \rangle = e^{-i\frac{\pi}{8} Z} \frac{1}{\sqrt{2}} ( \lvert 0 _{L}\rangle + \lvert 1 _{L} \rangle )$
#
# Magic state distillation: a procedure for producing a $ \lvert T _ {L} \rangle $ with sufficiently few errors
#
# A single round of magic state distillation requires at least 15 ancilla qubits, and $l$ rounds require $15^{l}$ ancilla qubits; only at that cost is one high-fidelity logical T gate (more precisely, one "magic state") obtained.
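# As a quick check of the unencoded magic state itself, $T \lvert + \rangle$ (up to a global phase) can be written out with the sympy gates used above; producing the logical, encoded $\lvert T_{L} \rangle$ is beyond this notebook:
matrix_to_qubit(represent(T(0)*H(0)*Qubit('0'), nqubits=1))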
| docs/20210120/quantum_computing_2_1-2_4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="../../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left">
# ## _*Quantum Counterfeit Coin Problem*_
#
# The latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorial.
#
# ***
# ### Contributors
# <NAME>, <NAME>
# ## Introduction
#
# The counterfeit coin problem is a classic puzzle first proposed by E. D. Schell in the January 1945 edition of the *American Mathematical Monthly*:
#
# >You have eight similar coins and a beam balance. At most one coin is counterfeit and hence underweight. How can you detect whether there is an underweight coin, and if so, which one, using the balance only twice?
#
# The answer to the above puzzle is affirmative. What happens when we can use a quantum beam balance?
#
# Given a quantum beam balance and a counterfeit coin among $N$ coins, there is a quantum algorithm that can find the counterfeit coin by using the quantum balance only once (independently of $N$, the number of coins!). On the other hand, any classical algorithm requires at least $\Omega(\log{N})$ uses of the beam balance. In general, for $k$ counterfeit coins of the same weight (but different from the majority of normal coins), there is [a quantum algorithm](https://arxiv.org/pdf/1009.0416.pdf) that queries the quantum beam balance only $O(k^{1/4})$ times, in contrast to any classical algorithm, which requires $\Omega(k\log{(N/k)})$ queries to the beam balance. This is one of the wonders of quantum algorithms: in terms of query complexity, they achieve a quartic speed-up compared to their classical counterparts.
#
# ## Quantum Procedure
# Hereafter we describe a step-by-step procedure to program the Quantum Counterfeit Coin Problem for $k=1$ counterfeit coin with the IBM Q Experience. [Terhal and Smolin](https://arxiv.org/pdf/quant-ph/9705041.pdf) were the first to show that it is possible to identify the false coin with a single query to the quantum beam balance.
#
# ### Preparing the environment
# First, we prepare the environment.
# +
# useful additional packages
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
# useful math functions
from math import pi, cos, acos, sqrt
# importing Qiskit
from qiskit import Aer, IBMQ, execute
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
# import basic plot tools
from qiskit.tools.visualization import plot_histogram
# -
# Load saved IBMQ accounts
IBMQ.load_accounts()
# ### Setting the number of coins and the index of false coin
#
# Next, we set the number of coins and the index of the counterfeit coin. The former determines the quantum superpositions used by the algorithm, while the latter determines the quantum beam balance.
# +
M = 16 # Maximum number of physical qubits available
numberOfCoins = 8 # This number should be up to M-1, where M is the number of qubits available
indexOfFalseCoin = 6 # This should be 0, 1, ..., numberOfCoins - 1, where we use Python indexing
if numberOfCoins < 4 or numberOfCoins >= M:
raise Exception("Please use numberOfCoins between 4 and ", M-1)
if indexOfFalseCoin < 0 or indexOfFalseCoin >= numberOfCoins:
raise Exception("indexOfFalseCoin must be between 0 and ", numberOfCoins-1)
# -
# ### Querying the quantum beam balance
#
# As in a classical algorithm to find the false coin, we will use the balance by placing the same number of coins on the left and right pans of the beam. The difference is that in a quantum algorithm, we can query the beam balance in superposition. To query the quantum beam balance, we use a binary query string to encode coins placed on the pans; namely, the binary string `01101010` means to place coins whose indices are 1, 2, 4, and 6 on the pans, while the binary string `01110111` means to place all coins but those with indices 0 and 4 on the pans. Notice that we do not care how the selected coins are placed on the left and right pans, because their results are the same: it is balanced when no false coin is included, and tilted otherwise.
#
# In our example, the number of coins is $8$; if the index of the false coin is $3$, the query `01101010` will result in balanced (or, $0$), while the query `01110111` will result in tilted (or, $1$). Using two quantum registers to query the quantum balance, where the first register is for the query string and the second register is for the result of the quantum balance, we can write the query to the quantum balance (omitting the normalization of the amplitudes):
#
# \begin{eqnarray}
# |01101010\rangle\Big( |0\rangle - |1\rangle \Big) &\xrightarrow{\mbox{Quantum Beam Balance}}& |01101010\rangle\Big( |0\oplus 0\rangle - |1 \oplus 0\rangle \Big) = |01101010\rangle\Big( |0\rangle - |1\rangle \Big)\\
# |01110111\rangle\Big( |0\rangle - |1\rangle \Big) &\xrightarrow{\mbox{Quantum Beam Balance}}& |01110111\rangle\Big( |0 \oplus 1\rangle - |1 \oplus 1\rangle \Big) = (-1) |01110111\rangle\Big( |0 \rangle - |1 \rangle \Big)
# \end{eqnarray}
#
# Notice that in the above equation, the phase is flipped if and only if the binary query string is $1$ at the index of the false coin. Let $x \in \left\{0,1\right\}^N$ be the $N$-bit query string (which contains an even number of $1$s), and let $e_k \in \left\{0,1\right\}^N$ be the binary string which is $1$ at the index of the false coin and $0$ otherwise. Clearly,
#
# $$
# |x\rangle\Big(|0\rangle - |1\rangle \Big) \xrightarrow{\mbox{Quantum Beam Balance}} \left(-1\right) ^{x\cdot e_k} |x\rangle\Big(|0\rangle - |1\rangle \Big),
# $$
# where $x\cdot e_k$ denotes the inner product of $x$ and $e_k$.
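# As a classical sanity check of the phase factor $(-1)^{x\cdot e_k}$, the short (hypothetical) helper below evaluates it for the two example query strings, assuming the false coin is at index 3 as in the example above:
# +
def balance_phase(x, k):
    """Return (-1)**(x . e_k) for a query bitstring x, with the leftmost bit as coin 0."""
    return -1 if x[k] == "1" else 1
for query in ["01101010", "01110111"]:
    print(query, balance_phase(query, 3))   # +1 means balanced, -1 means tilted
# -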
#
# Here, we will prepare the superposition of all binary query strings with an even number of $1$s. Namely, we want a circuit that produces the following transformation:
#
# $$
# |0\rangle \rightarrow \frac{1}{2^{(N-1)/2}}\sum_{x\in \left\{0,1\right\}^N~\mbox{and}~|x|\equiv 0 \mod 2} |x\rangle,
# $$
#
# where $|x|$ denotes the Hamming weight of $x$.
#
# To obtain such superposition of states of even number of $1$s, we can perform Hadamard transformation on $|0\rangle$ to obtain superposition of $\sum_{x\in\left\{0,1\right\}^N} |x\rangle$, and check if the Hamming weight of $x$ is even. It can be shown that the Hamming weight of $x$ is even if and only if $x_1 \oplus x_2 \oplus \ldots \oplus x_N = 0$. Thus, we can transform:
#
# \begin{equation}
# |0\rangle|0\rangle \xrightarrow{H^{\oplus N}} \frac{1}{2^{N/2}}\sum_x |x\rangle |0\rangle \xrightarrow{\mbox{XOR}(x)} \frac{1}{2^{N/2}}\sum_x |x\rangle |0\oplus x_1 \oplus x_2 \oplus \ldots \oplus x_N\rangle
# \end{equation}
#
# The right-hand side of the equation can be divided based on the result of the $\mbox{XOR}(x) = x_1 \oplus \ldots \oplus x_N$, namely,
#
# $$
# \frac{1}{2^{(N-1)/2}}\sum_{x\in \left\{0,1\right\}^N~\mbox{and}~|x|\equiv 0 \mod 2} |x\rangle|0\rangle + \frac{1}{2^{(N-1)/2}}\sum_{x\in \left\{0,1\right\}^N~\mbox{and}~|x|\equiv 1 \mod 2} |x\rangle|1\rangle.
# $$
#
# Thus, if we measure the second register and observe $|0\rangle$, the first register is the superposition of all binary query strings we want. If we fail (observe $|1\rangle$), we repeat the above procedure until we observe $|0\rangle$. Each repetition is guaranteed to succeed with probability exactly half. Hence, after several repetitions we should be able to obtain the desired superposition state. *Notice that we can perform [quantum amplitude amplification](https://arxiv.org/abs/quant-ph/0005055) to obtain the desired superposition states with certainty and without measurement. The detail is left as an exercise*.
#
# Below is the procedure to obtain the desired superposition state using classically-conditioned gates (`c_if`). Here, when the second register is zero, we prepare it to record the answer of the quantum beam balance.
# +
# Creating registers
# numberOfCoins qubits for the binary query string and 1 qubit for working and recording the result of quantum balance
qr = QuantumRegister(numberOfCoins+1)
# for recording the measurement on qr
cr = ClassicalRegister(numberOfCoins+1)
circuitName = "QueryStateCircuit"
queryStateCircuit = QuantumCircuit(qr, cr)
N = numberOfCoins
# Create uniform superposition of all strings of length N
for i in range(N):
queryStateCircuit.h(qr[i])
# Perform XOR(x) by applying CNOT gates sequentially from qr[0] to qr[N-1] and storing the result to qr[N]
for i in range(N):
queryStateCircuit.cx(qr[i], qr[N])
# Measure qr[N] and store the result to cr[N]. We continue if cr[N] is zero, or repeat otherwise
queryStateCircuit.measure(qr[N], cr[N])
# we proceed to query the quantum beam balance if the value of cr[0]...cr[N] is all zero
# by preparing the Hadamard state of |1>, i.e., |0> - |1> at qr[N]
queryStateCircuit.x(qr[N]).c_if(cr, 0)
queryStateCircuit.h(qr[N]).c_if(cr, 0)
# we rewind the computation when cr[N] is not zero
for i in range(N):
queryStateCircuit.h(qr[i]).c_if(cr, 2**N)
# -
# ### Constructing the quantum beam balance
#
# The quantum beam balance returns $1$ when the binary query string contains the position of the false coin and $0$ otherwise, provided that the Hamming weight of the binary query string is even. Notice that previously, we constructed the superposition of all binary query strings whose Hamming weights are even. Let $k$ be the position of the false coin, then with regards to the binary query string $|x_1,x_2,\ldots,x_N\rangle|0\rangle$, the quantum beam balance simply returns $|x_1,x_2,\ldots,x_N\rangle|0\oplus x_k\rangle$, that can be realized by a CNOT gate with $x_k$ as control and the second register as target. Namely, the quantum beam balance realizes
#
# $$
# |x_1,x_2,\ldots,x_N\rangle\Big(|0\rangle - |1\rangle\Big) \xrightarrow{\mbox{Quantum Beam Balance}} |x_1,x_2,\ldots,x_N\rangle\Big(|0\oplus x_k\rangle - |1 \oplus x_k\rangle\Big) = \left(-1\right)^{x\cdot e_k} |x_1,x_2,\ldots,x_N\rangle\Big(|0\rangle - |1\rangle\Big)
# $$
#
# Below we apply the quantum beam balance on the desired superposition state.
k = indexOfFalseCoin
# Apply the quantum beam balance on the desired superposition state (marked by cr equal to zero)
queryStateCircuit.cx(qr[k], qr[N]).c_if(cr, 0)
# ### Identifying the false coin
#
# In the above, we have queried the quantum beam balance once. How to identify the false coin after querying the balance? We simply perform a Hadamard transformation on the binary query string to identify the false coin. Notice that, under the assumption that we query the quantum beam balance with binary strings of even Hamming weight, the following equations hold.
#
# \begin{eqnarray}
# \frac{1}{2^{(N-1)/2}}\sum_{x\in \left\{0,1\right\}^N~\mbox{and}~|x|\equiv 0 \mod 2} |x\rangle &\xrightarrow{\mbox{Quantum Beam Balance}}& \frac{1}{2^{(N-1)/2}}\sum_{x\in \left\{0,1\right\}^N~\mbox{and}~|x|\equiv 0 \mod 2} \left(-1\right)^{x\cdot e_k} |x\rangle\\
# \frac{1}{2^{(N-1)/2}}\sum_{x\in \left\{0,1\right\}^N~\mbox{and}~|x|\equiv 0 \mod 2} \left(-1\right)^{x\cdot e_k} |x\rangle&\xrightarrow{H^{\otimes N}}& \frac{1}{\sqrt{2}}\Big(|e_k\rangle+|\hat{e_k}\rangle\Big)
# \end{eqnarray}
#
# In the above, $e_k$ is the bitstring that is $1$ only at the position of the false coin, and $\hat{e_k}$ is its inverse. Thus, by performing the measurement in the computational basis after the Hadamard transform, we should be able to identify the false coin because it is the one whose label is different from the majority: when $e_k$, the false coin is labelled $1$, and when $\hat{e_k}$ the false coin is labelled $0$.
# +
# Apply Hadamard transform on qr[0] ... qr[N-1]
for i in range(N):
queryStateCircuit.h(qr[i]).c_if(cr, 0)
# Measure qr[0] ... qr[N-1]
for i in range(N):
queryStateCircuit.measure(qr[i], cr[i])
# -
# Now we perform the experiment to see how we can identify the false coin by the above quantum circuit. Notice that when we use the `plot_histogram`, the numbering of the bits in the classical register is from right to left, namely, `0100` means the bit with index $2$ is one and the rest are zero.
#
# Because we use `cr[N]` to control the operation prior to and after the query to the quantum beam balance, we can detect that we succeed in identifying the false coin when the left-most bit is $0$. Otherwise, when the left-most bit is $1$, we fail to obtain the desired superposition of query bitstrings and must repeat from the beginning. *Notice that we have not queried the quantum beam balance yet. This repetition is not necessary when we feed the quantum beam balance with the superposition of all bitstrings of even Hamming weight, which can be done with probability one, thanks to quantum amplitude amplification*.
#
# When the left-most bit is $0$, the index of the false coin can be determined by finding the one whose values are different from others. Namely, when $N=8$ and the index of the false coin is $3$, we should observe `011110111` or `000001000`.
# +
backend = Aer.backends("qasm_simulator")[0]
shots = 1 # We perform a one-shot experiment
success = 0
# Run until successful
while not success:
results = execute(queryStateCircuit, backend=backend, shots=shots).result()
answer = results.get_counts()
for key, value in answer.items():
if key[0:1] != "1":
success = 1
plot_histogram(answer)
from collections import Counter
for key in answer.keys():
normalFlag, _ = Counter(key[1:]).most_common(1)[0] #get most common label
    for i in range(1,len(key)):  # check every coin bit, including the one at key[1]
if key[i] != normalFlag:
print("False coin index is: ", len(key) - i - 1)
# -
# ## About Quantum Counterfeit Coin Problem
#
# The case when there is a single false coin, as presented in this notebook, is essentially [the Bernstein-Vazirani algorithm](http://epubs.siam.org/doi/abs/10.1137/S0097539796300921), and the single-query coin-weighing algorithm was first presented in 1997 by [<NAME> Smolin](https://arxiv.org/pdf/quant-ph/9705041.pdf). The Quantum Counterfeit Coin Problem for $k > 1$ in general is studied by [Iwama et al.](https://arxiv.org/pdf/1009.0416.pdf) Whether there exists a quantum algorithm that only needs $o(k^{1/4})$ queries to identify all the false coins remains an open question.
| community/games/quantum_counterfeit_coin_problem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Practical Session 2: Classification algorithms
#
# *Notebook by <NAME>*
# ## 0.1 Your task
#
# In practical 1, you worked with the housing prices and bike sharing datasets on the tasks that required you to predict some value (e.g., price of a house) or amount (e.g., the count of rented bikes, or the number of registered users) based on a number of attributes – age of the house, number of rooms, income level of the house owners for the house price prediction (or weather conditions and time of the day for the prediction of the number of rented bikes). That is, you were predicting some continuous value.
#
# This time, your task is to predict a particular category the instance belongs to based on its characteristics. This type of tasks is called *classification*.
# ## 0.2 Dataset
#
# First you will look into the famous [*Iris dataset*](https://en.wikipedia.org/wiki/Iris_flower_data_set), which was first introduced by the British statistician and biologist <NAME> in his 1936 paper *The use of multiple measurements in taxonomic problems*. The dataset contains $4$ characteristics (sepal length and width, and petal length and width) for $3$ related species of irises – *setosa*, *versicolor* and *virginica*. Your task is to learn to predict, based on these $4$ characteristics, the type of an iris.
#
# For further reference, see the original paper: <NAME> (1936). *The use of multiple measurements in taxonomic problems*. Annals of Eugenics. 7 (2): 179–188.
# ## 0.3 Learning objectives
#
# In this practical you will learn about:
# - binary and multiclass classification
# - linearly separable data
# - the use of a number of classifiers, including Naive Bayes, Logistic Regression, and Perceptron
# - kernel trick
# - ways to evaluate the performance of a classification model, including accuracy, precision, recall and F$_1$ measure
# - precision-recall trade-off and the ways to measure it, including ROC curves and AUC
#
# In addition, you will learn about the dataset uploading routines with `sklearn`.
# ## Step 1: Uploading and inspecting the data
#
# As before, let's start by uploading and looking into the data. In the previous practical, you worked with the data collected independently and stored in a comma-separated file, as the data in a real-life data science project might be. In this practical, you will learn how to use `sklearn`'s data uploading routines.
#
# `sklearn` has a number of datasets to practice your ML skills on, and the `iris` dataset is one of them. Here is how you can access the dataset through `sklearn`. Note that such data fields as *data* and *target* are already pre-defined for you:
from sklearn import datasets
iris = datasets.load_iris()
list(iris.keys())
# Take a look into what is contained in *data*. Remember that each instance of an iris is described in terms of $4$ variables – *sepal length*, *sepal width*, *petal length*, and *petal width*:
iris.data
# To find out what variables are contained in `data`, check the `feature_names` data field.
iris.feature_names
# What about the target values?
iris.target
# There are $3$ classes of irises – *setosa*, *versicolor*, and *virginica* – and they are already converted into numerical values for you (recall that when the dataset is not preprocessed this way, and the target or any of the attributes are represented as text or categorical data, you need to convert them into numerical data). If you want to check what each numerical label corresponds to in the original data, you can do so by accessing the `target_names` data field:
iris.target_names
# Remember, that for further ML experiments, we need to have two data structures: the instance-by-attributes matrix $X$ and the target labels vector $y$. For instance, in the previous practical the regression algorithm learned the vector of weights $w$ to predict the target variable $\hat y^{(i)}$ for each instance $i$ so that its prediction would be maximally close to the actual label $y^{(i)}$. Since the labels $y^{(i)}$ were continuous (i.e., amount, number, or value), you measured the performance of your regressor by the distance between the predictions $\hat y$ and actual labels $y$. In this practical, you will need to work with $X$ and $y$, too, but the vector of $y$ this time will contain discrete values – classes $[0, 1, 2]$ for the different types of the flower.
#
# As you might have already figured out, you need to initialise $X$ and $y$ with the `data` and `target` fields of your iris dataset:
X, y = iris["data"], iris["target"]
print(X.shape)
print(y.shape)
# Let's look closer into the data to get a feel for what is contained in it. As before, let's use visualisations with `matplotlib`, and in particular, plot one attribute against another for the three types of irises using scatterplot. E.g., let's plot sepal length vs. sepal width:
# +
# %matplotlib inline
#so that the plot will be displayed in the notebook
import numpy as np
np.random.seed(42)
import matplotlib
from matplotlib import pyplot as plt
# visualise sepal length vs. sepal width
X = iris.data[:, :2]
y = iris.target
scatter_x = np.array(X[:, 0])
scatter_y = np.array(X[:, 1])
group = np.array(y)
cmap = matplotlib.cm.get_cmap('jet')
cdict = {0: cmap(0.1), 1: cmap(0.5), 2: cmap(0.9)}
labels = iris.target_names
fig, ax = plt.subplots(figsize=(8, 6))
for g in np.unique(group):
ix = np.where(group == g)
ax.scatter(scatter_x[ix], scatter_y[ix], c=np.array([cdict[g]]), #c = cdict[g],
label = labels[g], s = 100, marker = "H",
linewidth=2, alpha = 0.5)
ax.legend()
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.show()
# -
# It looks like *setosa* is quite clearly distinguishable from the other two types of irises with these two features. What about petal length and width?
# +
# visualise petal length vs. petal width
X = iris.data[:, 2:]
y = iris.target
scatter_x = np.array(X[:, 0])
scatter_y = np.array(X[:, 1])
group = np.array(y)
cmap = matplotlib.cm.get_cmap('jet')
cdict = {0: cmap(0.1), 1: cmap(0.5), 2: cmap(0.9)}
labels = iris.target_names
fig, ax = plt.subplots(figsize=(8, 6))
for g in np.unique(group):
ix = np.where(group == g)
ax.scatter(scatter_x[ix], scatter_y[ix], c=np.array([cdict[g]]), #c = cdict[g],
label = labels[g], s = 100, marker = "H",
linewidth=2, alpha = 0.5)
ax.legend()
plt.xlabel('Petal length')
plt.ylabel('Petal width')
plt.show()
# -
# This plot shows an even clearer separation between the class of *setosa* irises and the other two classes. In fact, with respect to these two attributes, it might be possible to clearly separate not only *setosas* from the other two classes, but also, with certain success, *versicolors* from *virginicas*.
#
# When the data can be separated by a straight line (or a single decision surface) as in the example above, it is called *linearly separable*. This property of the data is successfully exploited by ML models that try to learn a linear separation boundary between the classes. In fact, there is a whole set of lines that you can use to separate the class of *setosas* from the other two classes, *versicolors* and *virginicas*, in this example. Some linear models explicitly allow you to select the *best* separation boundary by maximising the distance between the boundary and the closest instances of the two classes. Such are, for example, [Support Vector Machines](http://scikit-learn.org/stable/modules/svm.html), which will be covered in the [Part II Machine Learning and Bayesian Inference course](https://www.cl.cam.ac.uk/teaching/2021/MLBayInfer/):
# +
# visualise petal length vs. petal width
X = iris.data[:, 2:]
y = iris.target
scatter_x = np.array(X[:, 0])
scatter_y = np.array(X[:, 1])
group = np.array(y)
cmap = matplotlib.cm.get_cmap('jet')
cdict = {0: cmap(0.1), 1: cmap(0.5), 2: cmap(0.9)}
labels = iris.target_names
fig, ax = plt.subplots(figsize=(8, 6))
for g in np.unique(group):
ix = np.where(group == g)
ax.scatter(scatter_x[ix], scatter_y[ix], c=np.array([cdict[g]]), #c = cdict[g],
label = labels[g], s = 100, marker = "H",
linewidth=2, alpha = 0.5)
ax.legend()
plt.xlabel('Petal length')
plt.ylabel('Petal width')
for i in range(0, 9):
plt.plot([3 + 0.5*i, 0], [0, 3-0.25*i], 'k-', color=cmap(0.1*(i+1)))
plt.show()
# -
# For the sake of consistency, let's plot all the pairs of features against each other. Note that all plots confirm that *setosas* are linearly separable from the other two types of irises (you might notice an occasional outlier, though), while *versicolors* are linearly separable from *virginicas* with respect to some attributes only:
# +
fig = plt.figure(figsize=(16, 12))
fig.subplots_adjust(hspace=0.2, wspace=0.2)
X = iris.data
y = iris.target
labels = iris.target_names
index = 1
for i in range(0, X.shape[1]):
for j in range(0, X.shape[1]):
scatter_x = np.array(X[:, i])
scatter_y = np.array(X[:, j])
group = np.array(y)
cmap = matplotlib.cm.get_cmap('jet')
cdict = {0: cmap(0.1), 1: cmap(0.5), 2: cmap(0.9)}
ax = fig.add_subplot(X.shape[1], X.shape[1], index)
index+=1
for g in np.unique(group):
ix = np.where(group == g)
ax.scatter(scatter_x[ix], scatter_y[ix], c=np.array([cdict[g]]), #c = cdict[g],
label = labels[g], s = 50, marker = "H",
linewidth=1, alpha = 0.5)
plt.show()
# -
# ## Step 2: Splitting the data into training and test subsets
#
# Before applying the classifiers, let's split the dataset into the training and test sets. Recall, that when building an ML model, all further data exploration, feature selection and scaling, model selection and fine-tuning should be done on the training data, and the test data should only be used at the final step to evaluate the best estimated model:
# +
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(X, test_size=0.2)
print(len(train_set), "training instances +", len(test_set), "test instances")
# -
# As before, you want your training and test data to contain enough representative examples of each class; that is, you should apply `StratifiedShuffleSplit` rather than random splitting:
# +
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
split.get_n_splits(X, y)
print(split)
for train_index, test_index in split.split(X, y):
print("TRAIN:", len(train_index), "TEST:", len(test_index))
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
# -
# Let's check the class proportions in the original dataset, and training and test subsets:
# +
import pandas as pd
# def original_proportions(data):
# props = {}
# for value in set(data["target"]):
# data_value = [i for i in data["target"] if i==value]
# props[value] = len(data_value) / len(data["target"])
# return props
def subset_proportions(subset):
props = {}
for value in set(subset):
data_value = [i for i in subset if i==value]
props[value] = len(data_value) / len(subset)
return props
compare_props = pd.DataFrame({
"Overall": subset_proportions(iris["target"]),
"Stratified tr": subset_proportions(y_train),
"Stratified ts": subset_proportions(y_test),
})
compare_props["Strat. tr %error"] = 100 * compare_props["Stratified tr"] / compare_props["Overall"] - 100
compare_props["Strat. ts %error"] = 100 * compare_props["Stratified ts"] / compare_props["Overall"] - 100
compare_props.sort_index()
# -
# The original dataset is well-balanced – it contains exactly $50$ examples for each class. With the stratified data splits, you get equal proportions of each type of the irises in the training and test sets, too.
#
# Now, let's first approach the classification task in a simpler setting: let's start with *binary classification* and try to predict whether an iris is of a particular type: e.g., *setosa* vs. *not-a-setosa*, or *versicolor* vs. *not-a-versicolor*.
#
#
# ## Case 1: Binary classification
#
# Let's start by separating the data that describes *setosa* from other data.
y_train_setosa = (y_train == 0) # will return True when the label is 0 (i.e., setosa)
y_test_setosa = (y_test == 0)
y_test_setosa
# `y_test_setosa` returns a boolean vector of $30$ test instances: it contains `True` for the test instances that are *setosas*, and `False` otherwise. Let's pick one example of a *setosa* – for instance, the first one from the test set, `X_test[0]`, for further evaluations.
setosa_example = X_test[0]
# As you've noticed above, *setosas* are linearly separable from the other two classes, so it would be reasonable to apply a linear model to the task of separating *setosas* from *not-setosas*.
#
# ### Perceptron
#
# A (single-layer) perceptron is a simple linear classifier that tries to learn the set of weights $w$ for the input vectors $X$ in order to predict the output binary class values $y$. In particular:
#
# \begin{equation}
# \hat y^{(i)}=\begin{cases}
# 1, & \text{if $w \cdot x^{(i)} + b > 0$}\\
# 0, & \text{otherwise}
# \end{cases}
# \end{equation}
#
# where $w \cdot x^{(i)}$ is the dot product of weight vector $w$ and the feature vector $x^{(i)}$ for the instance $i$, $\sum_{j=1}^{m} w_{j}x_{j}^{(i)}$, and $b$ is the bias term.
#
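# As a minimal numeric illustration of the decision rule above (the weights, bias and input below are made up for illustration, not learned):
# +
import numpy as np
w = np.array([0.2, -0.4, 0.1, 0.3])    # hypothetical weight vector, one weight per feature
b = -0.1                               # hypothetical bias term
x = np.array([5.1, 3.5, 1.4, 0.2])     # one iris-like feature vector
y_hat = int(np.dot(w, x) + b > 0)      # predict 1 if the weighted sum exceeds 0, else 0
print(y_hat)
# -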
# `sklearn` has a perceptron implementation through its linear [`SGDClassifier`](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html#sklearn.linear_model.SGDClassifier) with the following parameter settings:
# +
from sklearn.linear_model import SGDClassifier
sgd = SGDClassifier(max_iter=5, tol=None, random_state=42,
loss="perceptron", eta0=1, learning_rate="constant", penalty=None)
sgd.fit(X_train, y_train_setosa)
sgd.predict([setosa_example])
# -
# The perceptron correctly predicts that `X_test[0]` is a *setosa* flower. However, as you've seen above, not all types of irises are linearly separable. Let's select a more challenging example of a *versicolor* (class $1$) for comparison.
y_train_versicolor = (y_train == 1) # True when the label is 1 (i.e., versicolor)
y_test_versicolor = (y_test == 1)
y_test_versicolor
# Select one of the examples to try your classifier on (array indexing in python starts with $0$, so you can pick indexes $2$, $3$, $5$ and so on):
# +
versicolor_example = X_test[17]
print("Class", y_test[17], "(", iris.target_names[y_test[17]], ")")
sgd.fit(X_train, y_train_versicolor)
print(sgd.predict([versicolor_example]))
# -
# Looks like the perceptron indeed cannot predict the class of the *versicolor* example correctly. Let's see if another linear classifier – `Logistic Regression` – can do a better job.
#
# ### Logistic Regression
#
# You used `Linear Regression` model in practical 1 to predict continuous values. In contrast, `Logistic Regression` is used for binary classification, that is, to predict a discrete value of $0$ or $1$. In particular, it estimates the probability that an instance belongs to a particular class (e.g., that $X\_test[0] \in {setosa}$). If the probability is greater than $50\%$, the instance is classified as *setosa* (positive class, labelled $1$ or $True$). Otherwise, it is classified as *not-a-setosa* (negative class, labelled $0$ or $False$).
#
# Similarly to `Linear Regression`, `Logistic Regression` computes a weighted sum using the input features $w \cdot X$ plus an intercept ($w_0$), but instead of outputting the result as `Linear Regression` does, it further applies a *sigmoid function* to this result:
#
# \begin{equation}
# \hat p = \sigma (w \cdot X)
# \end{equation}
#
# The sigmoid function outputs a value between $0$ and $1$:
#
# \begin{equation}
# \sigma (t) = \frac{1}{1 + exp(-t)}
# \end{equation}
#
# Once the Logistic Regression model has estimated the probability $\hat p$, the label $\hat y$ is predicted as follows:
#
# \begin{equation}
# \hat y=\begin{cases}
# 1, & \text{if $\hat p \geq 0.5$}\\
# 0, & \text{otherwise}
# \end{cases}
# \end{equation}
#
# Note that the above is equivalent to:
#
# \begin{equation}
# \hat y=\begin{cases}
# 1, & \text{if $t \geq 0$}\\
# 0, & \text{otherwise}
# \end{cases}
# \end{equation}
#
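# A small numerical illustration of the sigmoid and the resulting decision rule (a sketch with arbitrary scores $t$, not a fitted model):
# +
import numpy as np
def sigmoid(t):
    return 1 / (1 + np.exp(-t))
for t in [-3, -0.5, 0, 0.5, 3]:
    print(t, round(float(sigmoid(t)), 3), int(sigmoid(t) >= 0.5))   # score, probability, predicted label
# -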
# Let's apply a `Logistic Regression` model to our setosa example:
# +
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(X_train, y_train_setosa)
# -
log_reg.predict([setosa_example])
# The model correctly predicts that the test example is indeed a setosa. What about the versicolor example?
log_reg.fit(X_train, y_train_versicolor)
log_reg.predict([versicolor_example])
# Finally, for comparison, let's introduce one more classifier, that some of you might have come across before.
#
# ### Naive Bayes
#
# If you did [Part IA Machine Learning and Real-World Data](https://www.cl.cam.ac.uk/teaching/1920/MLRD/) in the past, you might recall that you have already come across classification tasks: for example, you were asked to build a classifier that identifies sentiment in text, and you used `Naive Bayes` for that. `Naive Bayes` makes different assumptions about the data. In particular, it doesn't assume linear separability but makes the predictions based on the prior and the updated belief about the data.
#
# To remind you, on a two class problem (e.g., distinguishing between classes $0$ for *not-setosas* and $1$ for *setosas*), a Naive Bayes model will predict:
#
# \begin{equation}
# \hat y^{(i)} = argmax_{c \in (0, 1)} p(y=c | x^{(i)}) = \begin{cases}
# 1, & \text{if $\hat p(y=1 | x^{(i)}) > \hat p(y=0 | x^{(i)}$})\\
# 0, & \text{otherwise}
# \end{cases}
# \end{equation}
#
# where the probabilities are conditioned on the feature vector for $x^{(i)}$, i.e. $(f^{(i)}_{1}, ..., f^{(i)}_{n})$. In practice, it is impossible to estimate these probabilities exactly, and one uses Bayes' theorem so that:
#
# \begin{equation}
# \hat p(y=c | x^{(i)}) = \frac{p(c) p(x^{(i)} | c)}{p(x^{(i)})}
# \end{equation}
#
# where $c \in \{0, 1\}$ is the class to be predicted. Since the denominator is the same for both estimates of $\hat p(y=1 | x^{(i)})$ and $\hat p(y=0 | x^{(i)})$, it can be omitted. Therefore, the estimate can be simplified to:
#
# \begin{equation}
# \hat y^{(i)} = argmax_{c \in (0, 1)} p(y=c | x^{(i)}) = argmax_{c \in (0, 1)} p(c) p(x^{(i)} | c)
# \end{equation}
#
# where $p(c)$ is the *prior belief* of the classifier about the distribution of the classes in the data, and $p(x^{(i)} | c)$ is the *likelihood* of the observed features given the class. Both can be estimated from the training data (for instance, using maximum likelihood or *maximum a posteriori (MAP)* estimation). Moreover, the "naive" independence assumption of this learning algorithm allows you to estimate $p(x^{(i)} | c)$ as a product of feature probabilities taken independently of each other, i.e.:
#
# \begin{equation}
# p(x^{(i)} | c) = p(f^{(i)}_{1}, ..., f^{(i)}_{n} | c) \approx p(f^{(i)}_{1} | c) \times ... \times p(f^{(i)}_{n} | c)
# \end{equation}
#
# `sklearn` has a number of implementations of a [`Naive Bayes`](http://scikit-learn.org/stable/modules/naive_bayes.html) algorithm, which mainly differ from each other with respect to how they estimate the conditional probability above from the data and what assumptions they make about its distribution, e.g.:
# +
from sklearn.naive_bayes import GaussianNB, MultinomialNB
gnb = MultinomialNB() # or:
gnb = GaussianNB()
gnb.fit(X_train, y_train_setosa)
gnb.predict([setosa_example])
# +
gnb.fit(X_train, y_train_versicolor)
gnb.predict([versicolor_example])
# -
# As you can see, not all classifiers perform equally well. How do you measure their performance in a more comprehensive way?
#
# ## Step 3: Evaluation
#
# ### Performance measures
#
# The most straightforward way to evaluate a classifier is to estimate how often its predictions are correct. This estimate is called *accuracy* and it is calculated as follows:
#
# \begin{equation}
# ACC = \frac{num(\hat y == y)}{num(\hat y == y) + num(\hat y \neq y)} = \frac{num(\hat y == 1 \& y == 1) + num(\hat y == 0 \& y == 0)}{num(\hat y == 1 \& y == 1) + num(\hat y == 0 \& y == 0) + num(\hat y == 1 \& y == 0) + num(\hat y == 0 \& y == 1)}
# \end{equation}
#
# E.g., for the *setosa* classification example, an accuracy of the classifier is the ratio of correctly identified *setosas* and correctly identified *not-setosas* to the total number of examples.
#
# You can either import accuracy metric using `from sklearn.metrics import accuracy_score` or measure accuracy across multiple cross-validation folds (refer to practical 1, if you need to remind yourself what cross-validation does):
# +
from sklearn.model_selection import cross_val_score
print(cross_val_score(log_reg, X_train, y_train_setosa, cv=5, scoring="accuracy"))
print(cross_val_score(gnb, X_train, y_train_setosa, cv=5, scoring="accuracy"))
print(cross_val_score(sgd, X_train, y_train_setosa, cv=5, scoring="accuracy"))
# -
# All three classifiers are perfectly accurate in their predictions on the *setosa* example. What about the *versicolor* example?
print(cross_val_score(log_reg, X_train, y_train_versicolor, cv=5, scoring="accuracy"))
print(cross_val_score(gnb, X_train, y_train_versicolor, cv=5, scoring="accuracy"))
print(cross_val_score(sgd, X_train, y_train_versicolor, cv=5, scoring="accuracy"))
# The numbers differ but they still do not tell you much about the performance of a classifier in general. E.g., an accuracy of $\approx 0.71$ is far from perfect, but exactly how bad or how acceptable is it?
#
# Let's implement a brute-force algorithm that will simply predict *not-versicolor* for every instance in the *versicolor* detection case (or *not-setosa* in the *setosa* detection case). Here is how well it will perform:
# +
from sklearn.base import BaseEstimator
class NotXClassifier(BaseEstimator):
def fit(self, X, y=None):
pass
def predict(self, X):
return np.zeros((len(X), 1), dtype=bool)
notversicolor_clf = NotXClassifier()
cross_val_score(notversicolor_clf, X_train, y_train_versicolor, cv=5, scoring="accuracy")
# -
# This gives you a very clear benchmark for comparison. The above represents a *majority class baseline*: for each split in cross-validation splits, it measures the proportion of the majority class (*not-versicolor* in this case). That is, if the classifier does nothing and simply returns the majority class label every time, this is how "well" it will perform. Obviously, you want the actual classifier that you build to do better than that.
# ### Confusion matrix
#
# So now you can compare the accuracy (e.g., the proportion of correctly identified *setosas* and *not-setosas* in the dataset) to the baseline system. However, this doesn't help you understand *where* your classifier goes wrong. For example, does the low accuracy of the classifiers in the *versicolor* identification case suggest that they miss some *versicolors* and classify them as other types of irises, or does it suggest that they mistake other types of irises for *versicolors*? Or, perhaps, it's a combination of two types of mistakes?
#
# The accuracy score itself doesn't allow you to make any of these conclusions. What you need to do is to look into the number of correctly and incorrectly classified instances, and the data representation that helps you do that is called *confusion matrix*. A confusion matrix is simply a table that compares the number of actual instances of type $c$ to the number of predicted instances of type $c$, i.e.:
#
# | | predicted $\hat c=0$| predicted $\hat c=1$ |
# | ------------- | :-------------: | :-------------: |
# | **actual $c=0$** | TN | FP |
# | **actual $c=1$** | FN | TP |
#
# The instances that are classified correctly as well as those that are misclassified have certain importance for evaluation of your classification algorithms. Here is the terminology:
#
# - `TP` stands for *true positives*. These are the actual instances of class $1$ that are correctly classified as class $1$ (*setosas* identified as *setosas*);
# - `TN` stands for *true negatives*. These are the actual instances of class $0$ that are correctly classified as class $0$ (*not-setosas* identified as *not-setosas*);
# - `FN` stands for *false negatives*. These are the actual instances of class $1$ that are incorrectly classified as class $0$ (*setosas* identified as *not-setosas*, i.e. missed by the classifier);
# - and finally, `FP` are *false positives*. These are the actual instances of class $0$ that are incorrectly classified as class $1$ (*not-setosas* identified as *setosas*, i.e. other types of flower mistaken for setosas by the classifier).
#
# Now you can re-interpret the accuracy score as:
#
# \begin{equation}
# ACC = \frac{TP + TN}{TP + TN + FP + FN}
# \end{equation}
#
# So in order to maximise accuracy, you'll need to maximise on the number of true positives and true negatives.
#
# The confusion matrix also tells you what exactly is misclassified by the classifier. For example, let's look into the confusion matrices for *setosa* identification:
# +
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
y_train_pred = cross_val_predict(log_reg, X_train, y_train_setosa, cv=5)
confusion_matrix(y_train_setosa, y_train_pred)
# -
y_train_pred = cross_val_predict(gnb, X_train, y_train_setosa, cv=5)
confusion_matrix(y_train_setosa, y_train_pred)
# You know that the accuracy of these classifiers on *setosa* identification equals to $1$, so all *setosas* ($40$ instances) and *not-setosas* ($80$ instances) are classified correctly, and the confusion matrices show exactly that. What about the mistakes that the classifiers make on the *versicolor* example?
y_train_pred = cross_val_predict(log_reg, X_train, y_train_versicolor, cv=5)
confusion_matrix(y_train_versicolor, y_train_pred)
y_train_pred = cross_val_predict(gnb, X_train, y_train_versicolor, cv=5)
confusion_matrix(y_train_versicolor, y_train_pred)
# The matrices above show that the `Logistic Regression` classifier correctly identifies $71$ *not-versicolors* (`TN`) and $15$ *versicolors* (`TP`), while it also misses $25$ *versicolors* (`FN`) and mistakes $9$ other flowers for *versicolors* (`FP`). Now you can see that the bigger source of error for the `Logistic Regression` classifier is that it is not good enough at identifying *versicolors*: it misses more of them than it identifies ($25$ vs $15$). In contrast, `Naive Bayes` identifies more *versicolors* in general ($36 + 5$) and more of them correctly ($36$), while it misses only $4$ *versicolor* instances.
#
# Apart from making such observations, you might want to be able to make sense of the number of correctly and incorrectly classified examples and compare the performance of the different classifiers in an aggregated way. The following set of measures will help you do so: *precision* ($P$), *recall* ($R$), and $F_{\beta}$*-measure* (e.g., $F_1$).
#
# - *Precision* measures how reliable or trustworthy your classifier is. It tells you how often when the classifier predicts that a flower is a *versicolor* (class $1$) it actually is a *versicolor*. It relies on the number of $TP$'s and $FP$'s:
# \begin{equation}
# P = \frac{TP}{TP+FP}
# \end{equation}
#
# - *Recall* measures the coverage of your classifier. It tells you how many of the actual instances of *versicolor* your classifier can detect at all. It relies on the number of $TP$'s and $FN$'s:
# \begin{equation}
# R = \frac{TP}{TP+FN}
# \end{equation}
#
# - Finally, $F_1$*-score* combines the two measures above to give you an overall idea of your classifier's performance. $F_1$*-score* is estimated as follows:
# \begin{equation}
# F_1 = 2 \times \frac{P \times R}{P+R}
# \end{equation}
#
# It is the *harmonic mean* of the two measures. However, if you want to highlight the importance of precision in your task (and we'll talk about when you might want to do so later in this practical) or the importance of recall, you can do so by changing the $\beta$ coefficient of the *F-score*. In fact, the $F_1$*-score* is the most commonly used case of a more general $F_{\beta}$*-measure*, which is estimated as follows:
# \begin{equation}
# F_{\beta} = (1 + \beta^2) \times \frac{P \times R}{\beta^2 \times P+R}
# \end{equation}
#
# $\beta < 1$ puts more emphasis on precision than on recall, and $\beta > 1$ weighs recall higher than precision.
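# `sklearn` exposes this general form directly as `fbeta_score`; a short sketch on toy labels (not the iris data) to show how $\beta$ shifts the emphasis:
# +
from sklearn.metrics import fbeta_score
y_true = [1, 1, 1, 1, 0, 0, 0, 0]
y_pred = [1, 1, 0, 0, 1, 0, 0, 0]              # precision = 2/3, recall = 1/2
print(fbeta_score(y_true, y_pred, beta=0.5))   # leans towards precision
print(fbeta_score(y_true, y_pred, beta=1))     # the usual F1 score
print(fbeta_score(y_true, y_pred, beta=2))     # leans towards recall
# -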
# +
from sklearn.metrics import precision_score, recall_score, f1_score
y_train_pred = cross_val_predict(gnb, X_train, y_train_versicolor, cv=5)
precision = precision_score(y_train_versicolor, y_train_pred) # == 36 / (36 + 5)
recall = recall_score(y_train_versicolor, y_train_pred) # == 36 / (36 + 4)
f1 = f1_score(y_train_versicolor, y_train_pred)
print(precision, recall, f1)
y_train_pred = cross_val_predict(log_reg, X_train, y_train_versicolor, cv=5)
precision = precision_score(y_train_versicolor, y_train_pred) # == 15 / (15 + 9)
recall = recall_score(y_train_versicolor, y_train_pred) # == 15 / (15 + 25)
f1 = f1_score(y_train_versicolor, y_train_pred)
print(precision, recall, f1)
# -
# The numbers above tell you that the `Naive Bayes` classifier has pretty good recall ($0.9$, or it identifies $90\%$ of *versicolor* instances) as well as precision ($\approx 0.88$, or in approximately $88\%$ of the cases you can trust its *versicolor* prediction). The two numbers are close to each other in range, so the $F_{1}$ score is high, too. The `Logistic Regression` classifier is less precise (you can trust it about $62.5\%$ of the time) and it misses many *versicolor* examples (identifying only $37.5\%$ of those).
#
# Obviously, if your algorithm gets it all right, you will end up with perfect accuracy, precision and recall. In practice, however, the algorithms often don't get it all perfectly correct, and depending on the task at hand, you might decide that you are actually mostly interested in high precision of your algorithm, or in high recall.
#
# For example, imagine you are working on a machine learning classifier that detects cancerous cases in the data coming from patients' analyses. The task of your algorithm is to detect whether the patient needs further tests and closer analysis based on the preliminary tests. The cost of *false negatives* (missed cancerous cases) in this task is very high and it's better to administer further tests for a patient about which the algorithm has doubts than to ignore the case altogether. In this case, you should prioritise recall over precision.
#
# On the other hand, imagine you work for a pharmaceutical company trying to detect whether a particular drug will be applicable to a particular condition or a particular group of patients using data science and machine learning. Or, you want to learn in a data-driven way [when to change the drug dosage for patients in a hospital](http://www.cs.cornell.edu/people/tj/publications/morik_etal_99a.pdf). In these cases, the cost of *false positives*, i.e. deciding that the drug is suitable for a particular patient when it is not, or deciding to intervene in patients' treatment when the dosage should be kept as before, is more costly. These are the cases when you should prioritise precision over recall.
#
#
# ### Precision-recall trade-off
#
# Often, you can achieve perfect precision if you lower the recall, and the other way around. For example, by always predicting *versicolor* you will reach perfect recall (all *versicolor* instances will be covered by such a prediction), but very low precision (such predictions will mostly be unreliable, as your classifier will predict *versicolor* for all setosas and all virginicas as well). On the other hand, to maximise precision you will often need to constrain your classifier so that it returns fewer false positives and is therefore more conservative.
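# A quick sanity check of the first point: a sketch that reuses `y_train_versicolor` from above and predicts the positive class for every instance:
# +
import numpy as np
from sklearn.metrics import precision_score, recall_score
always_versicolor = np.ones(len(y_train_versicolor), dtype=int)              # predict "versicolor" everywhere
print(precision_score(y_train_versicolor.astype(int), always_versicolor))   # low precision (about 1/3 here)
print(recall_score(y_train_versicolor.astype(int), always_versicolor))      # perfect recall (1.0)
# -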
#
# A good way to understand how a classifier makes its predictions is to look into its `decision_function` – the confidence score of the classifier's predictions. For `Logistic Regression` it returns a signed distance to the separation hyperplane selected by the model. Let's check the confidence score of the `Logistic Regression` model on the *versicolor* example:
# +
log_reg.fit(X_train, y_train_versicolor)
y_scores = log_reg.decision_function([versicolor_example])
y_scores
# -
# When this confidence score is higher than the predefined model's threshold, the model assigns the instance to the positive class, and otherwise it assigns the instance to the negative class. The `Logistic Regression`'s default threshold is $0$, so the example above is classified as *versicolor*:
threshold = 0
y_versicolor_pred = (y_scores > threshold)
y_versicolor_pred
# However, what would happen if you change the threshold to, for example, $0.7$? As you expect, this particular instance will be classified as *not-a-versicolor*. What effect will it have on the precision and recall of your classifier as a consequence?
threshold = 0.7
y_versicolor_pred = (y_scores > threshold)
y_versicolor_pred
# The following will return the confidence scores for each of the training set instances:
y_scores = cross_val_predict(log_reg, X_train, y_train_versicolor, cv=5, method="decision_function")
y_scores
# Let's use `sklearn`'s [`precision_recall_curve`](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html) functionality and plot the precision and recall values against the different threshold values. This shows you how the measures will be affected by changing the confidence threshold:
# +
from sklearn.metrics import precision_recall_curve
precisions, recalls, thresholds = precision_recall_curve(y_train_versicolor, y_scores)
def plot_pr_vs_threshold(precisions, recalls, thresholds):
plt.plot(thresholds, precisions[:-1], "b--", label="Precision")
plt.plot(thresholds, recalls[:-1], "g--", label="Recall")
plt.xlabel("Threshold")
plt.legend(loc="upper right")
plt.ylim([0, 1])
plot_pr_vs_threshold(precisions, recalls, thresholds)
plt.show()
# -
# As you can see, recall has a general tendency to decrease as you increase the threshold – that is, the more conservative the classifier becomes, the more instances it is likely to miss. At the same time, the precision curve is bumpier: at certain threshold values it may also drop, as the classifier misidentifies some other types of irises as *versicolor*.
#
# You can also plot precision values against recall values and track the changes in precision against recall:
# +
def plot_precision_vs_recall(precisions, recalls):
plt.plot(recalls, precisions, "b-", linewidth=2)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.axis([0, 1, 0, 1])
plot_precision_vs_recall(precisions, recalls)
plt.show()
# -
# ### The Receiver Operating Characteristic (ROC)
#
# Another widely used way to inspect and present the results is to plot the receiver operating characteristic (ROC) curve. Similarly to the precision / recall curve above, it shows how the performance of the classifier changes at different threshold values. In particular, it plots the *true positive rate (TPR)* against the *false positive rate (FPR)*. Here is some more machine learning terminology:
#
# - *True positive rate (TPR)* is simply another term for recall. Alternatively, it is also called *sensitivity*, or *probability of detection*;
# - *False positive rate (FPR)* is also referred to as *fall-out* or *probability of false alarm*, and it can be calculated as $(1 - specificity)$. *Specificity*, in turn, is estimated as:
#
# \begin{equation}
# specificity = \frac{TN}{TN + FP}
# \end{equation}
#
# In other words, *specificity* expresses the probability that the classifier correctly identifies *non-versicolors* in the *versicolor* identification example. *FPR*, therefore, shows how many incorrect *versicolor* votes (false alarms) the classifier makes for all *non-versicolor* examples while testing.
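#
# As a quick numerical illustration (on toy arrays rather than the iris data), all of these quantities can be read off a binary confusion matrix:
# +
from sklearn.metrics import confusion_matrix

y_true_toy = [0, 0, 0, 0, 1, 1]
y_pred_toy = [0, 0, 1, 0, 1, 0]
tn, fp, fn, tp = confusion_matrix(y_true_toy, y_pred_toy).ravel()
specificity = tn / (tn + fp)
fpr = fp / (fp + tn)   # equals 1 - specificity
tpr = tp / (tp + fn)   # recall / sensitivity
print(specificity, fpr, tpr)
# -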
#
# The ROC curve shows how the sensitivity of the classifier increases as a function of (i.e., at the expense of) the fall-out. A perfect classifier would have $100\%$ sensitivity (no false negatives) and $100\%$ specificity (no false positives), which in the ROC space corresponds to the point in the upper left corner with coordinates $(0,1)$. The closer the actual ROC curve gets to this point, the better the classifier. A random guess would give a point along the diagonal from the bottom left to the top right corner. Points above the diagonal represent good classification results (better than random); points below it represent bad results (worse than random).
#
# To plot the ROC curve, let's first estimate the *FPR* and *TPR* for different threshold values using `sklearn`'s `roc_curve` functionality, and then plot *FPR* and *TPR* using `matplotlib`:
# +
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_train_versicolor, y_scores)
def plot_roc_curve(fpr, tpr, label=None):
plt.plot(fpr, tpr, linewidth=2, label=label)
plt.plot([0, 1], [0, 1], "k--")
plt.axis([0, 1, 0, 1.01])
plt.xlabel("False positive rate (fpr)")
plt.ylabel("True positive rate (tpr)")
plot_roc_curve(fpr, tpr)
plt.show()
# -
# Another characteristic of the ROC curve is the *area under the curve (AUC)*, which for a perfect classifier will equal $1$ and for a random one will equal $0.5$. An estimate for an actual classifier thus usually lies between these two values:
from sklearn.metrics import roc_auc_score
roc_auc_score(y_train_versicolor, y_scores)
# Now, the `Logistic Regression` classifier does not perform very well on the *versicolor* identification task, and the best classifier among the three so far is `Naive Bayes`. Let's compare its performance to `Logistic Regression` and visualise the difference using the ROC curve. Note that the `Naive Bayes` classifier implementation in `sklearn` doesn't provide a `decision_function`; [`predict_proba`](https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.GaussianNB.html#sklearn.naive_bayes.GaussianNB.predict_proba) is its equivalent for `Naive Bayes`:
# +
y_probas_gnb = cross_val_predict(gnb, X_train, y_train_versicolor, cv=3, method="predict_proba")
y_scores_gnb = y_probas_gnb[:, 1] # score = proba of the positive class
fpr_gnb, tpr_gnb, thresholds_gnb = roc_curve(y_train_versicolor, y_scores_gnb)
plt.plot(fpr, tpr, "b:", label="Logistic Regression")
plot_roc_curve(fpr_gnb, tpr_gnb, "Gaussian Naive Bayes")
plt.legend(loc="lower right")
plt.show()
# -
# The curve for `Naive Bayes` is much closer to the upper left corner, which shows that the classifier is better than `Logistic Regression` on this task. Let's estimate the AUC as well:
roc_auc_score(y_train_versicolor, y_scores_gnb)
# ## Step 4: Data transformations
#
# So far, you have trained three classifiers on two classification tasks – identification of *setosas* and *versicolors*. You've seen that all three classifiers perform well on the *setosas* identification example as the data is linearly separable, and only `Naive Bayes` performs well in the *versicolors* case, because the data with the given features is not linearly separable.
#
# For example, consider a decision boundary that a `Logistic Regression` classifier would learn for the linearly separable class of *setosas* (based on the first two features):
# +
X = iris.data[:, :2] # consider the first two features for plotting (as in Step 1)
y = iris.target
scatter_x = np.array(X[:, 0])
scatter_y = np.array(X[:, 1])
group = np.array(y)
cmap = matplotlib.cm.get_cmap('jet')
cdict = {0: cmap(0.1), 1: cmap(0.5), 2: cmap(0.9)}
labels = iris.target_names
fig, ax = plt.subplots(figsize=(8, 6))
for g in np.unique(group):
ix = np.where(group == g)
ax.scatter(scatter_x[ix], scatter_y[ix], c=np.array([cdict[g]]), #c = cdict[g],
label = labels[g], s = 100, marker = "H",
linewidth=2, alpha = 0.5)
log_reg.fit(X_train[:, :2], y_train_setosa) # train the classifier for setosas, using the first two features only
w = log_reg.coef_[0]
i = log_reg.intercept_[0]
xx = np.linspace(4, 8) # generate some values for feature1 (sepal length) in the appropriate range of values
yy = -(w[0]*xx + i)/w[1] # estimate the value for feature2 (sepal width) based on the learned weights and intercept
plt.plot(xx, yy, 'b--')
ax.legend()
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.show()
# -
# and compare it to the case of non-linearly separable class of *versicolors*:
# +
fig, ax = plt.subplots(figsize=(8, 6))
for g in np.unique(group):
ix = np.where(group == g)
ax.scatter(scatter_x[ix], scatter_y[ix], c=np.array([cdict[g]]), #c = cdict[g],
label = labels[g], s = 100, marker = "H",
linewidth=2, alpha = 0.5)
log_reg.fit(X_train[:, :2], y_train_versicolor)
w = log_reg.coef_[0]
i = log_reg.intercept_[0]
xx = np.linspace(4, 8)
yy = -(w[0]*xx + i)/w[1]
plt.plot(xx, yy, 'g--')
ax.legend()
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.show()
# -
# The original representation of the data that you've been using so far is simply not expressive enough. But what if you could transform this data in such a way that it becomes linearly separable? In fact, there is a "trick" for this problem that involves mapping the original data into a space with a number of additional dimensions, such that in this new high-dimensional space the classes become linearly separable. This "trick" is commonly known as the *kernel trick* or *kernel method*.
#
# ### Kernel trick and approximate kernel maps
#
# You will most commonly hear about this method in the context of Support Vector Machines (SVMs), for example in the [Part II Machine Learning and Bayesian Inference course](https://www.cl.cam.ac.uk/teaching/2021/MLBayInfer/). However, some other linear classifiers, including the [perceptron](https://en.wikipedia.org/wiki/Kernel_perceptron), allow kernel methods to be used as well.
#
# The general motivation behind the kernel trick is as follows: when the data is not linearly separable, i.e. there is no clear dividing boundary between the two classes, the kernel trick allows you to transform the data using a number of additional dimensions so that such a clear dividing boundary can be learned. Kernel methods require a user-specified kernel, i.e. a function that will transform the original data into a higher dimensional space. Polynomial and Gaussian (also known as *radial-basis function*) transformations are among the most widely used kernel functions. These data transformations might remind you of the polynomial feature transformation used with linear regression in practical 1: recall that you cast the original features from a space where the relation between the features and the output cannot be captured exactly by a linear function into a higher dimensional feature space using a polynomial function. Recall also that this allowed you to apply a linear function in this new feature space, leading to an improved result.
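#
# To make the analogy with practical 1 concrete, here is a small sketch (assuming `X_train` from the earlier cells) of how an explicit polynomial map expands the feature space; the kernel trick achieves a comparable effect implicitly:
# +
from sklearn.preprocessing import PolynomialFeatures

poly = PolynomialFeatures(degree=3)
X_train_poly = poly.fit_transform(X_train)
print(X_train.shape, "->", X_train_poly.shape)  # 4 original features expand to 35 polynomial features
# -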
#
# You will learn more about kernel functions and their application to SVMs in the Part II Machine Learning and Bayesian Inference course. For the task at hand, you will use `sklearn`'s [approximate kernel map](http://scikit-learn.org/stable/modules/kernel_approximation.html) in combination with the perceptron implementation of the `SGDClassifier`:
# +
from sklearn.kernel_approximation import RBFSampler
rbf_features = RBFSampler(gamma=1, random_state=42)
X_train_features = rbf_features.fit_transform(X_train)
print(X_train.shape, "->", X_train_features.shape)
sgd_rbf = SGDClassifier(max_iter=100, random_state=42, loss="perceptron",
eta0=1, learning_rate="constant", penalty=None)
sgd_rbf.fit(X_train_features, y_train_versicolor)
sgd_rbf.score(X_train_features, y_train_versicolor)
# -
# The output above shows that the classifier tries to learn the separation boundary in a high-dimensional transformed feature space, and the accuracy of this learning on the training set is over $0.99$. Let's test this classifier in a 5-fold cross-validation manner, and compare its precision, recall and F$_1$ scores to the linear classifier trained on the original data:
# +
y_train_pred = cross_val_predict(sgd, X_train, y_train_versicolor, cv=5)
precision = precision_score(y_train_versicolor, y_train_pred)
recall = recall_score(y_train_versicolor, y_train_pred)
f1 = f1_score(y_train_versicolor, y_train_pred)
print(precision, recall, f1)
y_train_pred = cross_val_predict(sgd_rbf, X_train_features, y_train_versicolor, cv=5)
precision = precision_score(y_train_versicolor, y_train_pred)
recall = recall_score(y_train_versicolor, y_train_pred)
f1 = f1_score(y_train_versicolor, y_train_pred)
print(precision, recall, f1)
# -
# Looks like the kernel trick helped improve the results on the originally non-linearly separable data significantly!
#
# ## Case 2: Multi-class classification
#
# Now remember that your actual goal is to build a three-way classifier that can predict the *setosa*, *versicolor* and *virginica* classes, and not just tell whether an instance is an $X$ (*setosa* or *versicolor*) or not. Actually, you are already halfway there, and here is why.
#
# Some classifiers are capable of handling multiple classes directly. For example, `Naive Bayes` learns about the probabilities of the classes in the data irrespective of the number of classes. Therefore, the binary examples above can naturally be extended to the 3-class classification scenario: you simply provide the classifier with the data on the $3$ rather than $2$ classes.
#
# In contrast, classifiers such as the perceptron (`SGDClassifier`) and `Logistic Regression`, which seek to learn a linear separation boundary, are inherently binary classifiers: they try to learn a single separation boundary between two classes at a time. However, they can also easily be extended to handle more than $2$ classes. Multi-class classification with such linear classifiers generally follows one of two routes:
#
# - with the *one-vs-all* (*OvA*, or *one-vs-rest*, *OvR*) strategy you train $n$ classifiers (e.g., a setosa detector, a versicolor detector and a virginica detector). For a new instance, you apply all of the classifiers and predict the class that gets the highest score returned by the classifiers;
# - with the *one-vs-one* (*OvO*) strategy, you train a binary classifier for each pair of classes in your data and select the class that wins most of the duels.
#
# There are pros and cons to each of these approaches. E.g., with the *OvO* strategy, you end up training $n \times (n-1) / 2$ classifiers, i.e. for the iris dataset you will have $3$ classifiers (exactly as with the *OvA* strategy), but on a $10$-class problem this amounts to $45$ classifiers. On the other hand, the training sets with *OvO* will be much smaller and more balanced. Some classifiers (e.g., SVMs) scale poorly with the size of the training set, so *OvO* is preferable for them, while most linear classifiers use *OvA* instead.
#
# The nice thing about `sklearn` is that it implements the above strategies under the hood, so to perform multi-class classification with the `SGDClassifier`, all you need to do is provide it with the data and labels for the $3$ classes, and it will train $3$ binary *OvA* classifiers and output the class with the highest score, i.e.:
sgd.fit(X_train, y_train) # i.e., all instances, not just one class
print(sgd.predict([setosa_example]))
print(sgd.predict([versicolor_example]))
# Recall that the *versicolor* class label is $1$, so the classifier's output is correct this time. Let's also check the result with the RBF kernel:
# +
sgd_rbf.fit(X_train_features, y_train) # i.e., all instances, not just one class
X_test_features = rbf_features.transform(X_test)
setosa_rbf_example = X_test_features[0] # note that you need to transform the test data in the same way, too
versicolor_rbf_example = X_test_features[17]
print(sgd_rbf.predict([setosa_rbf_example]))
print(sgd_rbf.predict([versicolor_rbf_example]))
# -
# This classifier classified both test examples correctly. Let's see what logic the classifier followed:
# +
setosa_scores = sgd_rbf.decision_function([setosa_rbf_example])
print(setosa_scores)
# check which class gets the maximum score
prediction = np.argmax(setosa_scores)
print(prediction)
# check which class this corresponds to in the classifier
print(sgd_rbf.classes_[prediction])
print(iris.target_names[sgd_rbf.classes_[prediction]])
# -
# This shows that *setosa* class got a much higher score than the other two. What about the versicolor example?
versicolor_scores = sgd_rbf.decision_function([versicolor_rbf_example])
print(versicolor_scores)
prediction = np.argmax(versicolor_scores)
print(prediction)
print(iris.target_names[sgd_rbf.classes_[prediction]])
# For comparison, let's see what the original `SGDClassifier` (without the RBF kernel) predicted:
versicolor_scores = sgd.decision_function([versicolor_example])
print(versicolor_scores)
prediction = np.argmax(versicolor_scores)
print(prediction)
print(iris.target_names[sgd.classes_[prediction]])
# Now, if you'd like to use the *OvO* strategy, you can make `sklearn` do so by creating an instance of `OneVsOneClassifier` (similarly, `OneVsRestClassifier` for *OvA*). Note that you're essentially using the same classifier as before; it is just the framework within which the predictions are made:
# +
from sklearn.multiclass import OneVsOneClassifier
ovo_clf = OneVsOneClassifier(SGDClassifier(max_iter=100, random_state=42, loss="perceptron",
eta0=1, learning_rate="constant", penalty=None))
ovo_clf.fit(X_train_features, y_train)
ovo_clf.predict([versicolor_rbf_example])
# -
len(ovo_clf.estimators_)
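# For symmetry, here is a brief sketch (reusing the RBF features from above) with the explicit `OneVsRestClassifier` wrapper – with $3$ classes it also trains $3$ binary classifiers, matching the *OvA* behaviour that `sklearn` applies by default:
# +
from sklearn.multiclass import OneVsRestClassifier

ovr_clf = OneVsRestClassifier(SGDClassifier(max_iter=100, random_state=42, loss="perceptron",
                                            eta0=1, learning_rate="constant", penalty=None))
ovr_clf.fit(X_train_features, y_train)
print(ovr_clf.predict([versicolor_rbf_example]))
print(len(ovr_clf.estimators_))  # one binary classifier per class
# -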
# Now let's look into the `NaiveBayes` performance on the $3$-class problem:
gnb.fit(X_train, y_train)
gnb.predict([versicolor_example])
# It correctly classifies the *versicolor* example, so let's check how confident it is about this prediction (remember that you should use `predict_proba` with `NaiveBayes` and `decision_function` with the `SGDClassifier`):
gnb.predict_proba([versicolor_example])
# Let's look into the cross-validated performance of the classifiers:
print(cross_val_score(sgd_rbf, X_train_features, y_train, cv=5, scoring="accuracy"))
print(cross_val_score(ovo_clf, X_train_features, y_train, cv=5, scoring="accuracy"))
print(cross_val_score(gnb, X_train, y_train, cv=5, scoring="accuracy"))
# Finally, recall that in practical 1 you used further transformations on the data, e.g. scaling. Let's apply scaling and compare the results (note that you can use `np.mean()` to get the average accuracy score across all splits if you wish to get a single aggregated accuracy score instead of $5$ separate ones):
# +
from sklearn.preprocessing import StandardScaler, MinMaxScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train.astype(np.float64))
X_train_features_scaled = scaler.fit_transform(X_train_features.astype(np.float64))
print(cross_val_score(sgd_rbf, X_train_features_scaled, y_train, cv=5, scoring="accuracy"))
print(cross_val_score(ovo_clf, X_train_features_scaled, y_train, cv=5, scoring="accuracy"))
print(cross_val_score(gnb, X_train_scaled, y_train, cv=5, scoring="accuracy"))
# -
# ## Step 5: Error analysis
#
# Before applying the classifiers to the test data, let's gain a bit more insight into what the classifiers get wrong. Recall that earlier you used confusion matrices to learn about the classification errors:
y_train_pred = cross_val_predict(sgd_rbf, X_train_features_scaled, y_train, cv=3)
conf_mx = confusion_matrix(y_train, y_train_pred)
conf_mx
# Let's visualise the classifier decisions:
plt.imshow(conf_mx, cmap = "jet")
plt.show()
# And if you'd like to highlight only the most salient errors, you can do so as follows (the `"jet"` color scheme maps higher values to the red end of the spectrum):
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
plt.imshow(norm_conf_mx, cmap = "jet")
plt.show()
# ## Final step – evaluating on the test set
#
# The `SGDClassifier` with the RBF kernel:
# +
from sklearn.metrics import accuracy_score
X_test_features_scaled = scaler.transform(X_test_features.astype(np.float64))
y_pred = sgd_rbf.predict(X_test_features_scaled)
accuracy_score(y_test, y_pred)
# -
precision = precision_score(y_test, y_pred, average='weighted')
recall = recall_score(y_test, y_pred, average='weighted')
f1 = f1_score(y_test, y_pred, average='weighted')
print(precision, recall, f1)
# The *OvO* SGD classifier:
# +
from sklearn.metrics import accuracy_score
X_test_features_scaled = scaler.transform(X_test_features.astype(np.float64))
y_pred = ovo_clf.predict(X_test_features_scaled)
accuracy_score(y_test, y_pred)
# -
precision = precision_score(y_test, y_pred, average='weighted')
recall = recall_score(y_test, y_pred, average='weighted')
f1 = f1_score(y_test, y_pred, average='weighted')
print(precision, recall, f1)
# The `NaiveBayes` classifier:
gnb.fit(X_train, y_train)
y_pred = gnb.predict(X_test)
accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred, average='weighted')
recall = recall_score(y_test, y_pred, average='weighted')
f1 = f1_score(y_test, y_pred, average='weighted')
print(precision, recall, f1)
# # Assignment: Handwritten digits dataset
#
# The dataset that you will use in this assignment is the [*digits* dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html), which contains $1797$ images of $10$ hand-written digits. The digits have been preprocessed so that $32 \times 32$ bitmaps are divided into non-overlapping blocks of $4 \times 4$ and the number of on-pixels is counted in each block. This generates an $8 \times 8$ input matrix where each element is an integer in the range $[0, ..., 16]$. This reduces dimensionality and gives invariance to small distortions.
#
# For further information on NIST preprocessing routines applied to this data, see <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, *NIST Form-Based Handprint Recognition System*, NISTIR 5469, 1994.
#
# As before, use the `sklearn`'s data uploading routines to load the dataset and get the data fields:
from sklearn import datasets
digits = datasets.load_digits()
list(digits.keys())
digits
X, y = digits["data"], digits["target"]
X.shape
y.shape
# You can access the digits and visualise them using the following code (feel free to select another digit):
# +
some_digit = X[3]
some_digit_image = some_digit.reshape(8, 8)
plt.imshow(some_digit_image, cmap=matplotlib.cm.binary, interpolation="nearest")
plt.axis("off")
plt.show()
# -
y[3]
# For the rest of the practical, apply the data preprocessing techniques, implement and evaluate the classification models on the digits dataset using the steps that you applied above to the iris dataset.
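# A possible starting point (one option among many – adapt or replace it as you work through the assignment) is to hold out a stratified test set and get a baseline cross-validated accuracy before any further preprocessing:
# +
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import SGDClassifier

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)
baseline_clf = SGDClassifier(max_iter=100, random_state=42, loss="perceptron",
                             eta0=1, learning_rate="constant", penalty=None)
print(cross_val_score(baseline_clf, X_train, y_train, cv=5, scoring="accuracy"))
# -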
| DSPNP_practical2/DSPNP_notebook2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pytorch Resnet to get image features then LSTM with attention to generate text
# Feel free to leave any comments or questions
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
import torch
import torch.nn as nn
import torchvision
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import PIL
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
from torchvision.transforms.transforms import Compose, Normalize, Resize, ToTensor, RandomHorizontalFlip, RandomCrop
from torch.nn.utils.rnn import pad_sequence
import torch.optim as optim
import torch.nn.functional as F
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
import os
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
tqdm.pandas()
#import Levenshtein
#import cv2
from PIL import Image
from matplotlib import pyplot as plt
import seaborn as sns
import time
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# +
#make train and test as in https://www.kaggle.com/yasufuminakama/molecular-translation-naive-baseline
train = pd.read_csv('../input/bms-molecular-translation/train_labels.csv')
test = pd.read_csv('../input/bms-molecular-translation/sample_submission.csv')
def get_train_file_path(image_id):
return "../input/bms-molecular-translation/train/{}/{}/{}/{}.png".format(
image_id[0], image_id[1], image_id[2], image_id
)
def get_test_file_path(image_id):
return "../input/bms-molecular-translation/test/{}/{}/{}/{}.png".format(
image_id[0], image_id[1], image_id[2], image_id
)
train['file_path'] = train['image_id'].progress_apply(get_train_file_path)
test['file_path'] = test['image_id'].progress_apply(get_test_file_path)
print(f'train.shape: {train.shape} test.shape: {test.shape}')
display(train.head())
display(test.head())
# +
#make vocab
words=set()
for st in train['InChI']:
words.update(set(st))
len(words)
vocab=list(words)
vocab.append('<sos>')
vocab.append('<eos>')
vocab.append('<pad>')
stoi={'C': 0,')': 1,'P': 2,'l': 3,'=': 4,'3': 5,'N': 6,'I': 7,'2': 8,'6': 9,'H': 10,'4': 11,'F': 12,'0': 13,'1': 14,'-': 15,'O': 16,'8': 17,
',': 18,'B': 19,'(': 20,'7': 21,'r': 22,'/': 23,'m': 24,'c': 25,'s': 26,'h': 27,'i': 28,'t': 29,'T': 30,'n': 31,'5': 32,'+': 33,'b': 34,'9': 35,
'D': 36,'S': 37,'<sos>': 38,'<eos>': 39,'<pad>': 40}
itos={item[1]:item[0] for item in stoi.items()}
def string_to_ints(string):
l=[stoi['<sos>']]
for s in string:
l.append(stoi[s])
l.append(stoi['<eos>'])
return l
def ints_to_string(l):
return ''.join(list(map(lambda i:itos[i],l)))
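# Quick sanity check of the encode/decode round-trip (all characters below appear in stoi)
example_ints = string_to_ints('InChI=1S/C')
print(example_ints)
print(ints_to_string(example_ints))  # note: includes the '<sos>' and '<eos>' markers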
# +
def pil_loader(path: str) -> Image.Image: #copied from torchvision
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def default_loader(path: str) -> Any:
from torchvision import get_image_backend
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
class InputDatasetTest(Dataset):
def __init__(self,paths,transform):
self.paths=paths
self.loader=default_loader
self.transform=transform
def __len__(self):
return len(self.paths)
def __getitem__(self,idx):
sample=self.loader(self.paths[idx])
sample=self.transform(sample)
return sample,idx
# +
#model adapted from https://www.kaggle.com/mdteach/image-captioning-with-attention-pytorch/data
class Attention(nn.Module):
def __init__(self, encoder_dim,decoder_dim,attention_dim):
super(Attention, self).__init__()
self.attention_dim = attention_dim
self.W = nn.Linear(decoder_dim,attention_dim)
self.U = nn.Linear(encoder_dim,attention_dim)
self.A = nn.Linear(attention_dim,1)
def forward(self, features, hidden_state):
u_hs = self.U(features) #(batch_size,64,attention_dim)
w_ah = self.W(hidden_state) #(batch_size,attention_dim)
        combined_states = torch.tanh(u_hs + w_ah.unsqueeze(1)) #(batch_size,64,attention_dim)
attention_scores = self.A(combined_states) #(batch_size,64,1)
attention_scores = attention_scores.squeeze(2) #(batch_size,64)
alpha = F.softmax(attention_scores,dim=1) #(batch_size,64)
attention_weights = features * alpha.unsqueeze(2) #(batch_size,64,features_dim)
        attention_weights = attention_weights.sum(dim=1) #(batch_size,features_dim)
return alpha,attention_weights
class DecoderRNN(nn.Module):
def __init__(self,embed_size, vocab_size, attention_dim,encoder_dim,decoder_dim,drop_prob=0.3):
super().__init__()
#save the model param
self.vocab_size = vocab_size
self.attention_dim = attention_dim
self.decoder_dim = decoder_dim
self.embedding = nn.Embedding(vocab_size,embed_size)
self.attention = Attention(encoder_dim,decoder_dim,attention_dim)
self.init_h = nn.Linear(encoder_dim, decoder_dim)
self.init_c = nn.Linear(encoder_dim, decoder_dim)
self.lstm_cell = nn.LSTMCell(embed_size+encoder_dim,decoder_dim,bias=True)
self.f_beta = nn.Linear(decoder_dim, encoder_dim)
self.fcn = nn.Linear(decoder_dim,vocab_size)
self.drop = nn.Dropout(drop_prob)
def forward(self, features, captions):
#vectorize the caption
embeds = self.embedding(captions)
# Initialize LSTM state
h, c = self.init_hidden_state(features) # (batch_size, decoder_dim)
#get the seq length to iterate
seq_length = len(captions[0])-1 #Exclude the last one
batch_size = captions.size(0)
num_features = features.size(1)
preds = torch.zeros(batch_size, seq_length, self.vocab_size).to(device)
alphas = torch.zeros(batch_size, seq_length,num_features).to(device)
for s in range(seq_length):
alpha,context = self.attention(features, h)
lstm_input = torch.cat((embeds[:, s], context), dim=1)
h, c = self.lstm_cell(lstm_input, (h, c))
output = self.fcn(self.drop(h))
preds[:,s] = output
alphas[:,s] = alpha
return preds, alphas
def generate_caption(self,features,max_len=200,itos=None,stoi=None):
# Inference part
# Given the image features generate the captions
batch_size = features.size(0)
h, c = self.init_hidden_state(features) # (batch_size, decoder_dim)
alphas = []
#starting input
#word = torch.tensor(stoi['<sos>']).view(1,-1).to(device)
word=torch.full((batch_size,1),stoi['<sos>']).to(device)
embeds = self.embedding(word)
#captions = []
captions=torch.zeros((batch_size,202),dtype=torch.long).to(device)
captions[:,0]=word.squeeze()
for i in range(202):
alpha,context = self.attention(features, h)
            #store the alpha score
#alphas.append(alpha.cpu().detach().numpy())
#print('embeds',embeds.shape)
#print('embeds[:,0]',embeds[:,0].shape)
#print('context',context.shape)
lstm_input = torch.cat((embeds[:, 0], context), dim=1)
h, c = self.lstm_cell(lstm_input, (h, c))
output = self.fcn(self.drop(h))
#print('output',output.shape)
output = output.view(batch_size,-1)
#select the word with most val
predicted_word_idx = output.argmax(dim=1)
#save the generated word
#captions.append(predicted_word_idx.item())
#print('predicted_word_idx',predicted_word_idx.shape)
captions[:,i]=predicted_word_idx
#end if <EOS detected>
#if itos[predicted_word_idx.item()] == "<eos>":
# break
#send generated word as the next caption
#embeds = self.embedding(predicted_word_idx.unsqueeze(0))
embeds=self.embedding(predicted_word_idx).unsqueeze(1)
        #convert the vocab indices to words and return the sentence
#return [itos[idx] for idx in captions]
return captions
def init_hidden_state(self, encoder_out):
mean_encoder_out = encoder_out.mean(dim=1)
h = self.init_h(mean_encoder_out) # (batch_size, decoder_dim)
c = self.init_c(mean_encoder_out)
return h, c
class EncoderCNNtrain18(nn.Module):
def __init__(self):
super(EncoderCNNtrain18, self).__init__()
resnet = torchvision.models.resnet18()
#for param in resnet.parameters():
# param.requires_grad_(False)
modules = list(resnet.children())[:-2]
self.resnet = nn.Sequential(*modules)
def forward(self, images):
features = self.resnet(images) #(batch_size,512,8,8)
features = features.permute(0, 2, 3, 1) #(batch_size,8,8,512)
features = features.view(features.size(0), -1, features.size(-1)) #(batch_size,64,512)
#print(features.shape)
return features
class EncoderDecodertrain18(nn.Module):
def __init__(self,embed_size, vocab_size, attention_dim,encoder_dim,decoder_dim,drop_prob=0.3):
super().__init__()
self.encoder = EncoderCNNtrain18()
self.decoder = DecoderRNN(
embed_size=embed_size,
vocab_size = vocab_size,
attention_dim=attention_dim,
encoder_dim=encoder_dim,
decoder_dim=decoder_dim
)
def forward(self, images, captions):
features = self.encoder(images)
outputs = self.decoder(features, captions)
return outputs
# +
embed_size=200
vocab_size = len(vocab)
attention_dim=300
encoder_dim=512
decoder_dim=300
model = EncoderDecodertrain18(
embed_size=embed_size,
vocab_size = vocab_size,
attention_dim=attention_dim,
encoder_dim=encoder_dim,
decoder_dim=decoder_dim
)
MODEL_PATH='../input/model18train/modeltrain18_2'
model.load_state_dict(torch.load(MODEL_PATH))
model=model.to(device)
# +
transform = Compose([
#RandomHorizontalFlip(),
Resize((256,256), PIL.Image.BICUBIC),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
dataset_test=InputDatasetTest(test['file_path'].to_numpy(),transform)
dataloader_test=DataLoader(
dataset=dataset_test,
batch_size=300,
shuffle=False,
num_workers=6)
def tensor_to_captions(ten):
l=ten.tolist()
ret=[]
for ls in l:
temp=''
#for i in ls[1:]:
for i in ls:
if i==stoi['<eos>'] or i==stoi['<pad>']:
break
temp=temp+itos[i]
ret.append(temp)
return ret
#print out a caption to make sure the model is working correctly
model.eval()
itr=iter(dataloader_test)
#print(next(itr))
img,idx=next(itr)
print(img.shape)
print(img[0:5].shape)
features=model.encoder(img[0:5].to(device))
caps = model.decoder.generate_caption(features,stoi=stoi,itos=itos)
#caption = ''.join(caps)[:-1]
captions=tensor_to_captions(caps)
plt.imshow(img[0].numpy().transpose((1,2,0)))
print(captions)
# -
model.eval()
with torch.no_grad():
for i,batch in enumerate(dataloader_test):
img,idx=batch[0].to(device),batch[1]
features=model.encoder(img)
caps=model.decoder.generate_caption(features,stoi=stoi,itos=itos)
captions=tensor_to_captions(caps)
test['InChI'].loc[idx]=captions
if i%1000==0: print(i)
output_cols = ['image_id', 'InChI']
test[output_cols].to_csv('submission.csv',index=False)
test[output_cols].head()
| kgl/meyer/pytorch-resnet-lstm-with-attention.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
import random
from sklearn.model_selection import train_test_split
from tqdm.notebook import tqdm
df = pd.read_csv("dataset.csv").fillna("0").sort_values("Disease").reset_index(drop=True)
cat_col = df.select_dtypes("object").columns
df[cat_col] = df[cat_col].astype("category")
df.info()
df[cat_col].nunique().reset_index(name='cardinality')
df.head()
for col in cat_col:
df[col] = df[col].str.replace(" ", "")
data_encoder = LabelEncoder()
sym = pd.read_csv("symptom_severity.csv").Symptom
sym = list(sym) + ["0"]
data_encoder = data_encoder.fit(sym)
print (len(data_encoder.classes_))
#np.save('data_classes.npy', data_encoder.classes_)
#df.to_csv("clean_dataset.csv", index=False)
encoder = LabelEncoder()
encoder = encoder.fit(df.Disease)
print (encoder.classes_)
#np.save('labels_classes.npy', encoder.classes_)
class model_embed:
    random.seed(10) # seeding for reproducibility
def __init__(self, data, cv, encoder, encoder_data, name_y):
        self.name = "Keras Categorical Embedding"
self.data = data
self.cv = cv
self.class_encoder = encoder
self.y_name = name_y
self.data_encoder = encoder_data
self.model_ = self.model()
def split(self):
"""split data into train, val, and test index"""
cv_index = {}
for index in range(0, self.cv):
split_train = []
split_val = []
split_test = []
train_test_split = 0
for index_data in self.data.index:
if index_data % 120 == 0 and index_data != 0:
for index_y in (random.sample(list(np.arange(index_data-120, index_data)), 24)):
split_test.append(int(index_y))
#print ("done ", index+1)
train_test_split = [train_idx for train_idx in (self.data.index) if train_idx not in split_test]
split_val = list(np.random.choice(train_test_split, size=(18)))
split_train = [train_idx for train_idx in train_test_split if train_idx not in split_val]
cv_index[(index)] = np.array([(split_train), (split_val), list(split_test)])
return cv_index
def model(self):
"""creating the model"""
model_data = tf.keras.Sequential([
tf.keras.layers.Embedding(134, 328, input_length=17),
tf.keras.layers.LSTM(32),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(164, activation="relu"),
tf.keras.layers.Dense(82, activation="relu"),
tf.keras.layers.Dense(41, activation="softmax")
])
return model_data
def model_train(self,data_x, data_y, val_x, val_y, test_x, test_y, num_epochs=5):
"""train model with specifid cross validation"""
self.model_.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = self.model_.fit(data_x, data_y, epochs=num_epochs, batch_size=32, verbose=1, validation_data=(val_x, val_y))
return self.model_.evaluate(test_x, test_y)
def predict(self, test_x):
data_p = self.data_encoder.transform(test_x)
data_p = np.reshape(data_p, [1, 17])
prediction = (self.model_.predict(data_p))
label = self.class_encoder.inverse_transform([np.argmax(prediction)])
return label
def data_slicing(self, cv, x):
"""return data x and y for every cv"""
train = self.data.iloc[cv[0]]
train_x = train.drop([self.y_name], axis=1)
for col in train_x.columns:
train_x[col] = self.data_encoder.transform(train_x[col].values)
train_y = self.class_encoder.transform(train[self.y_name].values)
val = self.data.iloc[cv[1]]
val_x = val.drop([self.y_name], axis=1)
for col in val_x.columns:
val_x[col] = self.data_encoder.transform(val_x[col].values)
val_y = self.class_encoder.transform(val[self.y_name])
test = self.data.iloc[cv[2]]
test_x = test.drop([self.y_name], axis=1)
for col in test_x.columns:
test_x[col] = self.data_encoder.transform(test_x[col].values)
test_y = self.class_encoder.transform(test[self.y_name])
return train_x, train_y, val_x, val_y, test_x, test_y
def run(self):
data_cv = self.split()
result = []
for x in tqdm(range(self.cv)):
print (x)
train_x, train_y, val_x, val_y, test_x, test_y = (self.data_slicing(data_cv[x], x))
#print (self.model_)
#print (train_x.shape, val_x.shape, test_x.shape)
#print (train_y.shape, val_y.shape, test_y.shape)
result.append(self.model_train(train_x, train_y, val_x, val_y, test_x, test_y, 5)[1])
#print (self.class_encoder.classes_)
print (sum(result)/self.cv)
m = model_embed(df, 1, encoder, data_encoder, "Disease")
m.run()
random_nmbr = random.sample(list(np.arange(0, 4920)), 1)
test_x = df.iloc[random_nmbr].drop(["Disease"], axis=1).values
test_y = df.iloc[random_nmbr]["Disease"].values
y = m.predict(test_x[0])
print (y.item(), test_y.item())
m.model().summary()
model_learned = m.model()
model_learned.save('embedding_lstm')
# +
# Model Load
from tensorflow import keras
model = keras.models.load_model('base_lstm')
# Encoder Load
data_enc = LabelEncoder()
data_enc.classes_ = np.load("data_classes.npy", allow_pickle=True)
label_enc = LabelEncoder()
label_enc.classes_ = np.load("labels_classes.npy", allow_pickle=True)
# -
x = list(df[0:1].drop("Disease", axis=1).values)
y = df[0:1].Disease
x
x_mod = data_enc.transform(np.reshape(x, [17, 1]))
x_mod
x_mod_p = np.reshape(x_mod, [1, 1, 17])
x_mod_p
label_enc.inverse_transform([np.argmax(model.predict(x_mod_p))])[0]
| Embedding CV LSTM/Model LSTM Embedding Dense Network.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="gZABpep_V-8C"
# # CUDA Exercise 04
# > Matrix summation example on the GPU, using only a single thread.
#
# This Jupyter Notebook can also be opened in Google Colab, so you don't need a PC with a graphics card to play with CUDA. To launch it in Google Colab, please click the icon below.
#
# [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg#left)](https://colab.research.google.com/github/SuperChange001/CUDA_Learning/blob/main/Solution/Exercise_04.ipynb)
# + [markdown] id="P401L2N_WG6R"
# ## Initialize the CUDA dev environment
# + colab={"base_uri": "https://localhost:8080/"} id="OONoNFZeV63L" outputId="0de4afb8-9a85-42aa-ec39-d9ab1bc7f898"
# clone the code repo,
# !pip install git+git://github.com/depctg/nvcc4jupyter.git
# %load_ext nvcc_plugin
# Check the environment
# !lsb_release -a
# !nvcc --version
# !nvidia-smi
# + [markdown] id="DDN2x4izW0rO"
# ## Matrix Summation
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="egrZEZ3MWaP_" outputId="1e81177e-dd74-421e-ce4e-74b74936b818"
# %%cu
#include <stdio.h>
#include <assert.h>
#define M 10
#define N 10
#define MAX_ERR 1e-4
__global__ void matrix_summation(float* out, float *a, float *b, int m, int n)
{
int index;
for(int i = 0; i < m; i++)
{
for(int j = 0; j < n; j++)
{
index = i*n+j;
out[index] = a[index] + b[index];
}
}
}
int main()
{
float *a, *b, *out;
float *d_a, *d_b, *d_out;
a = (float*)malloc(sizeof(float) * (M * N));
b = (float*)malloc(sizeof(float) * (M * N));
out = (float*)malloc(sizeof(float) * (M * N));
    // data initialization
for(int i = 0; i < M; i++)
{
for(int j = 0; j < N; j++)
{
int index = i*N+j;
a[index] = i*3.14f;
b[index] = j;
}
}
printf("a[12] = %f\n", a[12]);
printf("b[12] = %f\n", b[12]);
// Allocate memory on GPU
cudaMalloc((void**)&d_a, sizeof(float) * (M * N));
cudaMalloc((void**)&d_b, sizeof(float) * (M * N));
cudaMalloc((void**)&d_out, sizeof(float) * (M * N));
// copy operator to GPU
cudaMemcpy(d_a, a, sizeof(float) * (M * N), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(float) * (M * N), cudaMemcpyHostToDevice);
    // GPU does the work (launched with a single block and a single thread), CPU waits
    matrix_summation<<<1,1>>>(d_out, d_a, d_b, M, N);
// Get results from the GPU
cudaMemcpy(out, d_out, sizeof(float) * (M * N),
cudaMemcpyDeviceToHost);
// Test the result
for(int i = 0; i < M; i++)
{
for(int j = 0; j < N; j++)
{
int index = i*N+j;
assert(fabs(out[index] - a[index] - b[index]) < MAX_ERR);
}
}
printf("out[12] = %f\n", out[12]);
printf("PASSED\n");
cudaDeviceSynchronize();
// Free the memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_out);
free(a);
free(b);
free(out);
return 0;
}
| Solution/Exercise_04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Identify "cloudy transition" Spitzer exoplanet host targets
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from astropy.utils.data import download_file
from astropy.table import Table, join
import astropy.units as u
from astroquery.nasa_exoplanet_archive import NasaExoplanetArchive
# -
cold_url = 'https://irsa.ipac.caltech.edu/data/SPITZER/docs/files/spitzer/extrasolarplanets.txt'
warm_url = 'http://ssc.spitzer.caltech.edu/warmmission/scheduling/observinglogs/extrasolarplanetsWarm.txt'
start_line = 24
cold_table = Table.read(cold_url, format='ascii.fixed_width',
header_start=start_line, data_start=start_line + 3, delimiter=' ')
start_line = 17
warm_table = Table.read(warm_url, format='ascii.fixed_width',
header_start=start_line, data_start=start_line + 3, delimiter=' ')
# +
all_channels = ['ch1', 'ch2', 'ch3', 'ch4']
observations = dict()
for table in [cold_table, warm_table]:
for target in table['TargetName']:
if '_' in target:
str1, str2, *rest = target.split('_')
elif '-' in target:
str1, str2, *rest = target.split('-')
else:
str1, str2, rest = target, "", ""
if str1 in all_channels:
channel = str1
name = str2
elif str2 in all_channels:
channel = str2
name = str1
# if no string is channel, make simple planet name:
if str1 not in all_channels and str2 not in all_channels:
name = str1 + str2 + ' '.join(rest)
fixed_name = (name.upper().replace(' ', '').replace('ECL', '')
.replace('EC', '').replace('CH24', '').replace('CH13', '')
.replace('-', '').replace('DAR', ''))
if fixed_name.endswith('B') or fixed_name.endswith('S') or fixed_name.endswith('C'):
fixed_name = fixed_name[:-1]
if fixed_name.endswith('CH') or fixed_name.endswith('CP'):
fixed_name = fixed_name[:-2]
fixed_name = fixed_name.lower()
if fixed_name in observations:
observations[fixed_name].add(channel)
else:
observations[fixed_name] = {channel}
# -
obs_table = Table(names=['NAME_LOWERCASE', "CH1", "CH2"], dtype=['S832', bool, bool])
for obs in observations:
row = [obs, 'ch1' in observations[obs], 'ch2' in observations[obs]]
obs_table.add_row(row)
obs_table = Table(obs_table)
archive = NasaExoplanetArchive.get_confirmed_planets_table(cache=False)
archive['NAME_LOWERCASE'] = [target[:-1].replace('-', '') for target in archive['NAME_LOWERCASE']]
observed_planets = join(archive, obs_table, join_type='inner', keys=['NAME_LOWERCASE'])
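# The next cell estimates each planet's equilibrium temperature as $T_\mathrm{eq} = T_\mathrm{eff}\sqrt{R_\star / (2a)}$, which assumes zero albedo and full heat redistribution.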
# +
teff = observed_planets['st_teff'] # stellar effective temperature
rstar = observed_planets['st_rad'] # stellar radius
a = observed_planets['pl_orbsmax'] # orbital semimajor axis
teq = (teff * np.sqrt(rstar / (2 * a))).decompose()
# +
cloudy_transition = (1300 * u.K < teq) & (1700 * u.K > teq)
print('\n'.join(observed_planets[cloudy_transition]['pl_hostname'].data.data))
# -
| id.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
path = "/../"
def simply(path):
    """Simplify a Unix-style absolute path using a stack of path components."""
    stack = []
    for part in path.split('/'):
        if stack and part == "..":
            stack.pop()            # '..' moves one level up
        elif part not in ['.', '', '..']:
            stack.append(part)     # keep ordinary directory/file names
    return "/" + "/".join(stack)
simply(path)
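# A couple of extra (made-up) example paths to illustrate how the stack handles empty segments, `'.'` and `'..'`:
print(simply("/home//foo/"))      # -> /home/foo
print(simply("/a/./b/../../c/"))  # -> /c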
| Stack/Simplify path.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn.svm import OneClassSVM
from sklearn.model_selection import GridSearchCV
import pandas as pd
import numpy as np
df = pd.read_pickle("./dimapac.pkl")
def ip_from_string(ips):
return "".join(chr(int(n)) for n in ips.split("."))
df_deopped = df.drop(['route', 'timestamp'], axis=1)
df_deopped.iloc[:3800]
# +
train_length = int(df_deopped.shape[0] * 0.8)
train_df = df_deopped.iloc[:train_length]
test_df = df_deopped.iloc[train_length:]
nus = [0.001, 0.01, 0.1, 1]
gammas = [0.001, 0.01, 0.1, 1]
parameters = {'kernel':['linear', 'rbf'], 'gamma' : gammas, 'nu': nus}
svc = OneClassSVM()
svc_clf = GridSearchCV(svc, parameters, scoring='f1', return_train_score=True)
svc_clf.fit(train_df, np.full(train_length, 1))
# -
print(svc_clf.best_params_)
# y = svc_clf.predict(my1601)  # commented out: `my1601` is not defined anywhere in this notebook
extern_traffic = pd.read_pickle("../traffic_analyzer/file.pkl")
extern_traffic = extern_traffic.drop(['route', 'timestamp'], axis=1)
# +
print("Класс '-1' - нехарактерный траффик, '1' - характерный траффик")
print("\n---Информация о пакетах взятых из открытых источников и никак не связанных с начаьной выборкой ")
y = svc_clf.predict(extern_traffic)
uniqueValues, occurCount = np.unique(y, return_counts=True)
print("Полученные классы : " , uniqueValues[::-1])
print("Количество обхектов в каждом классе соотвественно : ", occurCount)
print("\n---Информация о пакетах которые были отложены из начальный выборки для теста и в которых были сымитированы угрозы")
y = svc_clf.predict(test_df)
uniqueValues, occurCount = np.unique(y, return_counts=True)
print("Полученные классы : " , uniqueValues[::-1])
print("Количество обхектов в каждом классе соотвественно : ", occurCount)
# -
760 / (760 + 178)
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
def f_importances(coef, names):
imp = coef
imp,names = zip(*sorted(zip(imp,names)))
plt.barh(range(len(names)), imp, align='center')
plt.yticks(range(len(names)), names)
plt.show()
figure(num=None, figsize=(12, 12), dpi=80, facecolor='w', edgecolor='k')
f_importances(svc_clf.best_estimator_.coef_[0], FEATURES)
svc_clf.best_estimator_.coef_[0]
# +
from sklearn import metrics
import numpy as np
import pandas as pd
from IPython.display import display, HTML
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
model = Sequential()
model.add(Dense(25, input_dim=df_deopped.shape[1], activation='relu'))
model.add(Dense(3, activation='relu'))
model.add(Dense(25, activation='relu'))
model.add(Dense(df_deopped.shape[1])) # Multiple output neurons
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(df_deopped,df_deopped,verbose=1,epochs=100)
# -
pred = model.predict(df_deopped)
score1 = np.sqrt(metrics.mean_squared_error(pred,df_deopped))
# pred = model.predict(x_normal)
# score2 = np.sqrt(metrics.mean_squared_error(pred,x_normal))
# pred = model.predict(x_attack)
# score3 = np.sqrt(metrics.mean_squared_error(pred,x_attack))
# print(f"Out of Sample Score (RMSE): {score1}")
# print(f"Insample Normal Score (RMSE): {score2}")
# print(f"Attack Underway Score (RMSE): {score3}")
score1
# Create neural net (note: this cell assumes `encdf`, `y`, `x_test` and `y_test` are defined in other cells)
from tensorflow.keras.callbacks import EarlyStopping
model = Sequential()
model.add(Dense(10, input_dim=encdf.shape[1], activation='relu'))
model.add(Dense(50, input_dim=encdf.shape[1], activation='relu'))
model.add(Dense(10, input_dim=encdf.shape[1], activation='relu'))
model.add(Dense(1, kernel_initializer='normal'))
model.add(Dense(y.shape[1],activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3,
                        patience=5, verbose=1, mode='auto')
model.fit(encdf,encdf,validation_data=(x_test,y_test),
          callbacks=[monitor],verbose=2,epochs=1000)
encdf= df_deopped
# Encode a numeric column as zscores
def encode_numeric_zscore(df, name, mean=None, sd=None):
if mean is None:
mean = df[name].mean()
if sd is None:
sd = df[name].std()
df[name] = (df[name] - mean) / sd
encode_numeric_zscore(encdf, 'client_package_size_mean')
encode_numeric_zscore(encdf, 'client_package_size_std') #2
encode_numeric_zscore(encdf, 'server_package_size_mean') #3
encode_numeric_zscore(encdf, 'server_package_size_std') #4
encode_numeric_zscore(encdf, 'client_batch_sizes_mean') #5
encode_numeric_zscore(encdf, 'client_batch_sizes_std') #6
encode_numeric_zscore(encdf, 'server_batch_sizes_mean') #7
encode_numeric_zscore(encdf, 'server_batch_sizes_std') #8
encode_numeric_zscore(encdf, 'client_batch_counts_mean') #9
encode_numeric_zscore(encdf, 'server_batch_counts_mean') #10
encode_numeric_zscore(encdf, 'client_efficiency') #11
encode_numeric_zscore(encdf, 'server_efficiency') #12
encode_numeric_zscore(encdf, 'ratio_sizes') #13
encode_numeric_zscore(encdf, 'ratio_application_size') #14
encode_numeric_zscore(encdf, 'ratio_packages') #15
encode_numeric_zscore(encdf, 'client_package_size_sum') #16
encode_numeric_zscore(encdf, 'client_application_size_sum')#17
encode_numeric_zscore(encdf, 'client_package_count') #18
encode_numeric_zscore(encdf, 'client_batch_counts_sum') #19
encode_numeric_zscore(encdf, 'server_package_size_sum') #20
encode_numeric_zscore(encdf, 'server_application_size_sum') #21
encode_numeric_zscore(encdf, 'server_package_count')#22
encode_numeric_zscore(encdf, 'server_batch_counts_sum') #23
encode_numeric_zscore(encdf, 'transport_protocol') #24
encode_numeric_zscore(encdf, 'ip_protocol_version') #25
FEATURES = [
'client_package_size_mean', #1
'client_package_size_std', #2
'server_package_size_mean', #3
'server_package_size_std', #4
'client_batch_sizes_mean', #5
'client_batch_sizes_std', #6
'server_batch_sizes_mean', #7
'server_batch_sizes_std', #8
'client_batch_counts_mean', #9
'server_batch_counts_mean', #10
'client_efficiency', #11
'server_efficiency', #12
'ratio_sizes', #13
'ratio_application_size', #14
'ratio_packages', #15
'client_package_size_sum', #16
'client_application_size_sum', #17
'client_package_count', #18
'client_batch_counts_sum', #19
'server_package_size_sum', #20
'server_application_size_sum', #21
'server_package_count', #22
'server_batch_counts_sum', #23
'transport_protocol', #24
'ip_protocol_version', #25
]
from sklearn.ensemble import IsolationForest
from sklearn.metrics import make_scorer, f1_score
from sklearn import model_selection
from sklearn.datasets import make_classification
train_length = int(df_deopped.shape[0] * 0.8)
train_df = df_deopped.iloc[:train_length]
test_df = df_deopped.iloc[train_length:]
# +
clf = IsolationForest(random_state=47)
param_grid = {'n_estimators': list(range(50, 100, 5)),
'max_samples': list(range(40, 80, 5)),
'contamination': [0.1, 0.2, 0.3, 0.4, 0.5],
'max_features': [5,10,15],
'bootstrap': [True, False],
'n_jobs': [5, 10, 20, 30]}
grid_dt_estimator = model_selection.GridSearchCV(clf,
param_grid,
scoring="f1",
refit=True,
cv=10,
return_train_score=True)
grid_dt_estimator.fit(train_df, np.full(train_length, 1))
# -
train_df.shape
| svm_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Streamlines tutorial
# In this tutorial you will learn how to download and render streamline data to display connectivity data. In brief, injections of anterogradely transported viruses are performed in wild type and CRE-driver mouse lines. The viruses express fluorescent proteins so that efferent projections from the injection locations can be traced everywhere in the brain. The images with the fluorescence data are acquired and registered to the Allen Coordinates reference frame. The traces of the streamlines are then extracted using a fast marching algorithm (by [https://neuroinformatics.nl](https://neuroinformatics.nl)).
#
# <img src="../Docs/Media/streamlines.png" width="600" height="350">
#
# The connectivity data are produced as part of the Allen Brain Atlas [Mouse Connectivity project](http://connectivity.brain-map.org).
#
# The first step towards being able to render streamlines data is to identify the set of experiments you are interested in (i.e. injections in the primary visual cortex of wild type mice). To do so you can use the experiments explorer at [http://connectivity.brain-map.org](http://connectivity.brain-map.org).
#
# Once you have selected the experiments, you can download metadata about them using the 'download data as csv' option at the bottom of the page. This metadata .csv is what we can then use to get a link to the data to download.
#
# First we do the usual set up steps to get brainrender up and running
# ### Setup
#
# +
# We begin by adding the current path to sys.path to make sure that the imports work correctly
import sys
sys.path.append('../')
import os
# Set up VTKPLOTTER to work in Jupyter notebooks
from vtkplotter import *
embedWindow(backend=False)
# Import variables
from brainrender import * # <- these can be changed to personalize the look of your renders
# Import brainrender classes and useful functions
from brainrender.scene import Scene
from brainrender.Utils.parsers.streamlines import StreamlinesAPI
from brainrender.Utils.data_io import listdir
# Before populating the scene, we need to change the current working directory to the parent folder,
# then we are ready to start!
os.chdir(os.path.normpath(os.path.join(os.getcwd(), os.pardir)))
streamlines_api = StreamlinesAPI()
# -
# ## Downloading data
# If you have streamlines data already saved somewhere, you can skip this section.
#
# ### Manual download
# To download streamlines data, you have two options (see the [user guide](Docs/UserGuide.md) for more details).
# If you head to [http://connectivity.brain-map.org](http://connectivity.brain-map.org) you can download a .csv file with the experiment IDs of interest. Then you can use the following function to download the streamline data:
# parse .csv file
# Make sure to put the path to your downloaded file here
filepaths, data = streamlines_api.extract_ids_from_csv("Examples/example_files/experiments_injections.csv",
download=True)
# The `filepaths` variable stores the paths to the .json files that have been saved by the `streamlines_api`, the `data` variable already contains the streamlines data. You can pass either `filepaths` or `data` to `scene.add_streamlines` (see below) to render your streamlines data.
# ### Automatic download
# If you know that you simply want to download the data to a specific target structure, then you can let brainrender take care of downloading the data for you. This is how:
filepaths, data = streamlines_api.download_streamlines_for_region("CA1") # <- get the streamlines for CA1
# Once you have downloaded the streamlines data, it's time to render it in your scene.
#
# ## Rendering streamlines data
# You can pass either `data` or `filepaths` to `scene.add_streamlines`, just make sure to use the correct keyword argument (unimaginatively called `data` and `filepath`).
# +
# Start by creating a scene
scene = Scene(jupyter=True)
# you can then pass this list of filepaths to add_streamlines.
scene.add_streamlines(data, color="green")
# alternative you can pass a string with the path to a single file or a list of paths to the .json files that you
# created in some other way.
# then you can just render your scene
scene.render()
# -
# `add_streamlines` takes a few arguments that let you personalize the look of the streamlines:
# * `colorby`: you can pass the acronym of a brain region, and the default color of that region will be used for the streamlines
# * `color`: alternatively you can specify the color of the streamlines directly.
# * `alpha`, `radius`: you can change the transparency and the thickness of the actors used to render the streamlines.
# * `show_injection_site`: if set to True, a sphere will be rendered at the locations that correspond to the injection sites.
#
#
# Don't forget to check the other examples to learn more about how to use brainrender to make amazing 3D renderings!
# Also, you can find a list of variables you can play around with in brainrender.variables.py
# Playing around with these variables will allow you to make the rendering look exactly how you want them to be.
| Examples/notebooks/Streamlines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.1
# language: julia
# name: julia-1.3
# ---
# +
@info "Train baseline Single Headed Attention Recurrent language model using enwik8 dataset..."
@info "This model is the main model of SHA-RNN, which contains 4 layers of SHA-RNN"
using Knet
include("../src/data.jl")
include("../src/model.jl")
include("../src/train.jl")
# +
BATCHSIZE = 2 ; @show BATCHSIZE
BPTT = 1024 ; @show BPTT
MEMSIZE = 5000 ; @show MEMSIZE
EMSIZE = 1024 ; @show EMSIZE
# -
datadir = "../data/enwik8"
jld2dir = "../jld2/enwik8.jld2"
if !isfile(jld2dir)
println("Reading data from directory: $datadir")
println("Setting batch size to $BATCHSIZE")
vocab = Vocab("$datadir/train.txt")
trainfile = TextReader("$datadir/train.txt", vocab)
validfile = TextReader("$datadir/valid.txt", vocab)
testfile = TextReader("$datadir/test.txt", vocab)
dtrn = TextData(trainfile, batchsize=BATCHSIZE, bptt=BPTT)
ddev = TextData(validfile, batchsize=BATCHSIZE, bptt=BPTT, randomize = false)
dtst = TextData(testfile, batchsize=BATCHSIZE, bptt=BPTT, randomize = false)
println("Saving data to $jld2dir")
Knet.save(jld2dir, "dtrn", dtrn, "dtst", dtst, "ddev", ddev)
else
println("Loading data from $jld2dir")
(dtrn, dtst, ddev) = Knet.load(jld2dir, "dtrn", "dtst", "ddev")
vocab = dtrn.src.vocab
if dtrn.batchsize != BATCHSIZE
changebatchsize!(dtrn, BATCHSIZE)
changebatchsize!(ddev, BATCHSIZE)
changebatchsize!(dtst, BATCHSIZE)
end;
dtrn.bptt = BPTT
dtst.bptt = BPTT
ddev.bptt = BPTT
end;
# +
@info "Initializing the model and collecting training data..."
epochs, em_size, hidden_size, layers = 8, EMSIZE, (EMSIZE*4), 4
println("embedding size: ", em_size)
println("hidden size: ", hidden_size)
println("layers: ", layers)
println("Collecting training data...")
println("epochs: ", epochs)
ctrn = collect(dtrn)
trn = collect(flatten(collect(dtrn) for i in 1:epochs))
dev = collect(ddev)
mintrn = ctrn[1:20];
model = SHARNN(em_size, hidden_size, vocab, layers; num_max_positions=MEMSIZE);
# -
@info "Starting training, total iteration no: $(length(trn))"
initlamb!(model, length(trn); lr=0.002, warmup=(1200)/length(trn))
model = train!(model, trn, dev, mintrn; report_iter=length(ctrn), update_per_n_batch=8)
halfdownlr(model)
model = train!(model, trn, dev, mintrn; report_iter=length(ctrn), update_per_n_batch=8)
# +
@info "Finished training, Starting evaluation ..."
devloss = loss(model, ddev);
println("Development set scores: ", report_lm(devloss))
testloss = loss(model, dtst);
println("Test set scores: ", report_lm(testloss))
model_name = "full_main.jld2"
@info "Saving the model as $(model_name)"
Knet.save(model_name, "model", model);
| notebooks/SHA-RNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
from nltk.tokenize import TweetTokenizer
# -
df = pd.read_pickle('corpus/tweet_df.pkl')
df
tokenizer = TweetTokenizer()  # instantiate the tokenizer once rather than once per tweet
alltokens = [token for tweet in df['text'] for token in tokenizer.tokenize(tweet)]
alltokens
len(alltokens)
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp card
# -
# # Card
#
# > API details.
#hide
from nbdev.showdoc import *
# +
#export
#from __future__ import print_function, division
import random
class Card:
"""Represents a standard playing card.
Attributes:
suit: integer 0-3
rank: integer 1-13
"""
suit_names = ["Clubs", "Diamonds", "Hearts", "Spades"]
rank_names = [None, "Ace", "2", "3", "4", "5", "6", "7",
"8", "9", "10", "Jack", "Queen", "King"]
def __init__(self, suit=0, rank=2):
self.suit = suit
self.rank = rank
def __str__(self):
"""Returns a human-readable string representation."""
return '%s of %s' % (Card.rank_names[self.rank],
Card.suit_names[self.suit])
def __eq__(self, other) -> bool:
"""Checks whether self and other have the same rank and suit.
"""
return self.suit == other.suit and self.rank == other.rank
def __lt__(self, other) -> bool:
"""Compares this card to other, first by suit, then rank.
"""
t1 = self.suit, self.rank
t2 = other.suit, other.rank
return t1 < t2
def __repr__(self): return self.__str__()
def foo(): pass
# -
# Card is a class that represents a single card in a deck of cards. For example:
Card(suit=2, rank=11)
# +
c = Card(suit=1, rank=3)
assert str(c) == '3 of Diamonds'
c2 = Card(suit=2, rank=11)
assert str(c2) == 'Jack of Hearts'
# -
# You can do comparisons of cards, too!
assert c2 > c
# > Note: Look at fastcore's testing utilities for convenience functions that print out helpful error messages by default when there is an error. These convenience functions are an improvement upon using assert.
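# For instance, a minimal sketch with two of those fastcore helpers (this assumes `fastcore` is installed; it is not used elsewhere in this notebook):
from fastcore.test import test_eq, test_ne  # assert-style helpers with readable error messages
test_eq(str(Card(suit=1, rank=3)), '3 of Diamonds')
test_ne(Card(suit=0, rank=1), Card(suit=3, rank=13))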
# You can show the docs for methods by calling show_doc. For example, the code show_doc(Card.__eq__) produces the following documentation:
show_doc(Card.__eq__)
card1 = Card(suit=1, rank=3)
card2 = Card(suit=1, rank=3)
assert card1 == card2
| 00_card.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Publications markdown generator for qiao-tw
#
# Takes a TSV of publications with metadata and converts them for use with [qiao-tw.github.io](https://qiao-tw.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `publications.py`. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# -
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, url_slug, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
#
# This is how the raw file looks (it doesn't look pretty, use a spreadsheet or other program to edit and create).
# !cat publications.tsv
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# + deletable=true editable=true
import pandas as pd
# -
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# + deletable=true editable=true
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
# -
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# + deletable=true editable=true
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
# -
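# A quick sanity check of the escaping (a sketch; run it after the cell above):
assert html_escape('Statistics & "Data" Science') == 'Statistics &amp; &quot;Data&quot; Science'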
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# + deletable=true editable=true
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += """collection: publications"""
md += """\npermalink: /publication/""" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
if len(str(item.paper_url)) > 5:
md += "\n[Download paper here](" + item.paper_url + ")\n"
md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
# -
# These files are in the publications directory, one directory below where we're working from.
# + deletable=true editable=true
# !ls ../_publications/
# + deletable=true editable=true
# !cat ../_publications/2009-10-01-paper-title-number-1.md
# + deletable=true editable=true
| markdown_generator/publications.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Installation
# - `pip install eleanor` ([eleanor Docs](http://adina.feinste.in/eleanor))
# - Through Github ([eleanor GitHub](https://github.com/afeinstein20/eleanor))
# +
import eleanor
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.size'] = 16
# -
# ## Step 1: Initiating eleanor.Source
# To use `eleanor` you need some identifier for your target. This can be either the TIC ID, a set of coordinates, a Gaia ID, or the name of your star. If you know the sector your target was observed in, that's great! If not, that's okay. You don't need to set the `sector` argument. Instead, `eleanor` will pass back the latest sector your target was observed in. For example, for a CVZ target, `eleanor.Source` will return data from Sector 13.
star = eleanor.Source(name='WASP-100', sector=3)
# Above, we have downloaded a few `eleanor` data products: a postcard, a 2D background model on the postcard level, and a pointing model. These products are currently only available for the first 13 sectors, but will be created for the Northern hemisphere (Sectors 14-26) as well.
#
# ----
#
# Whatever identifier you pass in, `eleanor` will crossmatch with the corresponding TIC ID, coordinates, and/or Gaia ID.
star.tic, star.coords, star.gaia
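# The same target could also be looked up by its TIC ID instead of its name; a minimal sketch
# (this re-downloads the source information and is shown here only as an illustration):
star_by_tic = eleanor.Source(tic=star.tic, sector=3)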
# The `eleanor.Source` class also provides information on where your target was observed on the TESS CCDs:
star.sector, star.camera, star.chip
# ## Step 2: Making a light curve
# `eleanor.Source` set us up with everything we need to make the light curve. To do this, we need to create an `eleanor.TargetData` object. There are some additional arguments that we can use here, such as setting the Target Pixel File (TPF) height and width, creating a point-spread function (PSF) modeled light curve, and creating a principal component analysis (PCA) light curve.
#
# We'll do both the PSF light curve, by setting `do_psf=True`, and the PCA light curve, by setting `do_pca=True`, for this example.
data = eleanor.TargetData(star, do_psf=True, do_pca=True)
# In addition to different types of light curves, we are also trying a few different background subtraction methods to remove as much background noise as possible. The three options `eleanor` tries are:
# - 1D postcard background: A constant calculated from each postcard frame, masking stars.
# - 1D TPF background: A constant calculated from each TPF frame, masking stars.
# - 2D background: The 2D background pixels are subtracted from the TPF.
#
# The postcard and the 2D modeled background from the postcard look like this:
# We can check which background produced the best light curve. Because `eleanor` light curves are optimized for transit searches, "best" here means the background that minimizes the CDPP (combined differential photometric precision).
plt.title('2D background')
plt.imshow(data.post_obj.background2d[100], vmin=0, vmax=20)
plt.colorbar();
data.bkg_type
# "PC_LEVEL" means the 1D postcard level background removed the most systematics. This background model looks like:
plt.figure(figsize=(14,4))
plt.plot(data.time, data.post_obj.bkg, 'w', lw=3)
plt.xlabel('time [bjd-2457000]')
plt.ylabel('background flux');
# In the same spirit, the aperture selected by `eleanor` also minimizes the CDPP. The aperture, over the target, selected looks like:
plt.imshow(data.tpf[100])
plt.imshow(data.aperture, alpha=0.4, cmap='Greys_r');
# Or this can be better seen using the `eleanor.Visualize` class:
vis = eleanor.Visualize(data)
fig = vis.aperture_contour()
# ## Step 3: Look at your light curves!
# It's time to see what was created:
# +
q = data.quality == 0
plt.figure(figsize=(14,4))
plt.plot(data.time[q], data.raw_flux[q]/np.nanmedian(data.raw_flux[q])-0.005, 'w', lw=3,
label='RAW')
plt.plot(data.time[q], data.corr_flux[q]/np.nanmedian(data.corr_flux[q])+0.015, 'k', lw=3,
label='CORR')
plt.ylim(0.98,1.02)
plt.xlabel('time [bjd-2457000]')
plt.ylabel('normalized flux')
plt.legend();
# -
plt.figure(figsize=(14,4))
plt.plot(data.time[q], data.pca_flux[q]/np.nanmedian(data.pca_flux[q])-0.005, 'darkorange', lw=3,
label='PCA')
plt.plot(data.time[q], data.psf_flux[q]/np.nanmedian(data.psf_flux[q])+0.015, 'skyblue', lw=3,
label='PSF')
plt.ylim(0.98,1.02)
plt.xlabel('time [bjd-2457000]')
plt.ylabel('normalized flux')
plt.legend();
# If you're missing all of the great tools implemented by `lightkurve`, we have an easy fix for you. By calling `eleanor.to_lightkurve()`, you will get a `lightkurve.LightCurve` object. You can also specify which flux you want passed into the object.
lc = data.to_lightkurve(flux=data.corr_flux)
lc.normalize().plot()
# ## Step 1 (redone): What if my target was observed in multiple sectors?
#
# Instead of initiating an `eleanor.Source` class, you can call `eleanor.multi_sectors`, which will return a list of `eleanor.Source` objects per each sector your target was observed in. If you want specific sectors, you can pass those in as a list/array. Otherwise, if you want all of the sectors your target was observed in, you can pass in `sectors="all"` and `eleanor` will fetch all of those for you.
stars = eleanor.multi_sectors(tic=star.tic, sectors=np.arange(2,6,1,dtype=int))
stars
# It downloads all of the postcards, 2D postcard backgrounds, and pointing models. Now, to get a light curve from each sector, you can pass it into `eleanor.TargetData` in a loop:
# +
data = []
for s in stars:
datum = eleanor.TargetData(s)
data.append(datum)
# -
# And to look at our light curves:
plt.figure(figsize=(14,4))
for d in data:
q = d.quality == 0
plt.plot(d.time[q], d.corr_flux[q]/np.nanmedian(d.corr_flux[q]), 'w', lw=3)
plt.ylim(0.99,1.005)
plt.xlabel('time [bjd-2457000]')
plt.ylabel('normalized flux');
# ## Step 4 (optional): Remember that visualization object?
# Earlier we created an `eleanor.Visualize` object that allowed us to overplot a countour of the aperture on the TPF. There are other things in there to use as well when vetting your target!
#
# One of the most useful tricks is creating a pixel-by-pixel light curve grid, to see if the signal you're seeing in the light curve is from your source or something nearby:
fig = vis.pixel_by_pixel()
# Okay, maybe not useful like this, but zooming in and assigning the light curve color to be that of the TPF pixel:
fig = vis.pixel_by_pixel(colrange=[4,10], rowrange=[4,10], color_by_pixel=True)
# It looks like the signal is coming from the source! But just in case, you can overplot nearby Gaia sources on your TPF (thanks <NAME>!). The points are related to the magnitude of the source:
fig = vis.plot_gaia_overlay(magnitude_limit=16)
# ## Step 5 (technically optional, but should consider!): Crossmatching
#
# Within `eleanor`, we also have tools to see if your target has a light curve produced by the TASOC team (asteroseismology) or the Oelkers & Stassun difference imaging pipeline, or was observed at 2-minute cadence! We can start digging with the following:
crossmatch = eleanor.Crossmatch(data[0])
# To check for 2-minute data, we use `lightkurve` behind the scenes:
crossmatch.two_minute()
# Because this returns a `lightkurve.SearchResult` object, you can download the data product right from there and go about using the other `lightkurve` tools.
#
# -----
#
# To check out the TASOC pipeline:
crossmatch.tasoc_lc()
plt.figure(figsize=(14,4))
q = crossmatch.tasoc_pixel_quality == 0
plt.plot(crossmatch.tasoc_time[q], crossmatch.tasoc_flux_raw[q], 'k', lw=3)
plt.xlabel('time [BJD - 2457000]')
plt.ylabel('flux');
# To check the Oelkers & Stassun light curves (it should be noted that these light curves are in magnitudes!):
crossmatch.oelkers_lc()
plt.figure(figsize=(14,4))
plt.plot(crossmatch.os_time, crossmatch.os_mag, 'w', lw=3)
plt.xlabel('time [BJD - 2457000]')
plt.ylabel('magnitude');
| notebooks/online_tess_science_eleanor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ex3 - Getting and Knowing your Data
#
#
# ### Step 1. Import the necessary libraries
# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user).
# ### Step 3. Assign it to a variable called users and use the 'user_id' as index
# ### Step 4. See the first 25 entries
# ### Step 5. See the last 10 entries
# ### Step 6. What is the number of observations in the dataset?
# ### Step 7. What is the number of columns in the dataset?
# ### Step 8. Print the name of all the columns.
# ### Step 9. How is the dataset indexed?
# "the index" (aka "the labels")
# ### Step 10. What is the data type of each column?
# ### Step 11. Print only the occupation column
# ### Step 12. How many different occupations are in this dataset?
# ### Step 13. What is the most frequent occupation?
# ### Step 14. Summarize the DataFrame.
# ### Step 15. Summarize all the columns
# ### Step 16. Summarize only the occupation column
# ### Step 17. What is the mean age of users?
# ### Step 18. What is the age with least occurrence?
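# A minimal pandas sketch of these steps follows. It is an illustrative outline rather than the official answer key, and it assumes the `u.user` file is pipe-separated, as in the original MovieLens dataset.
# +
import pandas as pd

# Steps 1-3: import pandas, read the dataset, use user_id as the index
url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user'
users = pd.read_csv(url, sep='|', index_col='user_id')

users.head(25)                                      # Step 4: first 25 entries
users.tail(10)                                      # Step 5: last 10 entries
print(users.shape[0], 'observations')               # Step 6
print(users.shape[1], 'columns')                    # Step 7
print(users.columns.tolist())                       # Step 8: column names
print(users.index)                                  # Step 9: the index
print(users.dtypes)                                 # Step 10: data types
users['occupation']                                 # Step 11
print(users['occupation'].nunique())                # Step 12: number of different occupations
print(users['occupation'].value_counts().idxmax())  # Step 13: most frequent occupation
users.describe()                                    # Step 14: summary of numeric columns
users.describe(include='all')                       # Step 15: summary of all columns
users['occupation'].describe()                      # Step 16
print(users['age'].mean())                          # Step 17: mean age
print(users['age'].value_counts().tail())           # Step 18: least frequent ages
# -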
| week4/day2/exercises/01_Getting_&_Knowing_Your_Data/Occupation/Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pytorch3d (local)
# language: python
# name: pytorch3d_local
# ---
# + colab={} colab_type="code" id="_Ip8kp4TfBLZ"
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# + [markdown] colab_type="text" id="kuXHJv44fBLe"
# # Render a textured mesh
#
# This tutorial shows how to:
# - load a mesh and textures from an `.obj` file.
# - set up a renderer
# - render the mesh
# - vary the rendering settings such as lighting and camera position
# - use the batching features of the pytorch3d API to render the mesh from different viewpoints
# + [markdown] colab_type="text" id="Bnj3THhzfBLf"
# ## 0. Install and Import modules
# + [markdown] colab_type="text" id="okLalbR_g7NS"
# If `torch`, `torchvision` and `pytorch3d` are not installed, run the following cell:
# + colab={"base_uri": "https://localhost:8080/", "height": 717} colab_type="code" id="musUWTglgxSB" outputId="16d1a1b2-3f7f-43ed-ca28-a4d236cc0572"
# !pip install torch torchvision
# !pip install 'git+https://github.com/facebookresearch/pytorch3d.git@stable'
# + colab={} colab_type="code" id="nX99zdoffBLg"
import os
import torch
import matplotlib.pyplot as plt
from skimage.io import imread
# Util function for loading meshes
from pytorch3d.io import load_objs_as_meshes, load_obj
# Data structures and functions for rendering
from pytorch3d.structures import Meshes, Textures
from pytorch3d.renderer import (
look_at_view_transform,
OpenGLPerspectiveCameras,
PointLights,
DirectionalLights,
Materials,
RasterizationSettings,
MeshRenderer,
MeshRasterizer,
SoftPhongShader
)
# add path for demo utils functions
import sys
import os
sys.path.append(os.path.abspath(''))
# + [markdown] colab_type="text" id="Lxmehq6Zhrzv"
# If using **Google Colab**, fetch the utils file for plotting image grids:
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="HZozr3Pmho-5" outputId="be5eb60d-5f65-4db1-cca0-44ee68c8f5fd"
# !wget https://raw.githubusercontent.com/facebookresearch/pytorch3d/master/docs/tutorials/utils/plot_image_grid.py
from plot_image_grid import image_grid
# + [markdown] colab_type="text" id="g4B62MzYiJUM"
# OR if running **locally** uncomment and run the following cell:
# + colab={} colab_type="code" id="paJ4Im8ahl7O"
# from utils import image_grid
# + [markdown] colab_type="text" id="5jGq772XfBLk"
# ### 1. Load a mesh and texture file
#
# Load an `.obj` file and its associated `.mtl` file and create a **Textures** and **Meshes** object.
#
# **Meshes** is a unique data structure provided in PyTorch3D for working with batches of meshes of different sizes.
#
# **Textures** is an auxiliary data structure for storing texture information about meshes.
#
# **Meshes** has several class methods which are used throughout the rendering pipeline.
# + [markdown] colab_type="text" id="a8eU4zo5jd_H"
# If running this notebook using **Google Colab**, run the following cell to fetch the mesh obj and texture files and save it at the path `data/cow_mesh`:
# If running locally, the data is already available at the correct path.
# + colab={"base_uri": "https://localhost:8080/", "height": 578} colab_type="code" id="tTm0cVuOjb1W" outputId="6cd7e2ec-65e1-4dcc-99e8-c347bc504f0a"
# !mkdir -p data/cow_mesh
# !wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.obj
# !wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow.mtl
# !wget -P data/cow_mesh https://dl.fbaipublicfiles.com/pytorch3d/data/cow_mesh/cow_texture.png
# + colab={} colab_type="code" id="gi5Kd0GafBLl"
# Setup
if torch.cuda.is_available():
device = torch.device("cuda:0")
torch.cuda.set_device(device)
else:
device = torch.device("cpu")
# Set paths
DATA_DIR = "./data"
obj_filename = os.path.join(DATA_DIR, "cow_mesh/cow.obj")
# Load obj file
mesh = load_objs_as_meshes([obj_filename], device=device)
texture_image=mesh.textures.maps_padded()
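# As noted above, **Meshes** exposes several class methods used throughout the rendering pipeline.
# A quick look at a couple of them (a sketch using the `mesh` just loaded; the exact shapes depend on the .obj file):
# +
verts = mesh.verts_packed()   # (V, 3) tensor with all vertex positions
faces = mesh.faces_packed()   # (F, 3) tensor with the triangle vertex indices
print(len(mesh), verts.shape, faces.shape)
# -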
# + [markdown] colab_type="text" id="5APAQs6-fBLp"
# #### Let's visualize the texture map
# + colab={"base_uri": "https://localhost:8080/", "height": 428} colab_type="code" id="YipUhrIHfBLq" outputId="48987b1d-5cc1-4c2a-cb3c-713d64f6a38d"
plt.figure(figsize=(7,7))
plt.imshow(texture_image.squeeze().cpu().numpy())
plt.grid("off");
plt.axis('off');
# + [markdown] colab_type="text" id="GcnG6XJ6fBLu"
# ## 2. Create a renderer
#
# A renderer in PyTorch3D is composed of a **rasterizer** and a **shader** which each have a number of subcomponents such as a **camera** (orthographic/perspective). Here we initialize some of these components and use default values for the rest.
#
# In this example we will first create a **renderer** which uses a **perspective camera**, a **point light** and applies **phong shading**. Then we learn how to vary different components using the modular API.
# + colab={} colab_type="code" id="dX466mWnfBLv"
# Initialize an OpenGL perspective camera.
# With world coordinates +Y up, +X left and +Z in, the front of the cow is facing the -Z direction.
# So we move the camera by 180 in the azimuth direction so it is facing the front of the cow.
R, T = look_at_view_transform(2.7, 0, 180)
cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
# Define the settings for rasterization and shading. Here we set the output image to be of size
# 512x512. As we are rendering images for visualization purposes only we will set faces_per_pixel=1
# and blur_radius=0.0. We also set bin_size and max_faces_per_bin to None which ensure that
# the faster coarse-to-fine rasterization method is used. Refer to rasterize_meshes.py for
# explanations of these parameters. Refer to docs/notes/renderer.md for an explanation of
# the difference between naive and coarse-to-fine rasterization.
raster_settings = RasterizationSettings(
image_size=512,
blur_radius=0.0,
faces_per_pixel=1,
)
# Place a point light in front of the object. As mentioned above, the front of the cow is facing the
# -z direction.
lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]])
# Create a phong renderer by composing a rasterizer and a shader. The textured phong shader will
# interpolate the texture uv coordinates for each vertex, sample from a texture image and
# apply the Phong lighting model
renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=cameras,
raster_settings=raster_settings
),
shader=SoftPhongShader(
device=device,
cameras=cameras,
lights=lights
)
)
# + [markdown] colab_type="text" id="KyOY5qXvfBLz"
# ## 3. Render the mesh
# + [markdown] colab_type="text" id="8VkRA4qJfBL0"
# The light is in front of the object so it is bright and the image has specular highlights.
# + colab={"base_uri": "https://localhost:8080/", "height": 592} colab_type="code" id="gBLZH8iUfBL1" outputId="cc3cd3f0-189e-4497-ce47-e64b4da542e8"
images = renderer(mesh)
plt.figure(figsize=(10, 10))
plt.imshow(images[0, ..., :3].cpu().numpy())
plt.grid("off");
plt.axis("off");
# + [markdown] colab_type="text" id="k161XF3sfBL5"
# ## 4. Move the light behind the object and re-render
#
# We can pass arbitrary keyword arguments to the `rasterizer`/`shader` via the call to the `renderer`, so the renderer does not need to be reinitialized if any of the settings change.
#
# In this case, we can simply update the location of the lights and pass them into the call to the renderer.
#
# The image is now dark as there is only ambient lighting, and there are no specular highlights.
# + colab={} colab_type="code" id="BdWkkeibfBL6"
# Now move the light so it is on the +Z axis which will be behind the cow.
lights.location = torch.tensor([0.0, 0.0, +1.0], device=device)[None]
images = renderer(mesh, lights=lights)
# + colab={"base_uri": "https://localhost:8080/", "height": 592} colab_type="code" id="UmV3j1YffBL9" outputId="2e8edca0-5bd8-4a2f-a160-83c4b0520123"
plt.figure(figsize=(10, 10))
plt.imshow(images[0, ..., :3].cpu().numpy())
plt.grid("off");
plt.axis("off");
# + [markdown] colab_type="text" id="t93aVotMfBMB"
# ## 5. Rotate the object, modify the material properties or light properties
#
# We can also change many other settings in the rendering pipeline. Here we:
#
# - change the **viewing angle** of the camera
# - change the **position** of the point light
# - change the **material reflectance** properties of the mesh
# + colab={} colab_type="code" id="4mYXYziefBMB"
# Rotate the object by increasing the elevation and azimuth angles
R, T = look_at_view_transform(dist=2.7, elev=10, azim=-150)
cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
# Move the light location so the light is shining on the cow's face.
lights.location = torch.tensor([[2.0, 2.0, -2.0]], device=device)
# Change specular color to green and change material shininess
materials = Materials(
device=device,
specular_color=[[0.0, 1.0, 0.0]],
shininess=10.0
)
# Re render the mesh, passing in keyword arguments for the modified components.
images = renderer(mesh, lights=lights, materials=materials, cameras=cameras)
# + colab={"base_uri": "https://localhost:8080/", "height": 592} colab_type="code" id="rHIxIfh5fBME" outputId="1ca2d337-2983-478f-b3c9-d64b84ba1a31"
plt.figure(figsize=(10, 10))
plt.imshow(images[0, ..., :3].cpu().numpy())
plt.grid("off");
plt.axis("off");
# + [markdown] colab_type="text" id="17c4xmtyfBMH"
# ## 6. Batched Rendering
#
# One of the core design choices of the PyTorch3D API is to support **batched inputs for all components**.
# The renderer and associated components can take batched inputs and **render a batch of output images in one forward pass**. We will now use this feature to render the mesh from many different viewpoints.
#
# + colab={} colab_type="code" id="CDQKebNNfBMI"
# Set batch size - this is the number of different viewpoints from which we want to render the mesh.
batch_size = 20
# Create a batch of meshes by repeating the cow mesh and associated textures.
# Meshes has a useful `extend` method which allows us do this very easily.
# This also extends the textures.
meshes = mesh.extend(batch_size)
# Get a batch of viewing angles.
elev = torch.linspace(0, 180, batch_size)
azim = torch.linspace(-180, 180, batch_size)
# All the cameras helper methods support mixed type inputs and broadcasting. So we can
# view the camera from the same distance and specify dist=2.7 as a float,
# and then specify elevation and azimuth angles for each viewpoint as tensors.
R, T = look_at_view_transform(dist=2.7, elev=elev, azim=azim)
cameras = OpenGLPerspectiveCameras(device=device, R=R, T=T)
# Move the light back in front of the cow which is facing the -z direction.
lights.location = torch.tensor([[0.0, 0.0, -3.0]], device=device)
# + colab={} colab_type="code" id="gyYJCwEDfBML"
# We can pass arbitrary keyword arguments to the rasterizer/shader via the renderer
# so the renderer does not need to be reinitialized if any of the settings change.
images = renderer(meshes, cameras=cameras, lights=lights)
# -
image_grid(images.cpu().numpy(), rows=4, cols=5, rgb=True)
# + [markdown] colab_type="text" id="t3qphI1ElUb5"
# ## 7. Conclusion
# In this tutorial we learned how to **load** a textured mesh from an obj file, initialize a PyTorch3D data structure called **Meshes**, set up a **Renderer** consisting of a **Rasterizer** and a **Shader**, and modify several components of the rendering pipeline.
| docs/tutorials/render_textured_meshes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from tools import *
from models import *
import plotly.graph_objects as go
import plotly.figure_factory as ff
from Bio.SeqUtils import GC
import pickle
import warnings
warnings.filterwarnings('ignore')
#https://github.com/PuYuQian/PyDanQ/blob/master/DanQ_train.py
# -
#for reproducibility
seed = 42
np.random.seed(seed)
torch.manual_seed(seed)
# +
# Constants and hyperparameters (TODO: move these to a YAML config file)
# Device configuration
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Hyper parameters
num_epochs = 15 #15
batch_size = 100
learning_rate = 0.003
# -
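# A minimal sketch of the "move to a YAML config" idea noted above (hypothetical inline config; assumes PyYAML is available):
# +
import yaml

config_yaml = """
num_epochs: 15
batch_size: 100
learning_rate: 0.003
"""
cfg = yaml.safe_load(config_yaml)
# e.g. num_epochs, batch_size, learning_rate = cfg["num_epochs"], cfg["batch_size"], cfg["learning_rate"]
# -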
dataloaders, target_labels, train_out = load_datas("../data/tf_peaks_50_partial.h5", batch_size)
# decode the target label names from bytes to strings
target_labels = [i.decode("utf-8") for i in target_labels]
# +
num_classes = len(target_labels) #number of classes
model = DanQ(num_classes).to(device)
criterion = nn.BCEWithLogitsLoss() #- no weights
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# -
model, train_error, test_error, train_fscore, test_fscore = train_model(dataloaders['train'],
dataloaders['valid'], model,
device, criterion, optimizer,
num_epochs,
"../weights_multimodel_partial_DanQ",
"", verbose=True)
model.load_state_dict(torch.load("../weights_multimodel_partial_DanQ/model_epoch_9_.pth"))
model.eval();
# +
# %matplotlib inline
model.to(device);
labels_E, outputs_E = run_test(model, dataloaders['test'], device)
compute_metrics(labels_E, outputs_E)
| notebooks/Train_multi_model_DanQ.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# +
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
$('div.prompt').hide();
} else {
$('div.input').show();
$('div.prompt').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Code Toggle"></form>''')
# + [markdown] slideshow={"slide_type": "slide"}
# # Hackathon 2
#
# ** May 3, 2016 – Northwestern University **
#
# <img src="../../../images/hackathon2_photo.jpg" alt="">
#
# ## Note from the organizer
#
# CHiMaD hosted the second Phase Field Methods Hackathon on May 3, 2016, in connection with its [Phase Field Methods Workshop III]({{ site.baseurl }}/workshops/) on May 4 - 5, 2016. The motivation for the hackathon comes from one of CHiMaD's missions: to develop community standards for phase field modeling in materials science, and to distribute community codes for phase field modeling. As part of this mission, CHiMaD is working on developing and distributing a set of standard problems for phase field modeling, analogous to how the micromagnetic community developed a set of [standard problems for micromagnetic modeling](http://www.ctcms.nist.gov/~rdm/mumag.org.html). A [first hackathon]({{ site.baseurl }}/hackathons/hackathon1/) was held on October 14 – 15, 2015, with problem sets focusing on Cahn-Hilliard and coupled Allen-Cahn/Cahn-Hilliard problems. This second Hackathon will explore other canonical problems central to phase-field type modeling of materials.
#
# Teams consisting of two students or postdocs were given the set of problems. The problems involve two kinds of phase field modeling, and each set contains sub-problems of increasing difficulty. The problems are completely defined in terms of initial conditions, geometry, and material parameters. The teams will have internet access and will be tasked with attempting to solve the problems within 24 hours using whatever numerical codes they have at their disposal - there will be no codes provided at the hackathon. All attendees are expected to bring their own laptops and connect to the servers they regularly use for running their codes.
#
# The goal of the hackathon is to see how different codes and different approaches can handle the problems with respect to accuracy and speed, and also to serve as a test bed for the development of standard problems. The aim of the hackathon is not to produce winners or losers, but to advance our understanding of phase field modeling: in that context, all results or attempts at solving the problems will be valuable. Each team will be required to present their results at the Phase Field Workshop on the morning of May 4.
# -
# ## Hackathon Challenge Problems and Solutions
#
# ### Challenge Problems
#
# * [Problem 1: Dendritic Growth in 2D](../problem1.ipynb)
# * [Problem 2: Linear Elasticity in 3D](../problem2.ipynb)
#
# <br>
# <br>
# ### Solutions
# #### University of Connecticut: Moose
#
# [GitHub Repo](https://github.com/kcpitike/Hackathon-2)
#
# <iframe src="//www.slideshare.net/slideshow/embed_code/key/1qcfv6PO3oSgS" width="425" height="355" frameborder="0" marginwidth="0" marginheight="0" scrolling="no" style="border:1px solid #CCC; border-width:1px; margin-bottom:5px; max-width: 100%;" allowfullscreen> </iframe>
#
# <strong>
# <a href="//www.slideshare.net/DanielWheeler18/chimad-phase-field-hackathon-2-university-of-connecticut" title="CHiMaD Hackathon 2: University of Connecticut" target="_blank">CHiMaD Phase Field Hackathon 2: University of Connecticut</a> </strong> by <strong>
# <a target="_blank" href="https://github.com/kcpitike"><NAME></a> </strong> and
# <strong><a target="_blank" href="https://github.com/mangerij"><NAME></a>
# </strong>
# <br>
# <br>
# #### Pennsylvania State University: Moose
#
# [GitHub Repo](https://github.com/wd15/penn-hackathon2/tree/master)
#
# <iframe src="//www.slideshare.net/slideshow/embed_code/key/Dbgdhk1Zs5JWtC" width="425" height="355" frameborder="0" marginwidth="0" marginheight="0" scrolling="no" style="border:1px solid #CCC; border-width:1px; margin-bottom:5px; max-width: 100%;" allowfullscreen> </iframe>
#
#
# <strong> <a href="//www.slideshare.net/DanielWheeler18/chimad-hackathon-2-pennsylvania-state-university" title="CHiMaD Hackathon 2: Pennsylvania State University" target="_blank">CHiMaD Hackathon 2: Pennsylvania State University</a>
# </strong> by
# <strong><a target="_blank" href="https://github.com/tonkmr"><NAME></a></strong>,
# <strong><a target="_blank" href="https://github.com/itgreenquist">I. Greenquist</a></strong> and
# <strong><a target="_blank" href="https://github.com/kasra83"><NAME></a></strong>
#
# <br>
# <br>
# #### McGill University: Unknown FE Code and Julia
#
# [GitHub Repo](https://github.com/nsmith5/chimadQ2)
#
# <iframe src="//www.slideshare.net/slideshow/embed_code/key/4QZp8GXqomEC0B" width="425" height="355" frameborder="0" marginwidth="0" marginheight="0" scrolling="no" style="border:1px solid #CCC; border-width:1px; margin-bottom:5px; max-width: 100%;" allowfullscreen> </iframe>
#
#
# <strong> <a href="//www.slideshare.net/DanielWheeler18/chimad-hackathon-2-team-mcgill" title="CHiMaD Hackathon 2: Team mcgill" target="_blank">CHiMaD Hackathon 2: McGill University</a>
# </strong> by
# <strong><a target="_blank" href="https://github.com/nsmith5"><NAME></a></strong>,
# <strong>T Pinomma</strong> and
# <strong><NAME></strong>
#
# <br>
# <br>
# #### NIST: FiPy
#
# [GitHub Repo](https://github.com/usnistgov/PhaseFieldHackathon-FiPy)
# <br>
# <br>
# #### INL: Moose
#
# <iframe src="//www.slideshare.net/slideshow/embed_code/key/E1OGX4YyH0HtzK" width="425" height="355" frameborder="0" marginwidth="0" marginheight="0" scrolling="no" style="border:1px solid #CCC; border-width:1px; margin-bottom:5px; max-width: 100%;" allowfullscreen> </iframe> <strong> <a href="//www.slideshare.net/DanielWheeler18/chimad-hackathon-2" title="CHiMaD Hackathon 2" target="_blank">CHiMaD Hackathon 2: INL</a> </strong> by
# <strong><NAME></strong> and
# <strong><NAME></strong>
#
# <br>
# <br>
# #### University of Michigan: PRISMS
#
# <iframe src="//www.slideshare.net/slideshow/embed_code/key/3ERQSQGzAh1dZq" width="425" height="355" frameborder="0" marginwidth="0" marginheight="0" scrolling="no" style="border:1px solid #CCC; border-width:1px; margin-bottom:5px; max-width: 100%;" allowfullscreen> </iframe> <strong> <a href="//www.slideshare.net/DanielWheeler18/chimad-hackathon-2-university-of-michigan" title="CHiMaD Hackathon 2: University of Michigan" target="_blank">CHiMaD Hackathon 2: University of Michigan</a> </strong> by
# <strong><NAME></strong> and
# <strong><NAME></strong>
#
| hackathons/hackathon2/index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import os
from glob import glob
from pprint import pprint
import json
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
import matplotlib.pyplot as plt
import cellcycle.PlottingTools as plottingTools
from cellcycle.ParameterSet import ParameterSet
import cellcycle.DataStorage as dataStorage
import cellcycle.DataAnalysis as dataAnalysis
import cellcycle.MakeDataframe as makeDataframe
from cellcycle import mainClass
# -
replication_speed = 'finite' # options 'finite' or 'infinite'
model = 'standard'           # assumption: referenced below but not defined in this notebook; 'standard' plots n_ori(t), any other value plots the origin density phi(t)
version = 'neg_auto'         # assumption: variant tag referenced below and used in the output file name
# +
file_path_input_params_json = '../../input_params.json'
input_param_dict = mainClass.extract_variables_from_input_params_json(file_path_input_params_json)
root_path = input_param_dict["DATA_FOLDER_PATH"]
simulation_location = 'SI/S1_gene_expr_ribo/'+replication_speed
file_path = os.path.join(root_path, simulation_location)
print('file_path', file_path)
parameter_path = os.path.join(file_path, 'parameter_set.csv')
print('parameter_path', parameter_path)
pinkish_red = (247 / 255, 109 / 255, 109 / 255)
green = (0 / 255, 133 / 255, 86 / 255)
dark_blue = (36 / 255, 49 / 255, 94 / 255)
light_blue = (168 / 255, 209 / 255, 231 / 255)
blue = (55 / 255, 71 / 255, 133 / 255)
yellow = (247 / 255, 233 / 255, 160 / 255)
dark_yellow = (235 / 255, 201 / 255, 27 / 255)
print(file_path)
# -
# # Make data frame from time traces
# +
data_frame = makeDataframe.make_dataframe(file_path)
time_traces_data_frame = pd.read_hdf(data_frame['path_dataset'].iloc[0], key='dataset_time_traces')
v_init_data_frame = pd.read_hdf(data_frame['path_dataset'].iloc[0], key='dataset_init_events')
v_init = v_init_data_frame.iloc[3]['v_init']
t_init_list = v_init_data_frame['t_init'].to_numpy()
print(t_init_list)
v_d_data_frame = pd.read_hdf(data_frame['path_dataset'].iloc[0], key='dataset_div_events')
t_div_list = v_d_data_frame['t_d'].to_numpy()
# +
time = np.array(time_traces_data_frame["time"])
volume = np.array(time_traces_data_frame["volume"])
n_ori = np.array(time_traces_data_frame["n_ori"])
n_p = np.array(time_traces_data_frame["N_init"])
n_s = np.array(time_traces_data_frame["sites_total"])
length_total = np.array(time_traces_data_frame["length_total"])
total_conc = n_p / volume
free_conc = np.array(time_traces_data_frame["free_conc"])
print(time.size)
t_0 = time[volume==v_d_data_frame['v_b'][2]]
indx_0 = np.where(time==t_0)[0][0]
t_f = time[volume==v_d_data_frame['v_b'][5]]
indx_f = np.where(time==t_f)[0][0]+20
print(indx_0, indx_f)
print(t_0, t_f)
n_ori_cut = n_ori[indx_0:indx_f]
length_total_cut = length_total[indx_0:indx_f]
time_cut = time[indx_0:indx_f]
volume_cut = volume[indx_0:indx_f]
n_ori_density_cut = n_ori_cut / volume_cut
n_p_cut = n_p[indx_0:indx_f]
n_s_cut = n_s[indx_0:indx_f]
total_conc_cut = total_conc[indx_0:indx_f]
free_conc_cut = free_conc[indx_0:indx_f]
t_init_list_cut_1 = t_init_list[t_init_list>t_0]
t_init_list_cut = t_init_list_cut_1[t_init_list_cut_1<t_f]
t_div_list_cut_1 = t_div_list[t_div_list>t_0]
t_div_list_cut = t_div_list_cut_1[t_div_list_cut_1<=t_f]
print(t_div_list, t_div_list_cut)
# -
# # Plot three figures
# +
x_axes_list = [time_cut, time_cut, time_cut, time_cut]
if model == 'standard':
phi = n_ori_cut
label = r'$n_{\rm ori}(t)$'
else:
if replication_speed == 'finite':
phi = n_ori_cut/length_total_cut
else:
phi = np.ones(volume_cut.size)
label = r'$\phi(t)$'
y_axes_list = [volume_cut, phi, n_p_cut, total_conc_cut, free_conc_cut]
color_list = [green, dark_yellow, dark_blue, pinkish_red]
y_min_list = [0,0,0,0]
y_max_list = [1, 1.2, 1.2, 1.2]
label_list = [r'$V(t) \, [\mu m^3]$', label, r'$N(t)$', r'$[p]_{\rm T}(t)$']
doubling_time = 1/data_frame.iloc[0]['doubling_rate']
fig, ax = plt.subplots(4, figsize=(3.2,4))
plt.xlabel(r'time [$\tau_{\rm d}$]')
for item in range(0, len(label_list)):
ax[item].set_ylabel(label_list[item])
ax[item].plot(x_axes_list[item], y_axes_list[item], color=color_list[item], zorder=3)
ax[item].set_ylim(ymin=0)
ax[item].tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
ax[item].spines["top"].set_visible(False)
ax[item].spines["right"].set_visible(False)
ax[item].margins(0)
for t_div in t_div_list_cut:
ax[item].axvline(x=t_div,
ymin=y_min_list[item],
ymax=y_max_list[item],
c="black",
zorder=0,
linewidth=0.8,
clip_on=False)
for t_init in t_init_list_cut:
ax[item].axvline(x=t_init,
ymin=y_min_list[item],
ymax=y_max_list[item],
c="black",
zorder=0,
linewidth=0.8,
linestyle='--',
clip_on=False)
ax[0].set_yticks([0,v_init])
ax[0].set_yticklabels(['0', r'$v^\ast$'])
ax[0].get_yticklabels()[1].set_color(color_list[0])
# # ax[0].tick_params(axis='y', colors=green)
ax[0].axhline(y=v_init, color=green, linestyle='--', label=r'$v^\ast$')
# ax[0].axhline(y=2*v_init, color=green, linestyle='--')
ax[1].set_yticks([0,1, 2])
ax[1].set_yticklabels(['0', '1', '2'])
# ax[1].get_yticklabels()[1].set_color(dark_yellow)
# ax[2].axhline(y=data_frame.iloc[0]['michaelis_const_regulator'], color=pinkish_red, linestyle='--')
# ax[2].set_yticks([0, data_frame.iloc[0]['michaelis_const_regulator']])
# ax[2].set_yticklabels([0, r'$K_{\rm D}^{\rm r}$'])
# ax[2].get_yticklabels()[1].set_color(color_list[2])
# ax[3].axhline(y=data_frame.iloc[0]['critical_free_conc'], color=color_list[3], linestyle='--')
# ax[3].set_yticks([0, data_frame.iloc[0]['critical_free_conc']])
# ax[3].set_yticklabels(['0', r'$K_{\rm D}^{\rm ori}$'])
# ax[3].get_yticklabels()[1].set_color(color_list[3])
# ax[3].tick_params(bottom=True, labelbottom=True)
# ax[3].tick_params(axis='x', colors='black')
if version == 'neg_auto':
ax[3].axhline(y=data_frame.iloc[0]['michaelis_const_initiator'], color=pinkish_red, linestyle='--')
ax[3].set_yticks([0, data_frame.iloc[0]['michaelis_const_initiator']])
ax[3].set_yticklabels([0, r'$K_{\rm D}^{\rm p}$'])
ax[3].set(ylim=(0,np.amax(total_conc_cut)*1.2))
ax[3].set_xticks([time_cut[0],
time_cut[0]+ doubling_time,
time_cut[0]+ 2*doubling_time,
time_cut[0]+ 3*doubling_time])
ax[3].set_xticklabels(['0', '1', '2', '3'])
ax[3].margins(0)
ax[3].tick_params( # ticks along the bottom edge are off
bottom=True, # ticks along the top edge are off
labelbottom=True)
print(doubling_time, time_cut[0], time_cut[0]+ doubling_time / np.log(2))
plt.savefig(file_path + '/FigS1_'+version+'_'+replication_speed+'.pdf', format='pdf',bbox_inches='tight')
# -
| notebooks/SI/S01_effect_finite_replication_rate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.032216, "end_time": "2020-10-26T23:23:00.267579", "exception": false, "start_time": "2020-10-26T23:23:00.235363", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# # RadarCOVID-Report
# + [markdown] papermill={"duration": 0.028264, "end_time": "2020-10-26T23:23:00.324567", "exception": false, "start_time": "2020-10-26T23:23:00.296303", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ## Data Extraction
# + papermill={"duration": 3.241531, "end_time": "2020-10-26T23:23:03.596267", "exception": false, "start_time": "2020-10-26T23:23:00.354736", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
import datetime
import json
import logging
import os
import shutil
import tempfile
import textwrap
import uuid
import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np
import pandas as pd
import retry
import seaborn as sns
# %matplotlib inline
# + papermill={"duration": 0.039176, "end_time": "2020-10-26T23:23:03.664830", "exception": false, "start_time": "2020-10-26T23:23:03.625654", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
current_working_directory = os.environ.get("PWD")
if current_working_directory:
os.chdir(current_working_directory)
sns.set()
matplotlib.rcParams["figure.figsize"] = (15, 6)
extraction_datetime = datetime.datetime.utcnow()
extraction_date = extraction_datetime.strftime("%Y-%m-%d")
extraction_previous_datetime = extraction_datetime - datetime.timedelta(days=1)
extraction_previous_date = extraction_previous_datetime.strftime("%Y-%m-%d")
extraction_date_with_hour = datetime.datetime.utcnow().strftime("%Y-%m-%d@%H")
current_hour = datetime.datetime.utcnow().hour
are_today_results_partial = current_hour != 23
# + [markdown] papermill={"duration": 0.029616, "end_time": "2020-10-26T23:23:03.724548", "exception": false, "start_time": "2020-10-26T23:23:03.694932", "status": "completed"} tags=[]
# ### Constants
# + papermill={"duration": 0.142017, "end_time": "2020-10-26T23:23:03.899160", "exception": false, "start_time": "2020-10-26T23:23:03.757143", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
from Modules.ExposureNotification import exposure_notification_io
spain_region_country_code = "ES"
germany_region_country_code = "DE"
default_backend_identifier = spain_region_country_code
backend_generation_days = 7 * 2
daily_summary_days = 7 * 4 * 3
daily_plot_days = 7 * 4
tek_dumps_load_limit = daily_summary_days + 1
default_shared_diagnosis_generation_to_upload_days = 1
# + [markdown] papermill={"duration": 0.029355, "end_time": "2020-10-26T23:23:03.959448", "exception": false, "start_time": "2020-10-26T23:23:03.930093", "status": "completed"} tags=[]
# ### Parameters
# + papermill={"duration": 0.043127, "end_time": "2020-10-26T23:23:04.034246", "exception": false, "start_time": "2020-10-26T23:23:03.991119", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
environment_backend_identifier = os.environ.get("RADARCOVID_REPORT__BACKEND_IDENTIFIER")
if environment_backend_identifier:
report_backend_identifier = environment_backend_identifier
else:
report_backend_identifier = default_backend_identifier
report_backend_identifier
# + papermill={"duration": 2.56465, "end_time": "2020-10-26T23:23:06.630476", "exception": false, "start_time": "2020-10-26T23:23:04.065826", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
efgs_supported_countries_backend_identifier = germany_region_country_code
efgs_supported_countries_backend_client = \
exposure_notification_io.get_backend_client_with_identifier(
backend_identifier=efgs_supported_countries_backend_identifier)
efgs_source_regions = efgs_supported_countries_backend_client.get_supported_countries()
if report_backend_identifier in efgs_source_regions:
default_source_regions = "EFGS"
else:
default_source_regions = report_backend_identifier.split("-")[0].split("@")[0]
environment_source_regions = os.environ.get("RADARCOVID_REPORT__SOURCE_REGIONS")
if environment_source_regions:
report_source_regions = environment_source_regions
else:
report_source_regions = default_source_regions
if report_source_regions == "EFGS":
if report_backend_identifier in efgs_source_regions:
efgs_source_regions = \
[report_backend_identifier] + \
sorted(list(set(efgs_source_regions).difference([report_backend_identifier])))
report_source_regions = efgs_source_regions
else:
report_source_regions = report_source_regions.split(",")
report_source_regions
# + papermill={"duration": 0.038671, "end_time": "2020-10-26T23:23:06.698001", "exception": false, "start_time": "2020-10-26T23:23:06.659330", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
environment_download_only_from_report_backend = \
os.environ.get("RADARCOVID_REPORT__DOWNLOAD_ONLY_FROM_REPORT_BACKEND")
if environment_download_only_from_report_backend:
report_backend_identifiers = [report_backend_identifier]
else:
report_backend_identifiers = None
report_backend_identifiers
# + papermill={"duration": 0.036311, "end_time": "2020-10-26T23:23:06.762019", "exception": false, "start_time": "2020-10-26T23:23:06.725708", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
environment_shared_diagnosis_generation_to_upload_days = \
os.environ.get("RADARCOVID_REPORT__SHARED_DIAGNOSIS_GENERATION_TO_UPLOAD_DAYS")
if environment_shared_diagnosis_generation_to_upload_days:
shared_diagnosis_generation_to_upload_days = \
int(environment_shared_diagnosis_generation_to_upload_days)
else:
shared_diagnosis_generation_to_upload_days = \
default_shared_diagnosis_generation_to_upload_days
shared_diagnosis_generation_to_upload_days
# + [markdown] papermill={"duration": 0.028764, "end_time": "2020-10-26T23:23:06.820519", "exception": false, "start_time": "2020-10-26T23:23:06.791755", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### COVID-19 Cases
# + papermill={"duration": 11.738656, "end_time": "2020-10-26T23:23:18.588921", "exception": false, "start_time": "2020-10-26T23:23:06.850265", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
@retry.retry(tries=10, delay=10, backoff=1.1, jitter=(0, 10))
def download_cases_dataframe_from_ecdc():
return pd.read_csv(
"https://opendata.ecdc.europa.eu/covid19/casedistribution/csv/data.csv")
confirmed_df = download_cases_dataframe_from_ecdc()
radar_covid_countries = set(report_source_regions)
confirmed_df = confirmed_df[["dateRep", "cases", "geoId"]]
confirmed_df.rename(
columns={
"dateRep":"sample_date",
"cases": "new_cases",
"geoId": "country_code",
},
inplace=True)
confirmed_df = confirmed_df[confirmed_df.country_code.isin(radar_covid_countries)]
confirmed_df["sample_date"] = pd.to_datetime(confirmed_df.sample_date, dayfirst=True)
confirmed_df = confirmed_df.groupby("sample_date").new_cases.sum().reset_index()
confirmed_df.sort_values("sample_date", inplace=True)
confirmed_df.tail()
# + papermill={"duration": 0.046534, "end_time": "2020-10-26T23:23:18.667152", "exception": false, "start_time": "2020-10-26T23:23:18.620618", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
confirmed_days = pd.date_range(
start=confirmed_df.iloc[0].sample_date,
end=extraction_datetime)
confirmed_days_df = pd.DataFrame(data=confirmed_days, columns=["sample_date"])
confirmed_df = confirmed_days_df.merge(confirmed_df, how="left")
confirmed_df["sample_date"] = confirmed_df.sample_date.dt.strftime("%Y-%m-%d")
confirmed_df.sort_values("sample_date", inplace=True)
confirmed_df.tail()
# + papermill={"duration": 0.047936, "end_time": "2020-10-26T23:23:18.745615", "exception": false, "start_time": "2020-10-26T23:23:18.697679", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
confirmed_df.columns = ["sample_date_string", "new_cases"]
confirmed_df.sort_values("sample_date_string", inplace=True)
confirmed_df["covid_cases"] = confirmed_df.new_cases.rolling(7).mean().round()
confirmed_df.fillna(method="ffill", inplace=True)
confirmed_df.tail()
# + papermill={"duration": 0.201825, "end_time": "2020-10-26T23:23:18.980198", "exception": false, "start_time": "2020-10-26T23:23:18.778373", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
confirmed_df[["new_cases", "covid_cases"]].plot()
# + [markdown] papermill={"duration": 0.032446, "end_time": "2020-10-26T23:23:19.046883", "exception": false, "start_time": "2020-10-26T23:23:19.014437", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Extract API TEKs
# + papermill={"duration": 208.884098, "end_time": "2020-10-26T23:26:47.965253", "exception": false, "start_time": "2020-10-26T23:23:19.081155", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
raw_zip_path_prefix = "Data/TEKs/Raw/"
fail_on_error_backend_identifiers = [report_backend_identifier]
multi_backend_exposure_keys_df = \
exposure_notification_io.download_exposure_keys_from_backends(
backend_identifiers=report_backend_identifiers,
generation_days=backend_generation_days,
fail_on_error_backend_identifiers=fail_on_error_backend_identifiers,
save_raw_zip_path_prefix=raw_zip_path_prefix)
multi_backend_exposure_keys_df["region"] = multi_backend_exposure_keys_df["backend_identifier"]
multi_backend_exposure_keys_df.rename(
columns={
"generation_datetime": "sample_datetime",
"generation_date_string": "sample_date_string",
},
inplace=True)
multi_backend_exposure_keys_df.head()
# + papermill={"duration": 0.231288, "end_time": "2020-10-26T23:26:48.238349", "exception": false, "start_time": "2020-10-26T23:26:48.007061", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
early_teks_df = multi_backend_exposure_keys_df[
multi_backend_exposure_keys_df.rolling_period < 144].copy()
early_teks_df["rolling_period_in_hours"] = early_teks_df.rolling_period / 6
early_teks_df[early_teks_df.sample_date_string != extraction_date] \
.rolling_period_in_hours.hist(bins=list(range(24)))
# + papermill={"duration": 0.244562, "end_time": "2020-10-26T23:26:48.522083", "exception": false, "start_time": "2020-10-26T23:26:48.277521", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
early_teks_df[early_teks_df.sample_date_string == extraction_date] \
.rolling_period_in_hours.hist(bins=list(range(24)))
# + papermill={"duration": 0.064431, "end_time": "2020-10-26T23:26:48.630858", "exception": false, "start_time": "2020-10-26T23:26:48.566427", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
multi_backend_exposure_keys_df = multi_backend_exposure_keys_df[[
"sample_date_string", "region", "key_data"]]
multi_backend_exposure_keys_df.head()
# + papermill={"duration": 0.971493, "end_time": "2020-10-26T23:26:49.640210", "exception": false, "start_time": "2020-10-26T23:26:48.668717", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
active_regions = \
multi_backend_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
active_regions
# + papermill={"duration": 1.008243, "end_time": "2020-10-26T23:26:50.689198", "exception": false, "start_time": "2020-10-26T23:26:49.680955", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
multi_backend_summary_df = multi_backend_exposure_keys_df.groupby(
["sample_date_string", "region"]).key_data.nunique().reset_index() \
.pivot(index="sample_date_string", columns="region") \
.sort_index(ascending=False)
multi_backend_summary_df.rename(
columns={"key_data": "shared_teks_by_generation_date"},
inplace=True)
multi_backend_summary_df.rename_axis("sample_date", inplace=True)
multi_backend_summary_df = multi_backend_summary_df.fillna(0).astype(int)
multi_backend_summary_df = multi_backend_summary_df.head(backend_generation_days)
multi_backend_summary_df.head()
# + papermill={"duration": 1.656544, "end_time": "2020-10-26T23:26:52.384808", "exception": false, "start_time": "2020-10-26T23:26:50.728264", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
def compute_keys_cross_sharing(x):
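    """For a pair of backends (x, y), return the TEKs they have in common and the fraction of backend x's TEKs that are also present in backend y."""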
teks_x = x.key_data_x.item()
common_teks = set(teks_x).intersection(x.key_data_y.item())
common_teks_fraction = len(common_teks) / len(teks_x)
return pd.Series(dict(
common_teks=common_teks,
common_teks_fraction=common_teks_fraction,
))
multi_backend_exposure_keys_by_region_df = \
multi_backend_exposure_keys_df.groupby("region").key_data.unique().reset_index()
multi_backend_exposure_keys_by_region_df["_merge"] = True
multi_backend_exposure_keys_by_region_combination_df = \
multi_backend_exposure_keys_by_region_df.merge(
multi_backend_exposure_keys_by_region_df, on="_merge")
multi_backend_exposure_keys_by_region_combination_df.drop(
columns=["_merge"], inplace=True)
multi_backend_exposure_keys_by_region_combination_df = \
multi_backend_exposure_keys_by_region_combination_df[
multi_backend_exposure_keys_by_region_combination_df.region_x !=
multi_backend_exposure_keys_by_region_combination_df.region_y]
multi_backend_exposure_keys_cross_sharing_df = \
multi_backend_exposure_keys_by_region_combination_df \
.groupby(["region_x", "region_y"]) \
.apply(compute_keys_cross_sharing) \
.reset_index()
multi_backend_cross_sharing_summary_df = \
multi_backend_exposure_keys_cross_sharing_df.pivot_table(
values=["common_teks_fraction"],
columns="region_x",
index="region_y",
aggfunc=lambda x: x.item())
multi_backend_cross_sharing_summary_df
# + papermill={"duration": 0.942152, "end_time": "2020-10-26T23:26:53.365746", "exception": false, "start_time": "2020-10-26T23:26:52.423594", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
multi_backend_without_active_region_exposure_keys_df = \
multi_backend_exposure_keys_df[multi_backend_exposure_keys_df.region != report_backend_identifier]
multi_backend_without_active_region = \
multi_backend_without_active_region_exposure_keys_df.groupby("region").key_data.nunique().sort_values().index.unique().tolist()
multi_backend_without_active_region
# + papermill={"duration": 0.108571, "end_time": "2020-10-26T23:26:53.520739", "exception": false, "start_time": "2020-10-26T23:26:53.412168", "status": "completed"} tags=[]
exposure_keys_summary_df = multi_backend_exposure_keys_df[
multi_backend_exposure_keys_df.region == report_backend_identifier]
exposure_keys_summary_df.drop(columns=["region"], inplace=True)
exposure_keys_summary_df = \
exposure_keys_summary_df.groupby(["sample_date_string"]).key_data.nunique().to_frame()
exposure_keys_summary_df = \
exposure_keys_summary_df.reset_index().set_index("sample_date_string")
exposure_keys_summary_df.sort_index(ascending=False, inplace=True)
exposure_keys_summary_df.rename(columns={"key_data": "shared_teks_by_generation_date"}, inplace=True)
exposure_keys_summary_df.head()
# + [markdown] papermill={"duration": 0.04021, "end_time": "2020-10-26T23:26:53.606375", "exception": false, "start_time": "2020-10-26T23:26:53.566165", "status": "completed"} tags=[]
# ### Dump API TEKs
# + papermill={"duration": 0.881237, "end_time": "2020-10-26T23:26:54.526054", "exception": false, "start_time": "2020-10-26T23:26:53.644817", "status": "completed"} tags=[]
tek_list_df = multi_backend_exposure_keys_df[
["sample_date_string", "region", "key_data"]].copy()
tek_list_df["key_data"] = tek_list_df["key_data"].apply(str)
tek_list_df.rename(columns={
"sample_date_string": "sample_date",
"key_data": "tek_list"}, inplace=True)
tek_list_df = tek_list_df.groupby(
["sample_date", "region"]).tek_list.unique().reset_index()
tek_list_df["extraction_date"] = extraction_date
tek_list_df["extraction_date_with_hour"] = extraction_date_with_hour
tek_list_path_prefix = "Data/TEKs/"
tek_list_current_path = tek_list_path_prefix + "Current/RadarCOVID-TEKs.json"
tek_list_daily_path = tek_list_path_prefix + f"Daily/RadarCOVID-TEKs-{extraction_date}.json"
tek_list_hourly_path = tek_list_path_prefix + f"Hourly/RadarCOVID-TEKs-{extraction_date_with_hour}.json"
for path in [tek_list_current_path, tek_list_daily_path, tek_list_hourly_path]:
os.makedirs(os.path.dirname(path), exist_ok=True)
tek_list_df.drop(columns=["extraction_date", "extraction_date_with_hour"]).to_json(
tek_list_current_path,
lines=True, orient="records")
tek_list_df.drop(columns=["extraction_date_with_hour"]).to_json(
tek_list_daily_path,
lines=True, orient="records")
tek_list_df.to_json(
tek_list_hourly_path,
lines=True, orient="records")
tek_list_df.head()
# + [markdown] papermill={"duration": 0.038877, "end_time": "2020-10-26T23:26:54.603001", "exception": false, "start_time": "2020-10-26T23:26:54.564124", "status": "completed"} tags=[]
# ### Load TEK Dumps
# + papermill={"duration": 0.0458, "end_time": "2020-10-26T23:26:54.686941", "exception": false, "start_time": "2020-10-26T23:26:54.641141", "status": "completed"} tags=[]
import glob
def load_extracted_teks(mode, region=None, limit=None) -> pd.DataFrame:
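    """Load previously dumped TEK JSON files ("Daily" or "Hourly" mode), optionally filtered by region and limited to the most recent `limit` dumps."""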
extracted_teks_df = pd.DataFrame(columns=["region"])
file_paths = list(reversed(sorted(glob.glob(tek_list_path_prefix + mode + "/RadarCOVID-TEKs-*.json"))))
if limit:
file_paths = file_paths[:limit]
for file_path in file_paths:
logging.info(f"Loading TEKs from '{file_path}'...")
iteration_extracted_teks_df = pd.read_json(file_path, lines=True)
extracted_teks_df = extracted_teks_df.append(
iteration_extracted_teks_df, sort=False)
extracted_teks_df["region"] = \
extracted_teks_df.region.fillna(spain_region_country_code).copy()
if region:
extracted_teks_df = \
extracted_teks_df[extracted_teks_df.region == region]
return extracted_teks_df
# + papermill={"duration": 0.715802, "end_time": "2020-10-26T23:26:55.440082", "exception": false, "start_time": "2020-10-26T23:26:54.724280", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
daily_extracted_teks_df = load_extracted_teks(
mode="Daily",
region=report_backend_identifier,
limit=tek_dumps_load_limit)
daily_extracted_teks_df.head()
# + papermill={"duration": 0.055819, "end_time": "2020-10-26T23:26:55.543714", "exception": false, "start_time": "2020-10-26T23:26:55.487895", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
exposure_keys_summary_df_ = daily_extracted_teks_df \
.sort_values("extraction_date", ascending=False) \
.groupby("sample_date").tek_list.first() \
.to_frame()
exposure_keys_summary_df_.index.name = "sample_date_string"
exposure_keys_summary_df_["tek_list"] = \
exposure_keys_summary_df_.tek_list.apply(len)
exposure_keys_summary_df_ = exposure_keys_summary_df_ \
.rename(columns={"tek_list": "shared_teks_by_generation_date"}) \
.sort_index(ascending=False)
exposure_keys_summary_df = exposure_keys_summary_df_
exposure_keys_summary_df.head()
# + [markdown] papermill={"duration": 0.039609, "end_time": "2020-10-26T23:26:55.627501", "exception": false, "start_time": "2020-10-26T23:26:55.587892", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Daily New TEKs
# + papermill={"duration": 0.077878, "end_time": "2020-10-26T23:26:55.745494", "exception": false, "start_time": "2020-10-26T23:26:55.667616", "status": "completed"} tags=[]
tek_list_df = daily_extracted_teks_df.groupby("extraction_date").tek_list.apply(
lambda x: set(sum(x, []))).reset_index()
tek_list_df = tek_list_df.set_index("extraction_date").sort_index(ascending=True)
tek_list_df.head()
# + papermill={"duration": 0.920911, "end_time": "2020-10-26T23:26:56.706685", "exception": false, "start_time": "2020-10-26T23:26:55.785774", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
def compute_teks_by_generation_and_upload_date(date):
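    """For a given upload (extraction) date, keep the TEKs first seen on that date and count them per generation date, together with the generation-to-upload delay in days."""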
day_new_teks_set_df = tek_list_df.copy().diff()
try:
day_new_teks_set = day_new_teks_set_df[
day_new_teks_set_df.index == date].tek_list.item()
except ValueError:
day_new_teks_set = None
if pd.isna(day_new_teks_set):
day_new_teks_set = set()
day_new_teks_df = daily_extracted_teks_df[
daily_extracted_teks_df.extraction_date == date].copy()
day_new_teks_df["shared_teks"] = \
day_new_teks_df.tek_list.apply(lambda x: set(x).intersection(day_new_teks_set))
day_new_teks_df["shared_teks"] = \
day_new_teks_df.shared_teks.apply(len)
day_new_teks_df["upload_date"] = date
day_new_teks_df.rename(columns={"sample_date": "generation_date"}, inplace=True)
day_new_teks_df = day_new_teks_df[
["upload_date", "generation_date", "shared_teks"]]
day_new_teks_df["generation_to_upload_days"] = \
(pd.to_datetime(day_new_teks_df.upload_date) -
pd.to_datetime(day_new_teks_df.generation_date)).dt.days
day_new_teks_df = day_new_teks_df[day_new_teks_df.shared_teks > 0]
return day_new_teks_df
shared_teks_generation_to_upload_df = pd.DataFrame()
for upload_date in daily_extracted_teks_df.extraction_date.unique():
shared_teks_generation_to_upload_df = \
shared_teks_generation_to_upload_df.append(
compute_teks_by_generation_and_upload_date(date=upload_date))
shared_teks_generation_to_upload_df \
.sort_values(["upload_date", "generation_date"], ascending=False, inplace=True)
shared_teks_generation_to_upload_df.tail()
# + papermill={"duration": 0.060275, "end_time": "2020-10-26T23:26:56.806321", "exception": false, "start_time": "2020-10-26T23:26:56.746046", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
today_new_teks_df = \
shared_teks_generation_to_upload_df[
shared_teks_generation_to_upload_df.upload_date == extraction_date].copy()
today_new_teks_df.tail()
# + papermill={"duration": 0.194691, "end_time": "2020-10-26T23:26:57.040056", "exception": false, "start_time": "2020-10-26T23:26:56.845365", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
if not today_new_teks_df.empty:
today_new_teks_df.set_index("generation_to_upload_days") \
.sort_index().shared_teks.plot.bar()
# + papermill={"duration": 0.058736, "end_time": "2020-10-26T23:26:57.140751", "exception": false, "start_time": "2020-10-26T23:26:57.082015", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
generation_to_upload_period_pivot_df = \
shared_teks_generation_to_upload_df[
["upload_date", "generation_to_upload_days", "shared_teks"]] \
.pivot(index="upload_date", columns="generation_to_upload_days") \
.sort_index(ascending=False).fillna(0).astype(int) \
.droplevel(level=0, axis=1)
generation_to_upload_period_pivot_df.head()
# + papermill={"duration": 0.065838, "end_time": "2020-10-26T23:26:57.248417", "exception": false, "start_time": "2020-10-26T23:26:57.182579", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
new_tek_df = tek_list_df.diff().tek_list.apply(
lambda x: len(x) if not pd.isna(x) else None).to_frame().reset_index()
new_tek_df.rename(columns={
"tek_list": "shared_teks_by_upload_date",
"extraction_date": "sample_date_string",}, inplace=True)
new_tek_df.tail()
# + papermill={"duration": 0.066403, "end_time": "2020-10-26T23:26:57.356499", "exception": false, "start_time": "2020-10-26T23:26:57.290096", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
estimated_shared_diagnoses_df = daily_extracted_teks_df.copy()
estimated_shared_diagnoses_df["new_sample_extraction_date"] = \
pd.to_datetime(estimated_shared_diagnoses_df.sample_date) + \
datetime.timedelta(shared_diagnosis_generation_to_upload_days)
estimated_shared_diagnoses_df["extraction_date"] = pd.to_datetime(estimated_shared_diagnoses_df.extraction_date)
estimated_shared_diagnoses_df["sample_date"] = pd.to_datetime(estimated_shared_diagnoses_df.sample_date)
estimated_shared_diagnoses_df.head()
# + papermill={"duration": 0.062764, "end_time": "2020-10-26T23:26:57.466301", "exception": false, "start_time": "2020-10-26T23:26:57.403537", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
# Sometimes TEKs are uploaded on the same day they were generated; we do not count them as new TEK devices:
same_day_tek_list_df = estimated_shared_diagnoses_df[
estimated_shared_diagnoses_df.sample_date == estimated_shared_diagnoses_df.extraction_date].copy()
same_day_tek_list_df = same_day_tek_list_df[["extraction_date", "tek_list"]].rename(
columns={"tek_list": "same_day_tek_list"})
same_day_tek_list_df.head()
# + papermill={"duration": 0.054479, "end_time": "2020-10-26T23:26:57.566787", "exception": false, "start_time": "2020-10-26T23:26:57.512308", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
shared_teks_uploaded_on_generation_date_df = same_day_tek_list_df.rename(
columns={
"extraction_date": "sample_date_string",
"same_day_tek_list": "shared_teks_uploaded_on_generation_date",
})
shared_teks_uploaded_on_generation_date_df.shared_teks_uploaded_on_generation_date = \
shared_teks_uploaded_on_generation_date_df.shared_teks_uploaded_on_generation_date.apply(len)
shared_teks_uploaded_on_generation_date_df.head()
shared_teks_uploaded_on_generation_date_df["sample_date_string"] = \
shared_teks_uploaded_on_generation_date_df.sample_date_string.dt.strftime("%Y-%m-%d")
shared_teks_uploaded_on_generation_date_df.head()
# + papermill={"duration": 0.059672, "end_time": "2020-10-26T23:26:57.672789", "exception": false, "start_time": "2020-10-26T23:26:57.613117", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
estimated_shared_diagnoses_df = estimated_shared_diagnoses_df[
estimated_shared_diagnoses_df.new_sample_extraction_date == estimated_shared_diagnoses_df.extraction_date]
estimated_shared_diagnoses_df.head()
# + papermill={"duration": 0.067491, "end_time": "2020-10-26T23:26:57.782322", "exception": false, "start_time": "2020-10-26T23:26:57.714831", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
same_day_tek_list_df["extraction_date"] = \
same_day_tek_list_df.extraction_date + datetime.timedelta(1)
estimated_shared_diagnoses_df = \
estimated_shared_diagnoses_df.merge(same_day_tek_list_df, how="left", on=["extraction_date"])
estimated_shared_diagnoses_df["same_day_tek_list"] = \
estimated_shared_diagnoses_df.same_day_tek_list.apply(lambda x: [] if x is np.nan else x)
estimated_shared_diagnoses_df.head()
# + papermill={"duration": 0.059646, "end_time": "2020-10-26T23:26:57.884863", "exception": false, "start_time": "2020-10-26T23:26:57.825217", "status": "completed"} tags=[]
estimated_shared_diagnoses_df.set_index("extraction_date", inplace=True)
if estimated_shared_diagnoses_df.empty:
estimated_shared_diagnoses_df["shared_diagnoses"] = 0
else:
estimated_shared_diagnoses_df["shared_diagnoses"] = estimated_shared_diagnoses_df.apply(
lambda x: len(set(x.tek_list).difference(x.same_day_tek_list)), axis=1).copy()
estimated_shared_diagnoses_df.reset_index(inplace=True)
estimated_shared_diagnoses_df.rename(columns={
"extraction_date": "sample_date_string"}, inplace=True)
estimated_shared_diagnoses_df = estimated_shared_diagnoses_df[["sample_date_string", "shared_diagnoses"]]
if not estimated_shared_diagnoses_df.empty:
estimated_shared_diagnoses_df["sample_date_string"] = \
estimated_shared_diagnoses_df.sample_date_string.dt.strftime("%Y-%m-%d")
estimated_shared_diagnoses_df.head()
# + [markdown] papermill={"duration": 0.050791, "end_time": "2020-10-26T23:26:57.978725", "exception": false, "start_time": "2020-10-26T23:26:57.927934", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Hourly New TEKs
# + papermill={"duration": 1.732967, "end_time": "2020-10-26T23:26:59.755287", "exception": false, "start_time": "2020-10-26T23:26:58.022320", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
hourly_extracted_teks_df = load_extracted_teks(
mode="Hourly", region=report_backend_identifier, limit=25)
hourly_extracted_teks_df.head()
# + papermill={"duration": 0.104925, "end_time": "2020-10-26T23:26:59.903767", "exception": false, "start_time": "2020-10-26T23:26:59.798842", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
hourly_new_tek_count_df = hourly_extracted_teks_df \
.groupby("extraction_date_with_hour").tek_list. \
apply(lambda x: set(sum(x, []))).reset_index().copy()
hourly_new_tek_count_df = hourly_new_tek_count_df.set_index("extraction_date_with_hour") \
.sort_index(ascending=True)
hourly_new_tek_count_df["new_tek_list"] = hourly_new_tek_count_df.tek_list.diff()
hourly_new_tek_count_df["new_tek_count"] = hourly_new_tek_count_df.new_tek_list.apply(
lambda x: len(x) if not pd.isna(x) else 0)
hourly_new_tek_count_df.rename(columns={
"new_tek_count": "shared_teks_by_upload_date"}, inplace=True)
hourly_new_tek_count_df = hourly_new_tek_count_df.reset_index()[[
"extraction_date_with_hour", "shared_teks_by_upload_date"]]
hourly_new_tek_count_df.head()
# + papermill={"duration": 0.079686, "end_time": "2020-10-26T23:27:00.027232", "exception": false, "start_time": "2020-10-26T23:26:59.947546", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
hourly_estimated_shared_diagnoses_df = hourly_extracted_teks_df.copy()
hourly_estimated_shared_diagnoses_df["new_sample_extraction_date"] = \
pd.to_datetime(hourly_estimated_shared_diagnoses_df.sample_date) + \
datetime.timedelta(shared_diagnosis_generation_to_upload_days)
hourly_estimated_shared_diagnoses_df["extraction_date"] = \
pd.to_datetime(hourly_estimated_shared_diagnoses_df.extraction_date)
hourly_estimated_shared_diagnoses_df = hourly_estimated_shared_diagnoses_df[
hourly_estimated_shared_diagnoses_df.new_sample_extraction_date ==
hourly_estimated_shared_diagnoses_df.extraction_date]
hourly_estimated_shared_diagnoses_df = \
hourly_estimated_shared_diagnoses_df.merge(same_day_tek_list_df, how="left", on=["extraction_date"])
hourly_estimated_shared_diagnoses_df["same_day_tek_list"] = \
hourly_estimated_shared_diagnoses_df.same_day_tek_list.apply(lambda x: [] if x is np.nan else x)
if hourly_estimated_shared_diagnoses_df.empty:
hourly_estimated_shared_diagnoses_df["shared_diagnoses"] = 0
else:
hourly_estimated_shared_diagnoses_df["shared_diagnoses"] = \
hourly_estimated_shared_diagnoses_df.apply(
lambda x: len(set(x.tek_list).difference(x.same_day_tek_list)), axis=1)
hourly_estimated_shared_diagnoses_df = \
hourly_estimated_shared_diagnoses_df.sort_values("extraction_date_with_hour").copy()
hourly_estimated_shared_diagnoses_df["shared_diagnoses"] = hourly_estimated_shared_diagnoses_df \
.groupby("extraction_date").shared_diagnoses.diff() \
.fillna(0).astype(int)
hourly_estimated_shared_diagnoses_df.set_index("extraction_date_with_hour", inplace=True)
hourly_estimated_shared_diagnoses_df.reset_index(inplace=True)
hourly_estimated_shared_diagnoses_df = hourly_estimated_shared_diagnoses_df[[
"extraction_date_with_hour", "shared_diagnoses"]]
hourly_estimated_shared_diagnoses_df.head()
# + papermill={"duration": 0.059209, "end_time": "2020-10-26T23:27:00.131565", "exception": false, "start_time": "2020-10-26T23:27:00.072356", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
hourly_summary_df = hourly_new_tek_count_df.merge(
hourly_estimated_shared_diagnoses_df, on=["extraction_date_with_hour"], how="outer")
hourly_summary_df.set_index("extraction_date_with_hour", inplace=True)
hourly_summary_df = hourly_summary_df.fillna(0).astype(int).reset_index()
hourly_summary_df["datetime_utc"] = pd.to_datetime(
hourly_summary_df.extraction_date_with_hour, format="%Y-%m-%d@%H")
hourly_summary_df.set_index("datetime_utc", inplace=True)
hourly_summary_df = hourly_summary_df.tail(-1)
hourly_summary_df.head()
# + [markdown] papermill={"duration": 0.044979, "end_time": "2020-10-26T23:27:00.219716", "exception": false, "start_time": "2020-10-26T23:27:00.174737", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Data Merge
# + papermill={"duration": 0.06102, "end_time": "2020-10-26T23:27:00.326619", "exception": false, "start_time": "2020-10-26T23:27:00.265599", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
result_summary_df = exposure_keys_summary_df.merge(
new_tek_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
# + papermill={"duration": 0.057808, "end_time": "2020-10-26T23:27:00.428797", "exception": false, "start_time": "2020-10-26T23:27:00.370989", "status": "completed"} tags=[]
result_summary_df = result_summary_df.merge(
shared_teks_uploaded_on_generation_date_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
# + papermill={"duration": 0.060628, "end_time": "2020-10-26T23:27:00.535492", "exception": false, "start_time": "2020-10-26T23:27:00.474864", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
result_summary_df = result_summary_df.merge(
estimated_shared_diagnoses_df, on=["sample_date_string"], how="outer")
result_summary_df.head()
# + papermill={"duration": 0.063232, "end_time": "2020-10-26T23:27:00.642850", "exception": false, "start_time": "2020-10-26T23:27:00.579618", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
result_summary_df = confirmed_df.tail(daily_summary_days).merge(
result_summary_df, on=["sample_date_string"], how="left")
result_summary_df.head()
# + papermill={"duration": 0.069906, "end_time": "2020-10-26T23:27:00.759592", "exception": false, "start_time": "2020-10-26T23:27:00.689686", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
result_summary_df["sample_date"] = pd.to_datetime(result_summary_df.sample_date_string)
result_summary_df.set_index("sample_date", inplace=True)
result_summary_df.drop(columns=["sample_date_string"], inplace=True)
result_summary_df.sort_index(ascending=False, inplace=True)
result_summary_df.head()
# + papermill={"duration": 0.066636, "end_time": "2020-10-26T23:27:00.876779", "exception": false, "start_time": "2020-10-26T23:27:00.810143", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
with pd.option_context("mode.use_inf_as_na", True):
result_summary_df = result_summary_df.fillna(0).astype(int)
result_summary_df["teks_per_shared_diagnosis"] = \
(result_summary_df.shared_teks_by_upload_date / result_summary_df.shared_diagnoses).fillna(0)
result_summary_df["shared_diagnoses_per_covid_case"] = \
(result_summary_df.shared_diagnoses / result_summary_df.covid_cases).fillna(0)
result_summary_df.head(daily_plot_days)
# + papermill={"duration": 0.072164, "end_time": "2020-10-26T23:27:01.000698", "exception": false, "start_time": "2020-10-26T23:27:00.928534", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
weekly_result_summary_df = result_summary_df \
.sort_index(ascending=True).fillna(0).rolling(7).agg({
"covid_cases": "sum",
"shared_teks_by_generation_date": "sum",
"shared_teks_by_upload_date": "sum",
"shared_diagnoses": "sum"
}).sort_index(ascending=False)
with pd.option_context("mode.use_inf_as_na", True):
weekly_result_summary_df = weekly_result_summary_df.fillna(0).astype(int)
weekly_result_summary_df["teks_per_shared_diagnosis"] = \
(weekly_result_summary_df.shared_teks_by_upload_date / weekly_result_summary_df.shared_diagnoses).fillna(0)
weekly_result_summary_df["shared_diagnoses_per_covid_case"] = \
(weekly_result_summary_df.shared_diagnoses / weekly_result_summary_df.covid_cases).fillna(0)
weekly_result_summary_df.head()
# + papermill={"duration": 0.061234, "end_time": "2020-10-26T23:27:01.112130", "exception": false, "start_time": "2020-10-26T23:27:01.050896", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
last_7_days_summary = weekly_result_summary_df.to_dict(orient="records")[1]
last_7_days_summary
# + [markdown] papermill={"duration": 0.048268, "end_time": "2020-10-26T23:27:01.209965", "exception": false, "start_time": "2020-10-26T23:27:01.161697", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ## Report Results
# + papermill={"duration": 0.055148, "end_time": "2020-10-26T23:27:01.314345", "exception": false, "start_time": "2020-10-26T23:27:01.259197", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
display_column_name_mapping = {
"sample_date": "Sample\u00A0Date\u00A0(UTC)",
"datetime_utc": "Timestamp (UTC)",
"upload_date": "Upload Date (UTC)",
"generation_to_upload_days": "Generation to Upload Period in Days",
"region": "Backend",
"region_x": "Backend\u00A0(A)",
"region_y": "Backend\u00A0(B)",
"common_teks": "Common TEKs Shared Between Backends",
"common_teks_fraction": "Fraction of TEKs in Backend (A) Available in Backend (B)",
"covid_cases": "COVID-19 Cases in Source Countries (7-day Rolling Average)",
"shared_teks_by_generation_date": "Shared TEKs by Generation Date",
"shared_teks_by_upload_date": "Shared TEKs by Upload Date",
"shared_diagnoses": "Shared Diagnoses (Estimation)",
"teks_per_shared_diagnosis": "TEKs Uploaded per Shared Diagnosis",
"shared_diagnoses_per_covid_case": "Usage Ratio (Fraction of Cases in Source Countries Which Shared Diagnosis)",
"shared_teks_uploaded_on_generation_date": "Shared TEKs Uploaded on Generation Date",
}
# + papermill={"duration": 0.052324, "end_time": "2020-10-26T23:27:01.412795", "exception": false, "start_time": "2020-10-26T23:27:01.360471", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
summary_columns = [
"covid_cases",
"shared_teks_by_generation_date",
"shared_teks_by_upload_date",
"shared_teks_uploaded_on_generation_date",
"shared_diagnoses",
"teks_per_shared_diagnosis",
"shared_diagnoses_per_covid_case",
]
# + [markdown] papermill={"duration": 0.052627, "end_time": "2020-10-26T23:27:01.514472", "exception": false, "start_time": "2020-10-26T23:27:01.461845", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Daily Summary Table
# + papermill={"duration": 0.067524, "end_time": "2020-10-26T23:27:01.630991", "exception": false, "start_time": "2020-10-26T23:27:01.563467", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
result_summary_df_ = result_summary_df.copy()
result_summary_df = result_summary_df[summary_columns]
result_summary_with_display_names_df = result_summary_df \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping)
result_summary_with_display_names_df
# + [markdown] papermill={"duration": 0.047856, "end_time": "2020-10-26T23:27:01.729686", "exception": false, "start_time": "2020-10-26T23:27:01.681830", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Daily Summary Plots
# + papermill={"duration": 1.407064, "end_time": "2020-10-26T23:27:03.188282", "exception": false, "start_time": "2020-10-26T23:27:01.781218", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
result_plot_summary_df = result_summary_df.head(daily_plot_days)[summary_columns] \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping)
summary_ax_list = result_plot_summary_df.sort_index(ascending=True).plot.bar(
title=f"Daily Summary",
rot=45, subplots=True, figsize=(15, 22), legend=False)
ax_ = summary_ax_list[-1]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.95)
ax_.yaxis.set_major_formatter(matplotlib.ticker.PercentFormatter(1.0))
_ = ax_.set_xticklabels(sorted(result_plot_summary_df.index.strftime("%Y-%m-%d").tolist()))
# + [markdown] papermill={"duration": 0.058438, "end_time": "2020-10-26T23:27:03.298767", "exception": false, "start_time": "2020-10-26T23:27:03.240329", "status": "completed"} tags=[]
# ### Daily Generation to Upload Period Table
# + papermill={"duration": 0.071683, "end_time": "2020-10-26T23:27:03.429373", "exception": false, "start_time": "2020-10-26T23:27:03.357690", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
display_generation_to_upload_period_pivot_df = \
generation_to_upload_period_pivot_df \
.head(backend_generation_days)
display_generation_to_upload_period_pivot_df \
.head(backend_generation_days) \
.rename_axis(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping)
# + papermill={"duration": 0.751942, "end_time": "2020-10-26T23:27:04.231723", "exception": false, "start_time": "2020-10-26T23:27:03.479781", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
fig, generation_to_upload_period_pivot_table_ax = plt.subplots(
figsize=(10, 1 + 0.5 * len(display_generation_to_upload_period_pivot_df)))
generation_to_upload_period_pivot_table_ax.set_title(
"Shared TEKs Generation to Upload Period Table")
sns.heatmap(
data=display_generation_to_upload_period_pivot_df
.rename_axis(columns=display_column_name_mapping)
.rename_axis(index=display_column_name_mapping),
fmt=".0f",
annot=True,
ax=generation_to_upload_period_pivot_table_ax)
generation_to_upload_period_pivot_table_ax.get_figure().tight_layout()
# + [markdown] papermill={"duration": 0.05667, "end_time": "2020-10-26T23:27:04.347436", "exception": false, "start_time": "2020-10-26T23:27:04.290766", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Hourly Summary Plots
# + papermill={"duration": 0.44398, "end_time": "2020-10-26T23:27:04.844237", "exception": false, "start_time": "2020-10-26T23:27:04.400257", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
hourly_summary_ax_list = hourly_summary_df \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.plot.bar(
title=f"Last 24h Summary",
rot=45, subplots=True, legend=False)
ax_ = hourly_summary_ax_list[-1]
ax_.get_figure().tight_layout()
ax_.get_figure().subplots_adjust(top=0.9)
_ = ax_.set_xticklabels(sorted(hourly_summary_df.index.strftime("%Y-%m-%d@%H").tolist()))
# + [markdown] papermill={"duration": 0.056468, "end_time": "2020-10-26T23:27:04.956096", "exception": false, "start_time": "2020-10-26T23:27:04.899628", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Publish Results
# + papermill={"duration": 0.067667, "end_time": "2020-10-26T23:27:05.080657", "exception": false, "start_time": "2020-10-26T23:27:05.012990", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
def get_temporary_image_path() -> str:
return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()) + ".png")
def save_temporary_plot_image(ax):
if isinstance(ax, np.ndarray):
ax = ax[0]
media_path = get_temporary_image_path()
ax.get_figure().savefig(media_path)
return media_path
def save_temporary_dataframe_image(df):
import dataframe_image as dfi
media_path = get_temporary_image_path()
dfi.export(df, media_path)
return media_path
# + papermill={"duration": 0.089079, "end_time": "2020-10-26T23:27:05.226106", "exception": false, "start_time": "2020-10-26T23:27:05.137027", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
github_repository = os.environ.get("GITHUB_REPOSITORY")
if github_repository is None:
github_repository = "pvieito/Radar-STATS"
github_project_base_url = "https://github.com/" + github_repository
display_formatters = {
display_column_name_mapping["teks_per_shared_diagnosis"]: lambda x: f"{x:.2f}",
display_column_name_mapping["shared_diagnoses_per_covid_case"]: lambda x: f"{x:.2%}",
}
daily_summary_table_html = result_summary_with_display_names_df \
.head(daily_plot_days) \
.rename_axis(index=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.to_html(formatters=display_formatters)
multi_backend_summary_table_html = multi_backend_summary_df \
.head(daily_plot_days) \
.rename_axis(columns=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping) \
.to_html(formatters=display_formatters)
def format_multi_backend_cross_sharing_fraction(x):
if pd.isna(x):
return "-"
elif round(x * 100, 1) == 0:
return ""
else:
return f"{x:.1%}"
multi_backend_cross_sharing_summary_table_html = multi_backend_cross_sharing_summary_df \
.rename_axis(columns=display_column_name_mapping) \
.rename(columns=display_column_name_mapping) \
.rename_axis(index=display_column_name_mapping) \
.to_html(
classes="table-center",
formatters=display_formatters,
float_format=format_multi_backend_cross_sharing_fraction)
multi_backend_cross_sharing_summary_table_html = \
multi_backend_cross_sharing_summary_table_html \
.replace("<tr>","<tr style=\"text-align: center;\">")
extraction_date_result_summary_df = \
result_summary_df[result_summary_df.index == extraction_date]
extraction_date_result_hourly_summary_df = \
hourly_summary_df[hourly_summary_df.extraction_date_with_hour == extraction_date_with_hour]
covid_cases = \
extraction_date_result_summary_df.covid_cases.sum()
shared_teks_by_generation_date = \
extraction_date_result_summary_df.shared_teks_by_generation_date.sum()
shared_teks_by_upload_date = \
extraction_date_result_summary_df.shared_teks_by_upload_date.sum()
shared_diagnoses = \
extraction_date_result_summary_df.shared_diagnoses.sum()
teks_per_shared_diagnosis = \
extraction_date_result_summary_df.teks_per_shared_diagnosis.sum()
shared_diagnoses_per_covid_case = \
extraction_date_result_summary_df.shared_diagnoses_per_covid_case.sum()
shared_teks_by_upload_date_last_hour = \
extraction_date_result_hourly_summary_df.shared_teks_by_upload_date.sum().astype(int)
shared_diagnoses_last_hour = \
extraction_date_result_hourly_summary_df.shared_diagnoses.sum().astype(int)
display_source_regions = ", ".join(report_source_regions)
display_brief_source_regions_limit = 2
if len(report_source_regions) <= display_brief_source_regions_limit:
display_brief_source_regions = display_source_regions
else:
prefix_countries = ", ".join(report_source_regions[:display_brief_source_regions_limit])
display_brief_source_regions = f"{len(report_source_regions)} ({prefix_countries}…)"
# + papermill={"duration": 6.263312, "end_time": "2020-10-26T23:27:11.548952", "exception": false, "start_time": "2020-10-26T23:27:05.285640", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
summary_plots_image_path = save_temporary_plot_image(
ax=summary_ax_list)
summary_table_image_path = save_temporary_dataframe_image(
df=result_summary_with_display_names_df)
hourly_summary_plots_image_path = save_temporary_plot_image(
ax=hourly_summary_ax_list)
multi_backend_summary_table_image_path = save_temporary_dataframe_image(
df=multi_backend_summary_df)
generation_to_upload_period_pivot_table_image_path = save_temporary_plot_image(
ax=generation_to_upload_period_pivot_table_ax)
# + [markdown] papermill={"duration": 0.061076, "end_time": "2020-10-26T23:27:11.672135", "exception": false, "start_time": "2020-10-26T23:27:11.611059", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Save Results
# + papermill={"duration": 0.086428, "end_time": "2020-10-26T23:27:11.815579", "exception": false, "start_time": "2020-10-26T23:27:11.729151", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
report_resources_path_prefix = "Data/Resources/Current/RadarCOVID-Report-"
result_summary_df.to_csv(
report_resources_path_prefix + "Summary-Table.csv")
result_summary_df.to_html(
report_resources_path_prefix + "Summary-Table.html")
hourly_summary_df.to_csv(
report_resources_path_prefix + "Hourly-Summary-Table.csv")
multi_backend_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Summary-Table.csv")
multi_backend_cross_sharing_summary_df.to_csv(
report_resources_path_prefix + "Multi-Backend-Cross-Sharing-Summary-Table.csv")
generation_to_upload_period_pivot_df.to_csv(
report_resources_path_prefix + "Generation-Upload-Period-Table.csv")
_ = shutil.copyfile(
summary_plots_image_path,
report_resources_path_prefix + "Summary-Plots.png")
_ = shutil.copyfile(
summary_table_image_path,
report_resources_path_prefix + "Summary-Table.png")
_ = shutil.copyfile(
hourly_summary_plots_image_path,
report_resources_path_prefix + "Hourly-Summary-Plots.png")
_ = shutil.copyfile(
multi_backend_summary_table_image_path,
report_resources_path_prefix + "Multi-Backend-Summary-Table.png")
_ = shutil.copyfile(
generation_to_upload_period_pivot_table_image_path,
report_resources_path_prefix + "Generation-Upload-Period-Table.png")
# + [markdown] papermill={"duration": 0.054033, "end_time": "2020-10-26T23:27:11.928436", "exception": false, "start_time": "2020-10-26T23:27:11.874403", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Publish Results as JSON
# + papermill={"duration": 0.068927, "end_time": "2020-10-26T23:27:12.050011", "exception": false, "start_time": "2020-10-26T23:27:11.981084", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
summary_results_api_df = result_summary_df.reset_index()
summary_results_api_df["sample_date_string"] = \
summary_results_api_df["sample_date"].dt.strftime("%Y-%m-%d")
summary_results = dict(
source_regions=report_source_regions,
extraction_datetime=extraction_datetime,
extraction_date=extraction_date,
extraction_date_with_hour=extraction_date_with_hour,
last_hour=dict(
shared_teks_by_upload_date=shared_teks_by_upload_date_last_hour,
shared_diagnoses=shared_diagnoses_last_hour,
),
today=dict(
covid_cases=covid_cases,
shared_teks_by_generation_date=shared_teks_by_generation_date,
shared_teks_by_upload_date=shared_teks_by_upload_date,
shared_diagnoses=shared_diagnoses,
teks_per_shared_diagnosis=teks_per_shared_diagnosis,
shared_diagnoses_per_covid_case=shared_diagnoses_per_covid_case,
),
last_7_days=last_7_days_summary,
daily_results=summary_results_api_df.to_dict(orient="records"))
summary_results = \
json.loads(pd.Series([summary_results]).to_json(orient="records"))[0]
with open(report_resources_path_prefix + "Summary-Results.json", "w") as f:
json.dump(summary_results, f, indent=4)
# + [markdown] papermill={"duration": 0.052949, "end_time": "2020-10-26T23:27:12.155259", "exception": false, "start_time": "2020-10-26T23:27:12.102310", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Publish on README
# + papermill={"duration": 0.062999, "end_time": "2020-10-26T23:27:12.276074", "exception": false, "start_time": "2020-10-26T23:27:12.213075", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
with open("Data/Templates/README.md", "r") as f:
readme_contents = f.read()
readme_contents = readme_contents.format(
extraction_date_with_hour=extraction_date_with_hour,
github_project_base_url=github_project_base_url,
daily_summary_table_html=daily_summary_table_html,
multi_backend_summary_table_html=multi_backend_summary_table_html,
multi_backend_cross_sharing_summary_table_html=multi_backend_cross_sharing_summary_table_html,
display_source_regions=display_source_regions)
with open("README.md", "w") as f:
f.write(readme_contents)
# + [markdown] papermill={"duration": 0.058067, "end_time": "2020-10-26T23:27:12.387203", "exception": false, "start_time": "2020-10-26T23:27:12.329136", "status": "completed"} pycharm={"name": "#%% md\n"} tags=[]
# ### Publish on Twitter
# + papermill={"duration": 8.661772, "end_time": "2020-10-26T23:27:21.101581", "exception": false, "start_time": "2020-10-26T23:27:12.439809", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
enable_share_to_twitter = os.environ.get("RADARCOVID_REPORT__ENABLE_PUBLISH_ON_TWITTER")
github_event_name = os.environ.get("GITHUB_EVENT_NAME")
if enable_share_to_twitter and github_event_name == "schedule":
import tweepy
twitter_api_auth_keys = os.environ["RADARCOVID_REPORT__TWITTER_API_AUTH_KEYS"]
twitter_api_auth_keys = twitter_api_auth_keys.split(":")
auth = tweepy.OAuthHandler(twitter_api_auth_keys[0], twitter_api_auth_keys[1])
auth.set_access_token(twitter_api_auth_keys[2], twitter_api_auth_keys[3])
api = tweepy.API(auth)
summary_plots_media = api.media_upload(summary_plots_image_path)
summary_table_media = api.media_upload(summary_table_image_path)
generation_to_upload_period_pivot_table_image_media = api.media_upload(generation_to_upload_period_pivot_table_image_path)
media_ids = [
summary_plots_media.media_id,
summary_table_media.media_id,
generation_to_upload_period_pivot_table_image_media.media_id,
]
if are_today_results_partial:
today_addendum = " (Partial)"
else:
today_addendum = ""
status = textwrap.dedent(f"""
#RadarCOVID – {extraction_date_with_hour}
Countries: {display_brief_source_regions}
Today{today_addendum}:
- Uploaded TEKs: {shared_teks_by_upload_date:.0f} ({shared_teks_by_upload_date_last_hour:+d} last hour)
- Shared Diagnoses: ≤{shared_diagnoses:.0f} ({shared_diagnoses_last_hour:+d} last hour)
- Usage Ratio: ≤{shared_diagnoses_per_covid_case:.2%}
Last 7 Days:
- Shared Diagnoses: ≤{last_7_days_summary["shared_diagnoses"]:.0f}
- Usage Ratio: ≤{last_7_days_summary["shared_diagnoses_per_covid_case"]:.2%}
Info: {github_project_base_url}#documentation
""")
status = status.encode(encoding="utf-8")
api.update_status(status=status, media_ids=media_ids)
# + papermill={"duration": 0.061505, "end_time": "2020-10-26T23:27:21.221775", "exception": false, "start_time": "2020-10-26T23:27:21.160270", "status": "completed"} pycharm={"name": "#%%\n"} tags=[]
| Notebooks/RadarCOVID-Report/Daily/RadarCOVID-Report-2020-10-26.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pickle
from knn import ProjectKNN
from xgmodel import ProjectXGBOOST
from mlp import ProjectMLP, MLP
import torch
import pandas as pd
with open('knn.bin', 'rb') as f:
    knn = pickle.load(f)
knn.test_arx('test.csv')
with open('xgboost.bin', 'rb') as f:
    xgboost = pickle.load(f)
xgboost.test_arx('test.csv')
# +
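# Run the pickled MLP on the test set: model_x predicts latitude (lat_pred), model_y predicts
# longitude (lon_pred), and the predictions are written to a CSV file.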
with open('mlp.bin', 'rb') as f:
    mlp = pickle.load(f)
test = pd.read_csv('test.csv')
x_batch = test[[
x for x in test.columns if x not in ('lat', 'lon', 'pontoId')]].values
x_batch = torch.tensor(x_batch).float().cuda()
mlp.model_x.eval()
out_x = mlp.model_x(x_batch)
out_x = out_x.detach().cpu().numpy()
mlp.model_y.eval()
out_y = mlp.model_y(x_batch)
out_y = out_y.detach().cpu().numpy()
df = pd.DataFrame()
df['pontoId'] = test.pontoId
df['lat_pred'] = out_x
df['lon_pred'] = out_y
df.to_csv('Resultados_Equipe3_Metodo_MLP.csv', index=False)
# -
| projeto.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Custom components
#
# As I mentioned earlier in the example notebooks, and also in the `README`, it is possible to customise almost every component in `pytorch-widedeep`.
#
# Let's now go through a couple of simple examples to illustrate how that can be done.
#
# First let's load and process the data "as usual", let's start with a regression and the [airbnb](http://insideairbnb.com/get-the-data.html) dataset.
# +
import numpy as np
import pandas as pd
import os
import torch
from torch import nn, Tensor
from pytorch_widedeep import Trainer
from pytorch_widedeep.preprocessing import WidePreprocessor, TabPreprocessor, TextPreprocessor, ImagePreprocessor
from pytorch_widedeep.models import Wide, TabMlp, TabResnet, DeepText, DeepImage, WideDeep
from pytorch_widedeep.losses import RMSELoss
from pytorch_widedeep.initializers import *
from pytorch_widedeep.callbacks import *
# -
df = pd.read_csv('data/airbnb/airbnb_sample.csv')
df.head()
# There are a number of columns that are already binary. Therefore, no need to one hot encode them
crossed_cols = [('property_type', 'room_type')]
already_dummies = [c for c in df.columns if 'amenity' in c] + ['has_house_rules']
wide_cols = ['is_location_exact', 'property_type', 'room_type', 'host_gender',
'instant_bookable'] + already_dummies
cat_embed_cols = [(c, 16) for c in df.columns if 'catg' in c] + \
[('neighbourhood_cleansed', 64), ('cancellation_policy', 16)]
continuous_cols = ['latitude', 'longitude', 'security_deposit', 'extra_people']
# it does not make sense to standardise latitude and longitude
already_standard = ['latitude', 'longitude']
# text and image colnames
text_col = 'description'
img_col = 'id'
# path to pretrained word embeddings and the images
word_vectors_path = 'data/glove.6B/glove.6B.100d.txt'
img_path = 'data/airbnb/property_picture'
# target
target_col = 'yield'
target = df[target_col].values
# +
wide_preprocessor = WidePreprocessor(wide_cols=wide_cols, crossed_cols=crossed_cols)
X_wide = wide_preprocessor.fit_transform(df)
tab_preprocessor = TabPreprocessor(embed_cols=cat_embed_cols, continuous_cols=continuous_cols)
X_tab = tab_preprocessor.fit_transform(df)
text_preprocessor = TextPreprocessor(word_vectors_path=word_vectors_path, text_col=text_col)
X_text = text_preprocessor.fit_transform(df)
image_processor = ImagePreprocessor(img_col = img_col, img_path = img_path)
X_images = image_processor.fit_transform(df)
# -
# Now we are ready to build a wide and deep model. Three of the four components we will use are included in this package, and they will be combined with a custom `deeptext` component. Then the fit process will run with a custom loss function.
#
# Let's have a look
# +
# Linear model
wide = Wide(wide_dim=np.unique(X_wide).shape[0], pred_dim=1)
# DeepDense: 2 Dense layers
deeptabular = TabMlp(
column_idx = tab_preprocessor.column_idx,
mlp_hidden_dims=[128,64],
mlp_dropout = 0.1,
mlp_batchnorm = True,
embed_input=tab_preprocessor.embeddings_input,
embed_dropout = 0.1,
continuous_cols = continuous_cols,
cont_norm_layer = "batchnorm"
)
# Pretrained Resnet 18 (default is all but last 2 conv blocks frozen) plus a FC-Head 512->256->128
deepimage = DeepImage(pretrained=True, head_hidden_dims=[512, 256, 128])
# -
# ### Custom `deeptext`
#
# Standard Pytorch model
class MyDeepText(nn.Module):
def __init__(self, vocab_size, padding_idx=1, embed_dim=100, hidden_dim=64):
super(MyDeepText, self).__init__()
# word/token embeddings
self.word_embed = nn.Embedding(
vocab_size, embed_dim, padding_idx=padding_idx
)
# stack of RNNs
self.rnn = nn.GRU(
embed_dim,
hidden_dim,
num_layers=2,
bidirectional=True,
batch_first=True,
)
        # Remember, this must be defined. If not, WideDeep will throw an error
self.output_dim = hidden_dim * 2
def forward(self, X):
embed = self.word_embed(X.long())
o, h = self.rnn(embed)
return torch.cat((h[-2], h[-1]), dim=1)
mydeeptext = MyDeepText(vocab_size=len(text_preprocessor.vocab.itos))
model = WideDeep(wide=wide, deeptabular=deeptabular, deeptext=mydeeptext, deepimage=deepimage)
# ### Custom loss function
#
# Loss functions must simply inherit pytorch's `nn.Module`. For example, let's say we want to use `RMSE` (note that this is already available in the package, but I will pass it here as a custom loss for illustration purposes)
class RMSELoss(nn.Module):
def __init__(self):
"""root mean squared error"""
super().__init__()
self.mse = nn.MSELoss()
def forward(self, input: Tensor, target: Tensor) -> Tensor:
return torch.sqrt(self.mse(input, target))
# and now we just instantiate the ``Trainer`` as usual. Needless to say, this runs with only 1000 random observations, so loss and metric values are meaningless. This is just an example
trainer = Trainer(model, objective='regression', custom_loss_function=RMSELoss())
trainer.fit(X_wide=X_wide, X_tab=X_tab, X_text=X_text, X_img=X_images,
target=target, n_epochs=1, batch_size=32, val_split=0.2)
# In addition to model components and loss functions, we can also use custom callbacks or custom metrics. The former need to be of type `Callback` and the latter need to be of type `Metric`. See:
#
# ```python
# pytorch-widedeep.callbacks
# ```
# and
#
# ```python
# pytorch-widedeep.metrics
# ```
#
# For this example let me use the adult dataset. Again, we first prepare the data as usual
df = pd.read_csv('data/adult/adult.csv.zip')
df.head()
# For convenience, we'll replace '-' with '_'
df.columns = [c.replace("-", "_") for c in df.columns]
# binary target
df['income_label'] = (df["income"].apply(lambda x: ">50K" in x)).astype(int)
df.drop('income', axis=1, inplace=True)
df.head()
wide_cols = ['education', 'relationship','workclass','occupation','native_country','gender']
crossed_cols = [('education', 'occupation'), ('native_country', 'occupation')]
cat_embed_cols = [('education',16), ('relationship',8), ('workclass',16), ('occupation',16),('native_country',16)]
continuous_cols = ["age","hours_per_week"]
target_col = 'income_label'
# +
# TARGET
target = df[target_col].values
# wide
wide_preprocessor = WidePreprocessor(wide_cols=wide_cols, crossed_cols=crossed_cols)
X_wide = wide_preprocessor.fit_transform(df)
# deeptabular
tab_preprocessor = TabPreprocessor(embed_cols=cat_embed_cols, continuous_cols=continuous_cols)
X_tab = tab_preprocessor.fit_transform(df)
# -
wide = Wide(wide_dim=np.unique(X_wide).shape[0], pred_dim=1)
deeptabular = TabMlp(mlp_hidden_dims=[64,32],
column_idx=tab_preprocessor.column_idx,
embed_input=tab_preprocessor.embeddings_input,
continuous_cols=continuous_cols)
model = WideDeep(wide=wide, deeptabular=deeptabular)
# ### Custom metric
#
# Let's say we want to use our own accuracy metric (again, this is already available in the package, but I will pass it here as a custom metric for illustration purposes).
#
# This could be done as:
from pytorch_widedeep.metrics import Metric
class Accuracy(Metric):
def __init__(self, top_k: int = 1):
super(Accuracy, self).__init__()
self.top_k = top_k
self.correct_count = 0
self.total_count = 0
# metric name needs to be defined
self._name = "acc"
def reset(self):
self.correct_count = 0
self.total_count = 0
def __call__(self, y_pred: Tensor, y_true: Tensor) -> np.ndarray:
num_classes = y_pred.size(1)
if num_classes == 1:
y_pred = y_pred.round()
y_true = y_true
elif num_classes > 1:
y_pred = y_pred.topk(self.top_k, 1)[1]
y_true = y_true.view(-1, 1).expand_as(y_pred)
self.correct_count += y_pred.eq(y_true).sum().item()
self.total_count += len(y_pred)
accuracy = float(self.correct_count) / float(self.total_count)
return np.array(accuracy)
# ### Custom Callback
#
# Let's code a callback that records the current epoch at the beginning and the end of each epoch (silly, but you know, this is just an example)
# have a look at the class
from pytorch_widedeep.callbacks import Callback
class SillyCallback(Callback):
def on_train_begin(self, logs = None):
# recordings will be the trainer object attributes
self.trainer.silly_callback = {}
self.trainer.silly_callback['beginning'] = []
self.trainer.silly_callback['end'] = []
def on_epoch_begin(self, epoch, logs=None):
self.trainer.silly_callback['beginning'].append(epoch+1)
def on_epoch_end(self, epoch, logs=None, metric=None):
self.trainer.silly_callback['end'].append(epoch+1)
# and now, as usual:
trainer = Trainer(model, objective='binary', metrics=[Accuracy], callbacks=[SillyCallback])
trainer.fit(X_wide=X_wide, X_tab=X_tab, target=target, n_epochs=5, batch_size=64, val_split=0.2)
trainer.silly_callback
| examples/07_Custom_Components.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Jupyter notebook generates PDF files through LaTeX compilation, so TeX software must be installed first. (TeX Live is recommended for Linux, MiKTeX for Windows, and MacTeX for macOS.)
# - It is recommended to download the latest TeX software directly from [https://tug.org/](https://tug.org/); the TeX Live download and installation page is [https://tug.org/texlive/](https://tug.org/texlive/).
#
# - The TeX Live packages obtained with apt-get on Ubuntu are outdated and incomplete; upgrading them with the tlmgr package manager runs into version incompatibilities that make the upgrade impossible.
# # Because of the limitations of Jupyter notebook's own templates, the PDF conversion template uses LaTeX's standard T1 fonts by default (which do not support Chinese). Therefore, to produce a PDF file containing Chinese, proceed as follows:
# - From the Jupyter notebook environment, first export the .ipynb working file to a LaTeX (.tex) file. (The output file is assumed here to be *sample.tex*.)
#
# - Open *sample.tex* in a text editor.
#
# - Add the following line below the **\documentclass** statement to load the xeCJK package and enable Chinese support.
#
# ```
# \usepackage{xeCJK}
# ```
#
# - After saving the file, run `xelatex sample.tex` to compile a *sample.pdf* with Chinese support. (A scripted version of these steps is sketched below.)
#
# - After the line above, you may add the following statement to set SimSun as the default Chinese font of the generated PDF; if it is not set, the system font is used.
#
# ```
# \setCJKmainfont{SimSun}
# ```
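#
# The steps above can also be scripted. Below is a minimal Python sketch, assuming `jupyter nbconvert` and `xelatex` are available on the PATH and that the notebook is named *sample.ipynb*; adapt the file names to your own notebook:
#
# ```
# import subprocess
#
# # 1. Export the notebook to LaTeX (produces sample.tex).
# subprocess.run(["jupyter", "nbconvert", "--to", "latex", "sample.ipynb"], check=True)
#
# # 2. Insert \usepackage{xeCJK} right after the \documentclass line.
# with open("sample.tex", encoding="utf-8") as f:
#     lines = f.readlines()
# for i, line in enumerate(lines):
#     if line.startswith(r"\documentclass"):
#         lines.insert(i + 1, "\\usepackage{xeCJK}\n")
#         break
# with open("sample.tex", "w", encoding="utf-8") as f:
#     f.writelines(lines)
#
# # 3. Compile with xelatex to produce sample.pdf.
# subprocess.run(["xelatex", "sample.tex"], check=True)
# ```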
# *Notes:*
#
# - *Test environment: Ubuntu 19.04 + TeX Live 2019 (the default installation already includes the xeCJK and CTeX Chinese macro packages).*
#
# - *See the documentation file [xeCJK.pdf](xeCJK.pdf) for details on how to use the xeCJK Chinese support package.*
#
# - *This document itself is a Chinese PDF generated with the method described above.*
| docs/IMPOART_NOTICE_FOR_CHINESE_PDF_DOCUMENT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **interval starts from 0**
#
# i.e. 0 is 0:00-3:59, 5 is 20:00-23:59
import pandas as pd
# A way without defining a function:
# just copy and paste, and change 'df' and 'pickup_datetime' to the names of your dataframe and of its datetime column
df['interval'] = (pd.to_datetime(df['pickup_datetime']).dt.hour)//4
# a function way (essentially the same as the previous one)
# +
def time2interval(df, colname):
    # Given a dataframe and the name of its datetime column, add an 'interval' column to the given dataframe
# df: input dataframe
# colname: name of the column which contains time
df['interval'] = (pd.to_datetime(df[colname]).dt.hour)//4
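# Hypothetical usage sketch (the dataframe and column below are made up for illustration):
demo_df = pd.DataFrame({'pickup_datetime': ['2021-01-01 02:30:00', '2021-01-01 21:15:00']})
time2interval(demo_df, 'pickup_datetime')
demo_df['interval'].tolist()  # -> [0, 5]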
| .ipynb_checkpoints/time2interval-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# For convenience, let's begin by enabling [automatic reloading of modules](https://ipython.readthedocs.io/en/stable/config/extensions/autoreload.html?highlight=autoreload) when they change.
# + tags=[]
# %load_ext autoreload
# %autoreload 2
# -
# # Import Qiskit Metal
# +
import qiskit_metal as metal
from qiskit_metal import designs, draw
from qiskit_metal import MetalGUI, Dict, Headings
from qiskit_metal.qlibrary.qubits.transmon_pocket import TransmonPocket
from qiskit_metal.qlibrary.qubits.transmon_cross import TransmonCross
from qiskit_metal.renderers.renderer_gds.gds_renderer import QGDSRenderer
# -
Headings.h1('The default_options in a QComponent are different than the default_options in QRenderers.')
# ![QDesign Data Flow_accurate.jpg](attachment:a3b155b8-23ee-493a-b9d2-40e2d51aef9b.jpg)
TransmonPocket.default_options
QGDSRenderer.default_options
# ## A renderer needs to inherit from QRenderer.
# For example, QGDSRenderer inherits from QRenderer.
#
# When any QRenderer is registered within QDesign, the QRenderer instance has options, which hold the latest set of values for default_options. The GUI can also update these options.
#
# An example of updating options is further below in this notebook.
#
# ## A user can customize things two ways.
#
# 1. Directly update the options that originated from default_options, for either QComponent or QRenderer.
#
# 2. Pass options to a QComponent which will be placed in a QGeometry table, then used by QRenderer.
# ## How to get options from QRenderer to be placed within the QGeometry table?
# We set this up so that older QComponents can be agnostic of newer QRenderers.
#
# The "rate limiting factor" is to have QComponent denote in it's metadata, which QGeometry tables it will write to. For this example, we will discuss the "junction" table. More details will be in the notebook at "tutorials/4 Plugin Developer".
# If the QComponent identifies the table which it is aware of, and if QGDSRenderer wants to add a column to the table with a default value, then QComponent will pass the option from QGDSRenderer to QGeometry table without doing anything with it.
#
# An example of this below is `gds_cell_name='FakeJunction_01'`. This is passed through to QGeometry, when a QComponent is instantiated. The QGDSRenderer has a default, which is not editable during run-time, but can be customized when a QComponent is instantiated.
Headings.h1('How does a QRenderer get registered within QDesign?')
# + [markdown] tags=[]
# ## By default, QRenderers are registered within QDesign during QDesign init.
# The list of QRenderers that will be registered is in qiskit_metal.config.py;
# the dictionary `renderers_to_load` has the name of the QRenderer (key), class name (value), and path (value).
#
# Presently, GDS and Ansys QRenderers are registered during init.
#
# -
design = designs.DesignPlanar()
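# Quick check (a sketch): list the QRenderers registered during the QDesign init above.
list(design.renderers.keys())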
# +
# Use GDS QRenderer for remaining examples. Can do similar things with Ansys QRenderer.
#an_ansys = design._renderers['ansys']
#an_ansys = design._renderers.ansys
#a_gds = design._renderers['gds']
a_gds = design._renderers.gds
# -
gui = MetalGUI(design)
design.overwrite_enabled = True
Headings.h1('Populate QDesign to demonstrate exporting to GDS format.')
# +
from qiskit_metal.qlibrary.qubits.transmon_pocket import TransmonPocket
# Allow running the same cell here multiple times to overwrite changes
design.overwrite_enabled = True
## Custom options for all the transmons
options = dict(
    # Some options we want to modify from the defaults
# (see below for defaults)
pad_width = '425 um',
pad_gap = '80 um',
pocket_height = '650um',
# Adding 4 connectors (see below for defaults)
connection_pads=dict(
a = dict(loc_W=+1,loc_H=+1),
b = dict(loc_W=-1,loc_H=+1, pad_height='30um'),
c = dict(loc_W=+1,loc_H=-1, pad_width='200um'),
d = dict(loc_W=-1,loc_H=-1, pad_height='50um')
)
)
# -
# # Note:
# The cell name denoted by "gds_cell_name" will be the selected cell
# from design.renderers.gds.options['path_filename']
# when design.renderers.gds.export_to_gds() is executed.
# +
## Create 4 TransmonPockets
q1 = TransmonPocket(design, 'Q1', options = dict(
pos_x='+2.55mm', pos_y='+0.0mm', gds_cell_name='FakeJunction_02', **options))
q2 = TransmonPocket(design, 'Q2', options = dict(
pos_x='+0.0mm', pos_y='-0.9mm', orientation = '90', gds_cell_name='FakeJunction_02', **options))
q3 = TransmonPocket(design, 'Q3', options = dict(
pos_x='-2.55mm', pos_y='+0.0mm', gds_cell_name='FakeJunction_01',**options))
q4 = TransmonPocket(design, 'Q4', options = dict(
pos_x='+0.0mm', pos_y='+0.9mm', orientation = '90', gds_cell_name='my_other_junction', **options))
# + tags=[]
## Rebuild the design
gui.rebuild()
gui.autoscale()
# Connect using techniques explained in earlier notebooks.
from qiskit_metal.qlibrary.interconnects.meandered import RouteMeander
RouteMeander.get_template_options(design)
options = Dict(
meander=Dict(
lead_start='0.1mm',
lead_end='0.1mm',
asymmetry='0 um')
)
def connect(component_name: str, component1: str, pin1: str, component2: str, pin2: str,
length: str, asymmetry='0 um', flip=False, fillet='50um'):
"""Connect two pins with a CPW."""
myoptions = Dict(
fillet=fillet,
pin_inputs=Dict(
start_pin=Dict(
component=component1,
pin=pin1),
end_pin=Dict(
component=component2,
pin=pin2)),
lead=Dict(
start_straight='0.13mm',
end_straight='0.13mm'
),
total_length=length)
myoptions.update(options)
myoptions.meander.asymmetry = asymmetry
myoptions.meander.lead_direction_inverted = 'true' if flip else 'false'
return RouteMeander(design, component_name, myoptions)
asym = 90
cpw1 = connect('cpw1', 'Q1', 'd', 'Q2', 'c', '5.7 mm', f'+{asym}um', fillet='25um')
cpw2 = connect('cpw2', 'Q3', 'c', 'Q2', 'a', '5.4 mm', f'-{asym}um', flip=True, fillet='100um')
cpw3 = connect('cpw3', 'Q3', 'a', 'Q4', 'b', '5.3 mm', f'+{asym}um', fillet='75um')
cpw4 = connect('cpw4', 'Q1', 'b', 'Q4', 'd', '5.5 mm', f'-{asym}um', flip=True)
gui.rebuild()
gui.autoscale()
# -
Headings.h1('Exporting a GDS file.')
# + tags=[]
#QDesign enables GDS renderer during init.
a_gds = design.renderers.gds
# An alternate way to invoke the gds commands without using a_gds:
# design.renderers.gds.export_to_gds()
#Show the options for GDS
a_gds.options
# -
# ### To make the junction table work correctly, the GDS Renderer needs the correct path to a GDS file which has cells.
# Each cell is a junction to be placed in a Transmon. A sample GDS file is provided in the directory `qiskit_metal/tutorials/resources`.
# There are three cells, named "Fake_Junction_01", "Fake_Junction_02", and "my_other_junction".
# The default name used by the GDS Renderer is "my_other_junction". If you want to customize and select a junction through the options,
# you can pass it when a QComponent is being added to QDesign.
a_gds.options['path_filename'] = '../../resources/Fake_Junctions.GDS'
# Do you want the GDS Renderer to fix any short segments in your QDesign when using a fillet?
#
#
# If you have a fillet value and there are line segments that are shorter than 2*fillet_value,
# setting this option to 'True' means those short segments will not be fillet'd.
a_gds.options['short_segments_to_not_fillet'] = 'True'
scale_fillet = 2.0
a_gds.options['check_short_segments_by_scaling_fillet'] = scale_fillet
# + tags=[]
# Export GDS file for all components in design.
#def export_to_gds(self, file_name: str, highlight_qcomponents: list = []) -> int:
# Please change the path where you want to write a GDS file.
#Examples below.
#a_gds.export_to_gds("../../../gds-files/GDS QRenderer Notebook.gds")
a_gds.export_to_gds('GDS QRenderer Notebook.gds')
# + tags=[]
# Export a GDS file which contains only a few components.
# You will probably want to put the exported file in a specific directory.
# Please give the full path for output.
a_gds.export_to_gds("four_qcomponents.gds",
highlight_qcomponents=['cpw1', 'cpw4', 'Q1', 'Q3'])
# -
# ## How to "execute" exporting an QRenderer from GUI vs notebook?
# Within the GUI, there are icons: GDS, HFSS and Q3D.
#
# Example for GDS:
# Select the components that you want to export from the QGeometry Tables. Select the path/file_name, and the same thing should happen as in the cells above.
Headings.h1('QUESTION: Where is the geometry of a QComponent placed?')
# # Answer: QGeometry tables!
# # What is QGeometry?
#
# ### All QRenderers use the QGeometry tables to export from QDesign. Each table is a Pandas DataFrame.
#
# We can get all the QGeometry of a QComponent. There are several kinds, such as `path`, `poly` and, `junction`.
# +
#Many ways to view the QGeometry tables.
#If you want to view, uncomment below lines and and run it.
#design.qgeometry.tables
#design.qgeometry.tables['path']
#design.qgeometry.tables['poly']
# -
design.qgeometry.tables['junction']
# ### Let us look at all the polygons used to create qubit `q1`
# The poly table holds the polygons identified from QComponents.
q1.qgeometry_table('poly')
# Paths are lines. These can have a width.
q1.qgeometry_table('path')
# ### The junction table is handled differently by each QRenderer.
#
# ### What does GDS do with "junction" table?
# This is better explained in folder 5 All QRenderers/5.2 GDS/GDS QRenderer notebook.
q1.qgeometry_table('junction')
# ### Geometric boundary of a QComponent?
# Return the bounding box of the geometry, for example: `q1.qgeometry_bounds()`.
# The function returns a tuple containing (minx, miny, maxx, maxy) bound values
# for the bounds of the component as a whole.
# + tags=[]
for name, qcomponent in design.components.items():
print(f"{name:10s} : {qcomponent.qgeometry_bounds()}")
# -
# # Qiskit Metal Version
# + tags=[]
metal.about();
# +
# gui.main_window.close()
| tutorials/2 Front End User/2.4 QRenderer Introduction/QRenderer Introduction.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.1.0
# language: julia
# name: julia-1.1
# ---
#
# <a id='orth-proj'></a>
# <div id="qe-notebook-header" style="text-align:right;">
# <a href="https://quantecon.org/" title="quantecon.org">
# <img style="width:250px;display:inline;" src="https://assets.quantecon.org/img/qe-menubar-logo.svg" alt="QuantEcon">
# </a>
# </div>
# # Orthogonal Projections and Their Applications
#
#
# <a id='index-0'></a>
# ## Contents
#
# - [Orthogonal Projections and Their Applications](#Orthogonal-Projections-and-Their-Applications)
# - [Overview](#Overview)
# - [Key Definitions](#Key-Definitions)
# - [The Orthogonal Projection Theorem](#The-Orthogonal-Projection-Theorem)
# - [Orthonormal Basis](#Orthonormal-Basis)
# - [Projection Using Matrix Algebra](#Projection-Using-Matrix-Algebra)
# - [Least Squares Regression](#Least-Squares-Regression)
# - [Orthogonalization and Decomposition](#Orthogonalization-and-Decomposition)
# - [Exercises](#Exercises)
# - [Solutions](#Solutions)
# ## Overview
#
# Orthogonal projection is a cornerstone of vector space methods, with many diverse applications
#
# These include, but are not limited to,
#
# - Least squares projection, also known as linear regression
# - Conditional expectations for multivariate normal (Gaussian) distributions
# - Gram–Schmidt orthogonalization
# - QR decomposition
# - Orthogonal polynomials
# - etc
#
#
# In this lecture we focus on
#
# - key ideas
# - least squares regression
# ### Further Reading
#
# For background and foundational concepts, see our lecture [on linear algebra](linear_algebra.html)
#
# For more proofs and greater theoretical detail, see [A Primer in Econometric Theory](http://www.johnstachurski.net/emet.html)
#
# For a complete set of proofs in a general setting, see, for example, [[Rom05]](zreferences.html#roman2005)
#
# For an advanced treatment of projection in the context of least squares prediction, see [this book chapter](http://www.tomsargent.com/books/TOMchpt.2.pdf)
# ## Key Definitions
#
# Assume $ x, z \in \mathbb{R}^n $
#
# Define $ \langle x, z\rangle = \sum_i x_i z_i $
#
# Recall $ \|x \|^2 = \langle x, x \rangle $
#
# The **law of cosines** states that $ \langle x, z \rangle = \| x \| \| z \| \cos(\theta) $ where $ \theta $ is the angle between the vectors $ x $ and $ z $
#
# When $ \langle x, z\rangle = 0 $, then $ \cos(\theta) = 0 $ and $ x $ and $ z $ are said to be **orthogonal** and we write $ x \perp z $
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/orth_proj_def1.png" style="width:50%;height:50%">
#
#
# For a linear subspace $ S \subset \mathbb{R}^n $, we call $ x \in \mathbb{R}^n $ **orthogonal to** $ S $ if $ x \perp z $ for all $ z \in S $, and write $ x \perp S $
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/orth_proj_def2.png" style="width:50%;height:50%">
#
#
# The **orthogonal complement** of linear subspace $ S \subset \mathbb{R}^n $ is the set $ S^{\perp} := \{x \in \mathbb{R}^n \,:\, x \perp S\} $
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/orth_proj_def3.png" style="width:50%;height:50%">
#
#
# $ S^\perp $ is a linear subspace of $ \mathbb{R}^n $
#
# - To see this, fix $ x, y \in S^{\perp} $ and $ \alpha, \beta \in \mathbb{R} $
# - Observe that if $ z \in S $, then
#
#
# $$
# \langle \alpha x + \beta y, z \rangle
# = \alpha \langle x, z \rangle + \beta \langle y, z \rangle
# = \alpha \times 0 + \beta \times 0 = 0
# $$
#
# - Hence $ \alpha x + \beta y \in S^{\perp} $, as was to be shown
#
#
# A set of vectors $ \{x_1, \ldots, x_k\} \subset \mathbb{R}^n $ is called an **orthogonal set** if $ x_i \perp x_j $ whenever $ i \not= j $
#
# If $ \{x_1, \ldots, x_k\} $ is an orthogonal set, then the **Pythagorean Law** states that
#
# $$
# \| x_1 + \cdots + x_k \|^2
# = \| x_1 \|^2 + \cdots + \| x_k \|^2
# $$
#
# For example, when $ k=2 $, $ x_1 \perp x_2 $ implies
#
# $$
# \| x_1 + x_2 \|^2
# = \langle x_1 + x_2, x_1 + x_2 \rangle
# = \langle x_1, x_1 \rangle + 2 \langle x_2, x_1 \rangle + \langle x_2, x_2 \rangle
# = \| x_1 \|^2 + \| x_2 \|^2
# $$
# ### Linear Independence vs Orthogonality
#
# If $ X \subset \mathbb{R}^n $ is an orthogonal set and $ 0 \notin X $, then $ X $ is linearly independent
#
# Proving this is a nice exercise
#
# While the converse is not true, a kind of partial converse holds, as we’ll [see below](#gram-schmidt)
# ## The Orthogonal Projection Theorem
#
# What vector within a linear subspace of $ \mathbb{R}^n $ best approximates a given vector in $ \mathbb{R}^n $?
#
# The next theorem provides an answer to this question
#
# **Theorem** (OPT) Given $ y \in \mathbb{R}^n $ and linear subspace $ S \subset \mathbb{R}^n $,
# there exists a unique solution to the minimization problem
#
# $$
# \hat y := \mathop{\mathrm{arg\,min}}_{z \in S} \|y - z\|
# $$
#
# The minimizer $ \hat y $ is the unique vector in $ \mathbb{R}^n $ that satisfies
#
# - $ \hat y \in S $
# - $ y - \hat y \perp S $
#
#
# The vector $ \hat y $ is called the **orthogonal projection** of $ y $ onto $ S $
#
# The next figure provides some intuition
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/orth_proj_thm1.png" style="width:50%;height:50%">
# ### Proof of sufficiency
#
# We’ll omit the full proof.
#
# But we will prove sufficiency of the asserted conditions
#
# To this end, let $ y \in \mathbb{R}^n $ and let $ S $ be a linear subspace of $ \mathbb{R}^n $
#
# Let $ \hat y $ be a vector in $ \mathbb{R}^n $ such that $ \hat y \in S $ and $ y - \hat y \perp S $
#
# Let $ z $ be any other point in $ S $ and use the fact that $ S $ is a linear subspace (so $ \hat y - z \in S $, and hence $ y - \hat y \perp \hat y - z $) to deduce, via the Pythagorean law,
#
# $$
# \| y - z \|^2
# = \| (y - \hat y) + (\hat y - z) \|^2
# = \| y - \hat y \|^2 + \| \hat y - z \|^2
# $$
#
# Hence $ \| y - z \| \geq \| y - \hat y \| $, which completes the proof
# ### Orthogonal Projection as a Mapping
#
# For a linear space $ Y $ and a fixed linear subspace $ S $, we have a functional relationship
#
# $$
# y \in Y\; \mapsto \text{ its orthogonal projection } \hat y \in S
# $$
#
# By the OPT, this is a well-defined mapping or *operator* from $ \mathbb{R}^n $ to $ \mathbb{R}^n $
#
# In what follows we denote this operator by a matrix $ P $
#
# - $ P y $ represents the projection $ \hat y $
# - This is sometimes expressed as $ \hat E_S y = P y $, where $ \hat E $ denotes a **wide-sense expectations operator** and the subscript $ S $ indicates that we are projecting $ y $ onto the linear subspace $ S $
#
#
# The operator $ P $ is called the **orthogonal projection mapping onto** $ S $
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/orth_proj_thm2.png" style="width:50%;height:50%">
#
#
# It is immediate from the OPT that for any $ y \in \mathbb{R}^n $
#
# 1. $ P y \in S $ and
# 1. $ y - P y \perp S $
#
#
# From this we can deduce additional useful properties, such as
#
# 1. $ \| y \|^2 = \| P y \|^2 + \| y - P y \|^2 $ and
# 1. $ \| P y \| \leq \| y \| $
#
#
# For example, to prove 1, observe that $ y = P y + y - P y $ and apply the Pythagorean law
# #### Orthogonal Complement
#
# Let $ S \subset \mathbb{R}^n $.
#
# The **orthogonal complement** of $ S $ is the linear subspace $ S^{\perp} $ that satisfies
# $ x_1 \perp x_2 $ for every $ x_1 \in S $ and $ x_2 \in S^{\perp} $
#
# Let $ Y $ be a linear space with linear subspace $ S $ and its orthogonal complement $ S^{\perp} $
#
# We write
#
# $$
# Y = S \oplus S^{\perp}
# $$
#
# to indicate that for every $ y \in Y $ there is unique $ x_1 \in S $ and a unique $ x_2 \in S^{\perp} $
# such that $ y = x_1 + x_2 $.
#
# Moreover, $ x_1 = \hat E_S y $ and $ x_2 = y - \hat E_S y $
#
# This amounts to another version of the OPT:
#
# **Theorem**. If $ S $ is a linear subspace of $ \mathbb{R}^n $, $ \hat E_S y = P y $ and $ \hat E_{S^{\perp}} y = M y $, then
#
# $$
# P y \perp M y
# \quad \text{and} \quad
# y = P y + M y
# \quad \text{for all } \, y \in \mathbb{R}^n
# $$
#
# The next figure illustrates
#
# <img src="https://s3-ap-southeast-2.amazonaws.com/lectures.quantecon.org/jl/_static/figures/orth_proj_thm3.png" style="width:50%;height:50%">
# ## Orthonormal Basis
#
# An orthogonal set of vectors $ O \subset \mathbb{R}^n $ is called an **orthonormal set** if $ \| u \| = 1 $ for all $ u \in O $
#
# Let $ S $ be a linear subspace of $ \mathbb{R}^n $ and let $ O \subset S $
#
# If $ O $ is orthonormal and $ \mathop{\mathrm{span}} O = S $, then $ O $ is called an **orthonormal basis** of $ S $
#
# $ O $ is necessarily a basis of $ S $ (being independent by orthogonality and the fact that no element is the zero vector)
#
# One example of an orthonormal set is the canonical basis $ \{e_1, \ldots, e_n\} $
# that forms an orthonormal basis of $ \mathbb{R}^n $, where $ e_i $ is the $ i $ th unit vector
#
# If $ \{u_1, \ldots, u_k\} $ is an orthonormal basis of linear subspace $ S $, then
#
# $$
# x = \sum_{i=1}^k \langle x, u_i \rangle u_i
# \quad \text{for all} \quad
# x \in S
# $$
#
# To see this, observe that since $ x \in \mathop{\mathrm{span}}\{u_1, \ldots, u_k\} $, we can find
# scalars $ \alpha_1, \ldots, \alpha_k $ that verify
#
#
# <a id='equation-pob'></a>
# $$
# x = \sum_{j=1}^k \alpha_j u_j \tag{1}
# $$
#
# Taking the inner product with respect to $ u_i $ gives
#
# $$
# \langle x, u_i \rangle
# = \sum_{j=1}^k \alpha_j \langle u_j, u_i \rangle
# = \alpha_i
# $$
#
# Combining this result with [(1)](#equation-pob) verifies the claim
# ### Projection onto an Orthonormal Basis
#
# When we project onto a subspace with a known orthonormal basis, computing the projection simplifies:
#
# **Theorem** If $ \{u_1, \ldots, u_k\} $ is an orthonormal basis for $ S $, then
#
#
# <a id='equation-exp-for-op'></a>
# $$
# P y = \sum_{i=1}^k \langle y, u_i \rangle u_i,
# \quad
# \forall \; y \in \mathbb{R}^n \tag{2}
# $$
#
# Proof: Fix $ y \in \mathbb{R}^n $ and let $ P y $ be defined as in [(2)](#equation-exp-for-op)
#
# Clearly, $ P y \in S $
#
# We claim that $ y - P y \perp S $ also holds
#
# It suffices to show that $ y - P y \perp $ any basis vector $ u_i $ (why?)
#
# This is true because
#
# $$
# \left\langle y - \sum_{i=1}^k \langle y, u_i \rangle u_i, u_j \right\rangle
# = \langle y, u_j \rangle - \sum_{i=1}^k \langle y, u_i \rangle
# \langle u_i, u_j \rangle = 0
# $$
# ## Projection Using Matrix Algebra
#
# Let $ S $ be a linear subspace of $ \mathbb{R}^n $ and let $ y \in \mathbb{R}^n $.
#
# We want to compute the matrix $ P $ that verifies
#
# $$
# \hat E_S y = P y
# $$
#
# Evidently $ Py $ is a linear function from $ y \in \mathbb{R}^n $ to $ P y \in \mathbb{R}^n $
#
# This reference is useful [https://en.wikipedia.org/wiki/Linear_map#Matrices](https://en.wikipedia.org/wiki/Linear_map#Matrices)
#
# **Theorem.** Let the columns of $ n \times k $ matrix $ X $ form a basis of $ S $. Then
#
# $$
# P = X (X'X)^{-1} X'
# $$
#
# Proof: Given arbitrary $ y \in \mathbb{R}^n $ and $ P = X (X'X)^{-1} X' $, our claim is that
#
# 1. $ P y \in S $, and
# 1. $ y - P y \perp S $
#
#
# Claim 1 is true because
#
# $$
# P y = X (X' X)^{-1} X' y = X a
# \quad \text{when} \quad
# a := (X' X)^{-1} X' y
# $$
#
# An expression of the form $ X a $ is precisely a linear combination of the
# columns of $ X $, and hence an element of $ S $
#
# Claim 2 is equivalent to the statement
#
# $$
# y - X (X' X)^{-1} X' y \, \perp\, X b
# \quad \text{for all} \quad
# b \in \mathbb{R}^K
# $$
#
# This is true: If $ b \in \mathbb{R}^K $, then
#
# $$
# (X b)' [y - X (X' X)^{-1} X'
# y]
# = b' [X' y - X' y]
# = 0
# $$
#
# The proof is now complete
# ### Starting with $ X $
#
# It is common in applications to start with $ n \times k $ matrix $ X $ with linearly independent columns and let
#
# $$
# S := \mathop{\mathrm{span}} X := \mathop{\mathrm{span}} \{\mathop{\mathrm{col}}_1 X, \ldots, \mathop{\mathrm{col}}_k X \}
# $$
#
# Then the columns of $ X $ form a basis of $ S $
#
# From the preceding theorem, $ P y = X (X' X)^{-1} X' y $ projects $ y $ onto $ S $
#
# In this context, $ P $ is often called the **projection matrix**
#
# - The matrix $ M = I - P $ satisfies $ M y = \hat E_{S^{\perp}} y $ and is sometimes called the **annihilator matrix**
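#
# To see why: by the OPT, $ y - P y \perp S $, so
#
# $$
# M y = (I - P) y = y - P y = \hat E_{S^{\perp}} y
# $$
#
# which is just the decomposition $ y = P y + M y $ from the earlier theorem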
# ### The Orthonormal Case
#
# Suppose that $ U $ is $ n \times k $ with orthonormal columns
#
# Let $ u_i := \mathop{\mathrm{col}} U_i $ for each $ i $, let $ S := \mathop{\mathrm{span}} U $ and let $ y \in \mathbb{R}^n $
#
# We know that the projection of $ y $ onto $ S $ is
#
# $$
# P y = U (U' U)^{-1} U' y
# $$
#
# Since $ U $ has orthonormal columns, we have $ U' U = I $
#
# Hence
#
# $$
# P y
# = U U' y
# = \sum_{i=1}^k \langle u_i, y \rangle u_i
# $$
#
# We have recovered our earlier result about projecting onto the span of an orthonormal
# basis
# ### Application: Overdetermined Systems of Equations
#
# Let $ y \in \mathbb{R}^n $ and let $ X $ be $ n \times k $ with linearly independent columns
#
# Given $ X $ and $ y $, we seek $ b \in \mathbb{R}^k $ satisfying the system of linear equations $ X b = y $
#
# If $ n > k $ (more equations than unknowns), then the system is said to be **overdetermined**
#
# Intuitively, we may not be able to find a $ b $ that satisfies all $ n $ equations
#
# The best approach here is to
#
# - Accept that an exact solution may not exist
# - Look instead for an approximate solution
#
#
# By approximate solution, we mean a $ b \in \mathbb{R}^k $ such that $ X b $ is as close to $ y $ as possible
#
# The next theorem shows that the solution is well defined and unique
#
# The proof uses the OPT
#
# **Theorem** The unique minimizer of $ \| y - X b \| $ over $ b \in \mathbb{R}^K $ is
#
# $$
# \hat \beta := (X' X)^{-1} X' y
# $$
#
# Proof: Note that
#
# $$
# X \hat \beta = X (X' X)^{-1} X' y =
# P y
# $$
#
# Since $ P y $ is the orthogonal projection onto $ \mathop{\mathrm{span}}(X) $ we have
#
# $$
# \| y - P y \|
# \leq \| y - z \| \text{ for any } z \in \mathop{\mathrm{span}}(X)
# $$
#
# Because $ Xb \in \mathop{\mathrm{span}}(X) $
#
# $$
# \| y - X \hat \beta \|
# \leq \| y - X b \| \text{ for any } b \in \mathbb{R}^K
# $$
#
# This is what we aimed to show
# ## Least Squares Regression
#
# Let’s apply the theory of orthogonal projection to least squares regression
#
# This approach provides insights about many geometric properties of linear regression
#
# We treat only some examples
# ### Squared risk measures
#
# Given pairs $ (x, y) \in \mathbb{R}^K \times \mathbb{R} $, consider choosing $ f \colon \mathbb{R}^K \to \mathbb{R} $ to minimize
# the **risk**
#
# $$
# R(f) := \mathbb{E}\, [(y - f(x))^2]
# $$
#
# If probabilities and hence $ \mathbb{E}\, $ are unknown, we cannot solve this problem directly
#
# However, if a sample is available, we can estimate the risk with the **empirical risk**:
#
# $$
# \min_{f \in \mathcal{F}} \frac{1}{N} \sum_{n=1}^N (y_n - f(x_n))^2
# $$
#
# Minimizing this expression is called **empirical risk minimization**
#
# The set $ \mathcal{F} $ is sometimes called the hypothesis space
#
# The theory of statistical learning tells us that to prevent overfitting we should take the set $ \mathcal{F} $ to be relatively simple
#
# If we let $ \mathcal{F} $ be the class of linear functions and drop the constant $ 1/N $ (which does not affect the minimizer), the problem is
#
# $$
# \min_{b \in \mathbb{R}^K} \;
# \sum_{n=1}^N (y_n - b' x_n)^2
# $$
#
# This is the sample **linear least squares problem**
# ### Solution
#
# Define the matrices
#
# $$
# y :=
# \left(
# \begin{array}{c}
# y_1 \\
# y_2 \\
# \vdots \\
# y_N
# \end{array}
# \right),
# \quad
# x_n :=
# \left(
# \begin{array}{c}
# x_{n1} \\
# x_{n2} \\
# \vdots \\
# x_{nK}
# \end{array}
# \right)
# = \text{ $n$-th obs on all regressors}
# $$
#
# and
#
# $$
# X :=
# \left(
# \begin{array}{c}
# x_1' \\
# x_2' \\
# \vdots \\
# x_N'
# \end{array}
# \right)
# :=:
# \left(
# \begin{array}{cccc}
# x_{11} & x_{12} & \cdots & x_{1K} \\
# x_{21} & x_{22} & \cdots & x_{2K} \\
# \vdots & \vdots & & \vdots \\
# x_{N1} & x_{N2} & \cdots & x_{NK}
# \end{array}
# \right)
# $$
#
# We assume throughout that $ N > K $ and $ X $ is full column rank
#
# If you work through the algebra, you will be able to verify that $ \| y - X b \|^2 = \sum_{n=1}^N (y_n - b' x_n)^2 $
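#
# To spell out that step: the $ n $-th entry of $ X b $ is $ x_n' b = b' x_n $, so
#
# $$
# \| y - X b \|^2 = \sum_{n=1}^N (y_n - x_n' b)^2 = \sum_{n=1}^N (y_n - b' x_n)^2
# $$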
#
# Since monotone transforms don’t affect minimizers, we have
#
# $$
# \mathop{\mathrm{arg\,min}}_{b \in \mathbb{R}^K} \sum_{n=1}^N (y_n - b' x_n)^2
# = \mathop{\mathrm{arg\,min}}_{b \in \mathbb{R}^K} \| y - X b \|
# $$
#
# By our results about overdetermined linear systems of equations, the solution is
#
# $$
# \hat \beta := (X' X)^{-1} X' y
# $$
#
# Let $ P $ and $ M $ be the projection and annihilator associated with $ X $:
#
# $$
# P := X (X' X)^{-1} X'
# \quad \text{and} \quad
# M := I - P
# $$
#
# The **vector of fitted values** is
#
# $$
# \hat y := X \hat \beta = P y
# $$
#
# The **vector of residuals** is
#
# $$
# \hat u := y - \hat y = y - P y = M y
# $$
#
# Here are some more standard definitions:
#
# - The **total sum of squares** is $ := \| y \|^2 $
# - The **sum of squared residuals** is $ := \| \hat u \|^2 $
# - The **explained sum of squares** is $ := \| \hat y \|^2 $
#
#
# > TSS = ESS + SSR
#
#
# We can prove this easily using the OPT
#
# From the OPT we have $ y = \hat y + \hat u $ and $ \hat u \perp \hat y $
#
# Applying the Pythagorean law completes the proof
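#
# Explicitly,
#
# $$
# \| y \|^2 = \| \hat y + \hat u \|^2 = \| \hat y \|^2 + \| \hat u \|^2,
# $$
#
# which is exactly TSS = ESS + SSR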
# ## Orthogonalization and Decomposition
#
# Let’s return to the connection between linear independence and orthogonality touched on above
#
# A result of much interest is a famous algorithm for constructing orthonormal sets from linearly independent sets
#
# The next section gives details
#
#
# <a id='gram-schmidt'></a>
# ### Gram-Schmidt Orthogonalization
#
# **Theorem** For each linearly independent set $ \{x_1, \ldots, x_k\} \subset \mathbb{R}^n $, there exists an
# orthonormal set $ \{u_1, \ldots, u_k\} $ with
#
# $$
# \mathop{\mathrm{span}} \{x_1, \ldots, x_i\}
# =
# \mathop{\mathrm{span}} \{u_1, \ldots, u_i\}
# \quad \text{for} \quad
# i = 1, \ldots, k
# $$
#
# The **Gram-Schmidt orthogonalization** procedure constructs such an orthonormal set $ \{ u_1, u_2, \ldots, u_k\} $
#
# One description of this procedure is as follows:
#
# - For $ i = 1, \ldots, k $, form $ S_i := \mathop{\mathrm{span}}\{x_1, \ldots, x_i\} $ and $ S_i^{\perp} $
# - Set $ v_1 = x_1 $ and $ u_1 := v_1 / \| v_1 \| $
# - For $ i \geq 2 $ set $ v_i := \hat E_{S_{i-1}^{\perp}} x_i $ and $ u_i := v_i / \| v_i \| $
#
#
# The sequence $ u_1, \ldots, u_k $ has the stated properties
#
# A Gram-Schmidt orthogonalization construction is a key idea behind the Kalman filter described in [A First Look at the Kalman filter](kalman.html)
#
# In some exercises below you are asked to implement this algorithm and test it using projection
# ### QR Decomposition
#
# The following result uses the preceding algorithm to produce a useful decomposition
#
# **Theorem** If $ X $ is $ n \times k $ with linearly independent columns, then there exists a factorization $ X = Q R $ where
#
# - $ R $ is $ k \times k $, upper triangular, and nonsingular
# - $ Q $ is $ n \times k $ with orthonormal columns
#
#
# Proof sketch: Let
#
# - $ x_j := \mathop{\mathrm{col}}_j (X) $
# - $ \{u_1, \ldots, u_k\} $ be orthonormal with same span as $ \{x_1, \ldots, x_k\} $ (to be constructed using Gram–Schmidt)
# - $ Q $ be formed from cols $ u_i $
#
#
# Since $ x_j \in \mathop{\mathrm{span}}\{u_1, \ldots, u_j\} $, we have
#
# $$
# x_j = \sum_{i=1}^j \langle u_i, x_j \rangle u_i
# \quad \text{for } j = 1, \ldots, k
# $$
#
# Some rearranging gives $ X = Q R $
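#
# Concretely, with $ Q := (u_1 \; \cdots \; u_k) $ and
#
# $$
# R_{ij} :=
# \begin{cases}
# \langle u_i, x_j \rangle & \text{if } i \leq j \\
# 0 & \text{if } i > j
# \end{cases}
# $$
#
# the display above says that each column of $ X $ equals the corresponding column of $ Q R $, which is the stated factorization with $ R $ upper triangular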
# ### Linear Regression via QR Decomposition
#
# For a matrix $ X $ and vector $ y $ that overdetermine $ \beta $ in the linear
# equation system $ y = X \beta $, we found the least squares approximator $ \hat \beta = (X' X)^{-1} X' y $
#
# Using the QR decomposition $ X = Q R $ gives
#
# $$
# \begin{aligned}
# \hat \beta
# & = (R'Q' Q R)^{-1} R' Q' y \\
# & = (R' R)^{-1} R' Q' y \\
# & = R^{-1} (R')^{-1} R' Q' y
# = R^{-1} Q' y
# \end{aligned}
# $$
#
# Numerical routines would in this case use the alternative form $ R \hat \beta = Q' y $ and back substitution
# ## Exercises
# ### Exercise 1
#
# Show that, for any linear subspace $ S \subset \mathbb{R}^n $, $ S \cap S^{\perp} = \{0\} $
# ### Exercise 2
#
# Let $ P = X (X' X)^{-1} X' $ and let $ M = I - P $. Show that
# $ P $ and $ M $ are both idempotent and symmetric. Can you give any
# intuition as to why they should be idempotent?
# ### Exercise 3
#
# Using Gram-Schmidt orthogonalization, produce a linear projection of $ y $ onto the column space of $ X $ and verify this using the projection matrix $ P := X (X' X)^{-1} X' $ and also using QR decomposition, where:
#
# $$
# y :=
# \left(
# \begin{array}{c}
# 1 \\
# 3 \\
# -3
# \end{array}
# \right),
# \quad
# $$
#
# and
#
# $$
# X :=
# \left(
# \begin{array}{cc}
# 1 & 0 \\
# 0 & -6 \\
# 2 & 2
# \end{array}
# \right)
# $$
# ## Solutions
# ### Exercise 1
#
# If $ x \in S $ and $ x \in S^\perp $, then we have in particular
# that $ \langle x, x \rangle = 0 $. But then $ x = 0 $.
# ### Exercise 2
#
# Symmetry and idempotence of $ M $ and $ P $ can be established
# using standard rules for matrix algebra. The intuition behind
# idempotence of $ M $ and $ P $ is that both are orthogonal
# projections. After a point is projected into a given subspace, applying
# the projection again makes no difference. (A point inside the subspace
# is not shifted by orthogonal projection onto that space because it is
# already the closest point in the subspace to itself.)
# ### Exercise 3
#
# Here’s a function that computes the orthonormal vectors using the GS
# algorithm given in the lecture.
# ### Setup
# + hide-output=false
using InstantiateFromURL
activate_github("QuantEcon/QuantEconLecturePackages", tag = "v0.9.8");
# + hide-output=false
using LinearAlgebra, Statistics, Compat
# + hide-output=false
function gram_schmidt(X)
U = similar(X, Float64) # for robustness
function normalized_orthogonal_projection(b, Z)
# project onto the orthogonal complement of the col span of Z
orthogonal = I - Z * inv(Z'Z) * Z'
projection = orthogonal * b
# normalize
return projection / norm(projection)
end
for col in 1:size(U, 2)
# set up
b = X[:,col] # vector we're going to project
Z = X[:,1:col - 1] # first i-1 columns of X
U[:,col] = normalized_orthogonal_projection(b, Z)
end
return U
end
# -
# Here are the arrays we’ll work with
# + hide-output=false
y = [1, 3, -3]
X = [1 0; 0 -6; 2 2];
# -
# First let’s do ordinary projection of $ y $ onto the basis spanned
# by the columns of $ X $.
# + hide-output=false
Py1 = X * inv(X'X) * X' * y
# -
# Now let’s orthogonalize first, using Gram–Schmidt:
# + hide-output=false
U = gram_schmidt(X)
# -
# Now we can project using the orthonormal basis and see if we get the
# same thing:
# + hide-output=false
Py2 = U * U' * y
# -
# The result is the same. To complete the exercise, we get an orthonormal
# basis by QR decomposition and project once more.
# + hide-output=false
Q, R = qr(X)
Q = Matrix(Q)
# + hide-output=false
Py3 = Q * Q' * y
# -
# Again, the result is the same
| tools_and_techniques/orth_proj.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (py38)
# language: python
# name: py38
# ---
# +
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
import netCDF4 as nc
import datetime as dt
import cmocean
import string
import glob
import pickle
import matplotlib as mpl
from salishsea_tools import evaltools as et
mpl.rc('xtick', labelsize=8)
mpl.rc('ytick', labelsize=8)
mpl.rc('legend', fontsize=8)
mpl.rc('axes', titlesize=8)
mpl.rc('axes', labelsize=8)
mpl.rc('figure', titlesize=8)
mpl.rc('font', size=8)
mpl.rc('text', usetex=True)
mpl.rc('text.latex', preamble = r'''
\usepackage{txfonts}
\usepackage{lmodern}
''')
mpl.rc('font', family='sans-serif', weight='normal', style='normal')
from scipy.ndimage import gaussian_filter
from matplotlib.ticker import FormatStrFormatter
from NorthNut import vvl_interp_T_to_V, vvl_interp_T_to_U;
# %matplotlib inline
# -
# calculations have been moved to calcTranspsEstuarineExchange.py
fformat0='%Y%m%d'
mod_start=dt.datetime(2015,1,1)
mod_end=dt.datetime(2015,12,31)
savepath='../../save/transpLines'+mod_start.strftime(fformat0)+'-'+mod_end.strftime(fformat0)+'.pkl'
data=pickle.load(open(savepath,'rb'))
mod_start=data['mod_start']
mod_end=data['mod_end']
volT=data['volT']
no3T=data['no3T']
no3=data['no3']
masks=data['masks']
gdept_1d=data['gdept_1d']
e3t_1d=data['e3t_1d']
ulines=data['ulines']
vlines=data['vlines']
with nc.Dataset('/ocean/eolson/MEOPAR/NEMO-forcing/grid/mesh_mask201702_noLPE.nc') as fm:
umask=np.copy(fm.variables['umask'])
vmask=np.copy(fm.variables['vmask'])
tmask=np.copy(fm.variables['tmask'])
navlon=np.copy(fm.variables['nav_lon'])
navlat=np.copy(fm.variables['nav_lat'])
ulines
vlines
# t grid point (jt, it) is bounded by the u/v points at index jt-1 (below) and jt (above),
# so the next t point above a u/v line is at the u/v index + 1, and the domain ends at the u/v line
tmaskSOG=np.copy(tmask)
tmaskSOG[:,:,:500,:180]=0 # SJC
tmaskSOG[:,:,:(vlines['Rosario']['j']+1),:(vlines['Rosario']['i'][1])]=0 #west to rosario
tmaskSOG[:,:,:(vlines['Haro']['j']+1),:vlines['SJC']['i'][0]]=0 # west to haro
tmaskSOG[:,:,300:320,180:200]=0
tmaskSOG[:,:,:(vlines['SJC']['j']+1),:275]=0
tmaskSOG[:,:,:250,:325]=0
tmaskSOG[:,:,:230,:]=0
tmaskSOG[:,:,(vlines['Sutil']['j']+1):,:]=0
tmaskSOG[:,:,ulines['Malaspina']['j'][0]:,(ulines['Malaspina']['i']+1):]=0
tmaskSOG[:,:,710:,190:]=0
tmaskSOG[:,:,730:,170:180]=0
tmaskSOG[:,:,(vlines['Discovery']['j']+1):,110:135]=0
cm1=plt.get_cmap('autumn')
cm2=plt.get_cmap('winter')
fig,ax=plt.subplots(1,1,figsize=(7,10))
ax.pcolormesh(tmask[0,0,:,:],cmap=cm1)
ax.pcolormesh(np.ma.masked_where(tmaskSOG[0,0,:,:]==0,tmaskSOG[0,0,:,:]),cmap=cm2)
for el in ulines.keys():
ax.plot((ulines[el]['i'],ulines[el]['i']),(ulines[el]['j'][0],ulines[el]['j'][1]),'r-')
for el in vlines.keys():
ax.plot((vlines[el]['i'][0],vlines[el]['i'][1]),(vlines[el]['j'],vlines[el]['j']),'m-')
ax.set_xlim(100,398)
ax.set_ylim(200,760)
cm1=plt.get_cmap('autumn')
cm2=plt.get_cmap('winter')
fig,ax=plt.subplots(1,1,figsize=(7,10))
ax.pcolormesh(np.ma.masked_where(tmaskSOG[0,0,:,:]==0,tmaskSOG[0,0,:,:]),cmap=cm2)
for el in ulines.keys():
ax.plot((ulines[el]['i']+.5,ulines[el]['i']+.5),(ulines[el]['j'][0],ulines[el]['j'][1]),'r-')
for el in vlines.keys():
ax.plot((vlines[el]['i'][0],vlines[el]['i'][1]),(vlines[el]['j']+.5,vlines[el]['j']+.5),'m-')
ax.set_xlim(110,398)
ax.set_ylim(240,755)
ig0=110
ig1=398
jg0=240
jg1=755
SOGtmaskPath='../../save/SOGtmask.pkl'
pickle.dump((tmaskSOG,ig0,ig1,jg0,jg1),open(SOGtmaskPath,'wb'))
| notebooks/evalSection/transps_make_tmask.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SageMath 8.0
# language: ''
# name: sagemath
# ---
# + deletable=true editable=true
# %load_ext cython
# + deletable=true editable=true language="cython"
# import numpy as np
# cimport numpy as np
# import random
# cimport cython
#
# @cython.boundscheck(False)
# @cython.cdivision(True)
# def matriz(int N):
# cdef np.ndarray[np.uint16_t,ndim=2] M = np.zeros((2*N+1,2*N+1), \
# dtype=np.uint16)
# M[N,N]=2
# return M
#
#
#
# @cython.boundscheck(False)
# @cython.cdivision(True)
# def siguiente(np.ndarray[np.uint16_t,ndim=2] M, double p):
# cdef int n
# cdef int m
# cdef int N = M.shape[0]
# ##print N
# cdef np.uint16_t[:,:] MV = M
# for n in range(0,N-1):
# for m in range(0,N-1):
# if MV[n,m]==2:
# if MV[n-1,m] == 0:
# x = np.random.uniform(0.0,1.0)
# if x < p:
# M[n-1,m] = 2
# if MV[n,m-1] == 0:
# x = np.random.uniform(0.0,1.0)
# if x < p:
# M[n,m-1] = 2
# if MV[n+1,m] == 0:
# x = np.random.uniform(0.0,1.0)
# if x < p:
# M[n+1,m] = 2
# if MV[n,m+1] == 0:
# x = np.random.uniform(0.0,1.0)
# if x < p:
# M[n,m+1] = 2
# MV[n,m] = 1
# return M
#
#
#
# #M = matriz(5)
# #M1 = siguiente(M,0.9)
# #M2 = siguiente(M1,0.9)
# #print np.asarray(M1)
# #print np.asarray(M2)
#
#
# + deletable=true editable=true
import numpy as np
import matplotlib.pyplot as plt
#import random
import matplotlib.animation as animation
# %matplotlib notebook
M = matriz(400)
def generate_data(M):
return siguiente(M,0.501)
fig, ax = plt.subplots()
mat = ax.matshow(generate_data(M))
def update(data):
mat.set_data(data)
return mat
def data_gen():
global M
M = generate_data(M)
yield M
ani = animation.FuncAnimation(fig, update,data_gen)
plt.show()
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
# + deletable=true editable=true
| 2_Curso/Laboratorio/SAGE-noteb/IPYNB/MISCE/122-MISCE-percolacion-numpy-cython.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 (tensorflow)
# language: python
# name: rga
# ---
# <a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_03_5_weights.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # T81-558: Applications of Deep Neural Networks
# **Module 3: Introduction to TensorFlow**
# * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
# * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# # Module 3 Material
#
# * Part 3.1: Deep Learning and Neural Network Introduction [[Video]](https://www.youtube.com/watch?v=zYnI4iWRmpc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_03_1_neural_net.ipynb)
# * Part 3.2: Introduction to Tensorflow and Keras [[Video]](https://www.youtube.com/watch?v=PsE73jk55cE&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_03_2_keras.ipynb)
# * Part 3.3: Saving and Loading a Keras Neural Network [[Video]](https://www.youtube.com/watch?v=-9QfbGM1qGw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_03_3_save_load.ipynb)
# * Part 3.4: Early Stopping in Keras to Prevent Overfitting [[Video]](https://www.youtube.com/watch?v=m1LNunuI2fk&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_03_4_early_stop.ipynb)
# * **Part 3.5: Extracting Weights and Manual Calculation** [[Video]](https://www.youtube.com/watch?v=7PWgx16kH8s&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_03_5_weights.ipynb)
# # Google CoLab Instructions
#
# The following code ensures that Google CoLab is running the correct version of TensorFlow.
try:
# %tensorflow_version 2.x
COLAB = True
print("Note: using Google CoLab")
except:
print("Note: not using Google CoLab")
COLAB = False
# # Part 3.5: Extracting Keras Weights and Manual Neural Network Calculation
#
# ### Weight Initialization
#
# The weights of a neural network determine the output for the neural network. The process of training can adjust these weights so the neural network produces useful output. Most neural network training algorithms begin by initializing the weights to a random state. Training then progresses through a series of iterations that continuously improve the weights to produce better output.
#
# The random weights of a neural network impact how well that neural network can be trained. If a neural network fails to train, you can remedy the problem by simply restarting with a new set of random weights. However, this solution can be frustrating when you are experimenting with the architecture of a neural network and trying different combinations of hidden layers and neurons. If you add a new layer, and the network’s performance improves, you must ask yourself if this improvement resulted from the new layer or from a new set of weights. Because of this uncertainty, we look for two key attributes in a weight initialization algorithm:
#
# * How consistently does this algorithm provide good weights?
# * How much of an advantage do the weights of the algorithm provide?
#
# One of the most common, yet least effective, approaches to weight initialization is to set the weights to random values within a specific range. Numbers between -1 and +1 or -5 and +5 are often the choice. If you want to ensure that you get the same set of random weights each time, you should use a seed. The seed specifies a set of predefined random weights to use. For example, a seed of 1000 might produce random weights of 0.5, 0.75, and 0.2. These values are still random; you cannot predict them, yet you will always get these values when you choose a seed of 1000.
# Not all seeds are created equal. One problem with random weight initialization is that the random weights created by some seeds are much more difficult to train than others. In fact, the weights can be so bad that training is impossible. If you find that you cannot train a neural network with a particular weight set, you should generate a new set of weights using a different seed.
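#
# For instance, a small NumPy illustration of seeding (illustrative only; Keras and TensorFlow manage their own random state, but the principle is the same):

# +
import numpy as np

# The same seed always reproduces the same "random" draws...
print(np.random.default_rng(1000).uniform(-1, 1, 3))
print(np.random.default_rng(1000).uniform(-1, 1, 3))  # identical to the line above

# ...while a different seed yields a different candidate set of weights.
print(np.random.default_rng(1001).uniform(-1, 1, 3))
# -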
#
# Because weight initialization is a problem, there has been considerable research around it. In this course we use the Xavier weight initialization algorithm, introduced in 2010 by Glorot & Bengio[[Cite:glorot2010understanding]](http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf), which produces good weights with reasonable consistency. This relatively simple algorithm uses normally distributed random numbers.
#
# To use the Xavier weight initialization, it is necessary to understand that normally distributed random numbers are not the typical random numbers between 0 and 1 that most programming languages generate. In fact, normally distributed random numbers are centered on a mean ($\mu$, mu) that is typically 0. If 0 is the center (mean), then you will get an equal number of random numbers above and below 0. The next question is how far these random numbers will venture from 0. In theory, you could end up with both positive and negative numbers close to the maximum positive and negative ranges supported by your computer. However, the reality is that you will more likely see random numbers that are between 0 and three standard deviations from the center.
#
# The standard deviation ($\sigma$, sigma) parameter specifies the spread of this distribution. For example, if you specified a standard deviation of 10, then you would mainly see random numbers between -30 and +30, and the numbers nearer to 0 have a much higher probability of being selected.
#
# For a standard normal distribution, the center (0 in this case) has the greatest probability density, about 0.4. Additionally, the density decreases very quickly beyond -2 or +2 standard deviations. By defining the center and how large the standard deviation is, you are able to control the range of random numbers that you will receive.
#
# The Xavier weight initialization sets all of the weights to normally distributed random numbers. These weights are always centered at 0; however, their standard deviation varies depending on how many connections are present for the current layer of weights. Specifically, the following equation determines the variance (whose square root is the standard deviation used to draw the weights):
#
# $ Var(W) = \frac{2}{n_{in}+n_{out}} $
#
# The above equation shows how to obtain the variance for all of the weights. The square root of the variance is the standard deviation. Most random number generators accept a standard deviation rather than a variance. As a result, you usually need to take the square root of the above equation. The following figure shows how one layer might be initialized.
#
# ![Xavier Weight Initialization](images/xavier_weight.png)
#
# This process is completed for each layer in the neural network.
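#
# As an illustration, here is a minimal NumPy sketch of drawing one layer's weights with the Xavier (Glorot) normal scheme. It is only an illustration of the idea; Keras applies its own Glorot initializers internally, so you do not normally write this yourself.

# +
import numpy as np

def xavier_normal(n_in, n_out, seed=None):
    """Draw an (n_in, n_out) weight matrix with variance 2 / (n_in + n_out)."""
    rng = np.random.default_rng(seed)
    sigma = np.sqrt(2.0 / (n_in + n_out))  # standard deviation = sqrt(variance)
    return rng.normal(loc=0.0, scale=sigma, size=(n_in, n_out))

print(xavier_normal(2, 2, seed=42))  # e.g. a 2-input, 2-neuron hidden layer
# -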
#
# ### Manual Neural Network Calculation
#
# In this section we will build a neural network and analyze it down the individual weights. We will train a simple neural network that learns the XOR function. It is not hard to simply hand-code the neurons to provide an [XOR function](https://en.wikipedia.org/wiki/Exclusive_or); however, for simplicity, we will allow Keras to train this network for us. We will just use 100K epochs on the ADAM optimizer. This is massive overkill, but it gets the result, and our focus here is not on tuning. The neural network is small. Two inputs, two hidden neurons, and a single output.
# +
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
import numpy as np
# Create a dataset for the XOR function
x = np.array([
[0,0],
[1,0],
[0,1],
[1,1]
])
y = np.array([
0,
1,
1,
0
])
# Build the network
# sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
done = False
cycle = 1
while not done:
print("Cycle #{}".format(cycle))
cycle+=1
model = Sequential()
model.add(Dense(2, input_dim=2, activation='relu'))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x,y,verbose=0,epochs=10000)
# Predict
pred = model.predict(x)
# Check if successful. It takes several runs with this small of a network
done = pred[0]<0.01 and pred[3]<0.01 and pred[1] > 0.9 and pred[2] > 0.9
print(pred)
# -
pred[3]
# The output above should have two numbers near 0.0 for the first and fourth spots (input [[0,0]] and [[1,1]]). The middle two numbers should be near 1.0 (input [[1,0]] and [[0,1]]). These numbers are in scientific notation. Due to random starting weights, it is sometimes necessary to run the above through several cycles to get a good result.
#
# Now that the neural network is trained, lets dump the weights.
# Dump weights
for layerNum, layer in enumerate(model.layers):
weights = layer.get_weights()[0]
biases = layer.get_weights()[1]
for toNeuronNum, bias in enumerate(biases):
print(f'{layerNum}B -> L{layerNum+1}N{toNeuronNum}: {bias}')
for fromNeuronNum, wgt in enumerate(weights):
for toNeuronNum, wgt2 in enumerate(wgt):
print(f'L{layerNum}N{fromNeuronNum} -> L{layerNum+1}N{toNeuronNum} = {wgt2}')
# If you rerun this, you probably get different weights. There are many ways to solve the XOR function.
#
# In the next section, we copy/paste the weights from above and recreate the calculations done by the neural network. Because weights can change with each training, the weights used for the below code came from this:
#
# ```
# 0B -> L1N0: -1.2913415431976318
# 0B -> L1N1: -3.021530048386012e-08
# L0N0 -> L1N0 = 1.2913416624069214
# L0N0 -> L1N1 = 1.1912699937820435
# L0N1 -> L1N0 = 1.2913411855697632
# L0N1 -> L1N1 = 1.1912697553634644
# 1B -> L2N0: 7.626241297587034e-36
# L1N0 -> L2N0 = -1.548777461051941
# L1N1 -> L2N0 = 0.8394404649734497
# ```
# +
input0 = 0
input1 = 1
hidden0Sum = (input0*1.3)+(input1*1.3)+(-1.3)
hidden1Sum = (input0*1.2)+(input1*1.2)+(0)
print(hidden0Sum) # 0
print(hidden1Sum) # 1.2
hidden0 = max(0,hidden0Sum)
hidden1 = max(0,hidden1Sum)
print(hidden0) # 0
print(hidden1) # 1.2
outputSum = (hidden0*-1.6)+(hidden1*0.8)+(0)
print(outputSum) # 0.96
output = max(0,outputSum)
print(output) # 0.96
# -
| t81_558_class_03_5_weights.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook is designed to run in an IBM Watson Studio default runtime (NOT the Watson Studio Apache Spark Runtime, as the default runtime with 1 vCPU is free of charge). Therefore, we install Apache Spark in local mode for test purposes only. Please don't use it in production.
#
# In case you are facing issues, please read the following two documents first:
#
# https://github.com/IBM/skillsnetwork/wiki/Environment-Setup
#
# https://github.com/IBM/skillsnetwork/wiki/FAQ
#
# Then, please feel free to ask:
#
# https://coursera.org/learn/machine-learning-big-data-apache-spark/discussions/all
#
# Please make sure to follow the guidelines before asking a question:
#
# https://github.com/IBM/skillsnetwork/wiki/FAQ#im-feeling-lost-and-confused-please-help-me
#
#
# If running outside Watson Studio, this should work as well. In case you are running in an Apache Spark context outside Watson Studio, please remove the Apache Spark setup in the first notebook cells.
# +
from IPython.display import Markdown, display
def printmd(string):
display(Markdown('# <span style="color:red">'+string+'</span>'))
if ('sc' in locals() or 'sc' in globals()):
printmd('<<<<<!!!!! It seems that you are running in a IBM Watson Studio Apache Spark Notebook. Please run it in an IBM Watson Studio Default Runtime (without Apache Spark) !!!!!>>>>>')
# -
# !pip install pyspark==2.4.5
try:
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
except ImportError as e:
printmd('<<<<<!!!!! Please restart your kernel after installing Apache Spark !!!!!>>>>>')
# +
sc = SparkContext.getOrCreate(SparkConf().setMaster("local[*]"))
spark = SparkSession \
.builder \
.getOrCreate()
# -
# In case you want to learn how ETL is done, please run the following notebook first and update the file name below accordingly
#
# https://github.com/IBM/coursera/blob/master/coursera_ml/a2_w1_s3_ETL.ipynb
# +
# delete files from previous runs
# !rm -f hmp.parquet*
# download the file containing the data in PARQUET format
# !wget https://github.com/IBM/coursera/raw/master/hmp.parquet
# create a dataframe out of it
df = spark.read.parquet('hmp.parquet')
# register a corresponding query table
df.createOrReplaceTempView('df')
# +
from pyspark.ml.feature import VectorAssembler
vectorAssembler = VectorAssembler(inputCols=["x","y","z"],
outputCol="features")
# +
from pyspark.ml.clustering import KMeans
kmeans = KMeans().setK(13).setSeed(1)
# +
from pyspark.ml import Pipeline
pipeline = Pipeline(stages=[vectorAssembler, kmeans])
# -
df.createOrReplaceTempView('df')
df = spark.sql("select * from df where class in ('Brush_teeth','Climb_stairs')")
model = pipeline.fit(df)
wssse = model.stages[1].computeCost(vectorAssembler.transform(df))
print("Within Set Sum of Squared Errors = " + str(wssse))
| coursera_ml/a2_w3_kmeans_SparkML.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demo - Fairness Analysis of COMPAS by ProPublica
#
# Based on: https://github.com/propublica/compas-analysis
#
# What follows are the calculations performed for ProPublica's analysis of the COMPAS Recidivism Risk Scores. It might be helpful to open [the methodology](https://www.propublica.org/article/how-we-analyzed-the-compas-recidivism-algorithm/) in another tab to understand the following.
# +
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pylab as plt
import seaborn as sns
from responsibly.dataset import COMPASDataset
from responsibly.fairness.metrics import distplot_by
# -
# ## Loading the Data
#
# We select fields for severity of charge, number of priors, demographics, age, sex, compas scores, and whether each person was accused of a crime within two years.
#
# There are a number of reasons to remove rows because of missing data:
#
# - If the charge date of a defendant's COMPAS-scored crime was not within 30 days of when the person was arrested, we assume that, because of data quality reasons, we do not have the right offense.
#
# - We coded the recidivist flag -- `is_recid` -- to be -1 if we could not find a compas case at all.
#
# - In a similar vein, ordinary traffic offenses -- those with a `c_charge_degree` of 'O', which do not result in jail time -- are removed (only two of them).
#
# - We filtered the underlying data from Broward county to include only those rows representing people who had either recidivated in two years, or had at least two years outside of a correctional facility.
#
# All of this is already done by instantiating a `COMPASDataset` object from `responsibly`.
# +
compas_ds = COMPASDataset()
df = compas_ds.df
len(df)
# -
# ## EDA
# Higher COMPAS scores are slightly correlated with a longer length of stay.
stats.pearsonr(df['length_of_stay'].astype(int), df['decile_score'])
# After filtering we have the following demographic breakdown:
df['age_cat'].value_counts()
df['race'].value_counts()
(((df['race'].value_counts() / len(df))
* 100)
.round(2))
df['score_text'].value_counts()
pd.crosstab(df['sex'], df['race'])
(((df['sex'].value_counts() / len(df))
* 100)
.round(2))
df['two_year_recid'].value_counts()
(((df['two_year_recid'].value_counts() / len(df))
* 100)
.round(2))
# Judges are often presented with two sets of scores from the COMPAS system -- one that classifies people into High, Medium and Low risk, and a corresponding decile score. For white defendants, there is a clear downward trend in the number of defendants as the decile score increases.
RACE_IN_FOCUS = ['African-American', 'Caucasian']
df_race_focused = df[df['race'].isin(RACE_IN_FOCUS)]
g = sns.FacetGrid(df_race_focused, col='race', height=7)#, aspect=4,)
g.map(plt.hist, 'decile_score', rwidth=0.9);
distplot_by(df['decile_score'], df['race'], hist=False);
pd.crosstab(df['decile_score'], df['race'])
pd.crosstab(df['two_year_recid'], df['race'], normalize='index')
pd.crosstab(df_race_focused['two_year_recid'],
df_race_focused['race'],
normalize='index')
# ## Fairness Demographic Classification Criteria
#
# Based on: https://fairmlbook.org/demographic.html
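#
# As a quick reminder (paraphrasing the reference above; here $R$ denotes the score or the binarized prediction, $A$ the sensitive attribute -- race -- and $Y$ the outcome, two-year recidivism):
#
# * **Independence:** $R \perp A$ -- positive-prediction rates are equal across groups.
# * **Separation:** $R \perp A \mid Y$ -- error rates (TPR/FPR) are equal across groups.
# * **Sufficiency:** $Y \perp A \mid R$ -- outcomes are calibrated given the score, within each group.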
from responsibly.fairness.metrics import (independence_binary,
separation_binary,
sufficiency_binary,
independence_score,
separation_score,
sufficiency_score,
report_binary,
plot_roc_by_attr)
# ### Independence
indp, indp_cmp = independence_binary((df_race_focused['decile_score'] > 4),
df_race_focused['race'],
'Caucasian',
as_df=True)
indp.plot(kind='bar');
indp_cmp
independence_score(df_race_focused['decile_score'],
df_race_focused['race'], as_df=True).plot();
# ### Separation
sep, sep_cmp = separation_binary(df_race_focused['two_year_recid'],
(df_race_focused['decile_score'] > 4),
df_race_focused['race'],
'Caucasian',
as_df=True)
sep.plot(kind='bar');
sep_cmp
plot_roc_by_attr(df_race_focused['two_year_recid'],
df_race_focused['decile_score'],
df_race_focused['race'],
figsize=(7, 7));
# ### Sufficiency
suff, suff_cmp = sufficiency_binary(df_race_focused['two_year_recid'],
(df_race_focused['decile_score'] > 4),
df_race_focused['race'],
'Caucasian',
as_df=True)
suff.plot(kind='bar');
suff_cmp
sufficiency_score(df_race_focused['two_year_recid'],
df_race_focused['decile_score'],
df_race_focused['race'],
as_df=True).plot();
# #### Transforming the score to percentiles by group
sufficiency_score(df_race_focused['two_year_recid'],
df_race_focused['decile_score'],
df_race_focused['race'],
within_score_percentile=True,
as_df=True).plot();
# ### Generating all the relevant statistics for a binary prediction
report_binary(df_race_focused['two_year_recid'],
df_race_focused['decile_score'] > 4,
df_race_focused['race'])
# ## Threshold Intervention
from responsibly.fairness.metrics import roc_curve_by_attr
from responsibly.fairness.interventions.threshold import (find_thresholds_by_attr,
plot_fpt_tpr,
plot_roc_curves_thresholds,
plot_costs,
plot_thresholds)
rocs = roc_curve_by_attr(df_race_focused['two_year_recid'],
df_race_focused['decile_score'],
df_race_focused['race'])
# ### Thresholds vs. FPR and TPR
plot_fpt_tpr(rocs);
# ### Comparison of Different Criteria
#
# * Single threshold (Group Unaware)
# * Minimum Cost
# * Independence (Demographic Parity)
# * FNR (Equality of opportunity)
# * Separation (Equalized odds)
#
# #### Cost: $FP = FN = -1$
COST_MATRIX = [[0, -1],
[-1, 0]]
thresholds_data = find_thresholds_by_attr(df_race_focused['two_year_recid'],
df_race_focused['decile_score'],
df_race_focused['race'],
COST_MATRIX)
plot_roc_curves_thresholds(rocs, thresholds_data);
# ### Thresholds by Strategy and Group
plot_thresholds(thresholds_data, xlim=(0, 10));
# ### Cost by Threshold Strategy
plot_costs(thresholds_data);
| docs/notebooks/demo-compas-analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# DataSet Example
# ==================
import qcodes as qc
import pprint as pp
import utils
from plots import ScanPlotFromDataSet
# %matplotlib notebook
from IPython.display import Image
data = qc.load_data('data/2018-06-06/#002_scan_09-29-05')
data
# Plotting
# --------
# Generate interactive plot like the one created during the scan:
scan_plot = ScanPlotFromDataSet(data)
# Display plot as image (not interactive):
Image(filename=data.location + '/' + data.metadata['loop']['metadata']['fname'] + '.png')
# Explore DataSet metadata
# --------------------------
print(list(data.metadata.keys()))
print(list(data.metadata['station'].keys()))
print(list(data.metadata['loop']['metadata'].keys()))
# Measurement loop metadata
pp.pprint(data.metadata['loop']['metadata']['channels'])
data.metadata['loop']['metadata']['prefactors']
# Instrument snapshots
SUSC_snap = data.metadata['station']['instruments']['SUSC_lockin']
for name, param in SUSC_snap['parameters'].items():
if 'value' in param.keys():
print(name, param['value'], param['unit'])
# Convert DataSet to arrays with real units
# -------------------------------------------
# Leave everything in DAQ voltage units
arrays = utils.scan_to_arrays(data, real_units=False)
for name, array in arrays.items():
print((name, array.units))
# Convert $z$-data to real units, but leave $x$ and $y$ as voltages
arrays = utils.scan_to_arrays(data, real_units=True)
for name, array in arrays.items():
print((name, array.units))
# Convert $z$-data to real units and $x$, $y$ to $\mu\mathrm{m}$:
arrays = utils.scan_to_arrays(data, real_units=True, xy_unit='um')
for name, array in arrays.items():
print((name, array.units))
print((arrays['x'].magnitude[0], arrays['x'].units))
# Convert $z$-data to real units and $x$, $y$ to $\mathrm{nm}$:
arrays = utils.scan_to_arrays(data, real_units=True, xy_unit='nm')
for name, array in arrays.items():
print((name, array.units))
print((arrays['x'].magnitude[0], arrays['x'].units))
# Export data to a MAT file:
# ---------------------------
utils.scan_to_mat_file(data, real_units=True, xy_unit='um')
utils.scan_to_mat_file(data, real_units=True, xy_unit=None)
utils.scan_to_mat_file(data, real_units=False)
| docs/examples/DataSetExample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.10 64-bit (''base'': conda)'
# language: python
# name: python3
# ---
# # Dfsu - spectral data
#
# MIKE 21 SW can output full spectral information in points, along lines or in an area. In all these cases data are stored in dfsu files with additional axes: frequency and directions.
#
# This notebook explores reading __full__ spectral dfsu files from MIKE 21 SW as
#
# * point
# * line
# * area
#
import numpy as np
import matplotlib.pyplot as plt
from mikeio import Dfsu
# ## Read dfsu point spectrum
fn = "../tests/testdata/pt_spectra.dfsu"
dfs = Dfsu(fn)
dfs
ds = dfs.read(time_steps=0)
spec = np.squeeze(ds[0])
ax = dfs.plot_spectrum(spec, rmax=8, plot_type="patch");
dird = np.round(dfs.directions,2)
ax.set_thetagrids(dird,labels=dird);
# ## Dfsu line spectrum
#
# Data in dfsu line spectra is node-based, contrary to most other dfsu formats.
fn = "../tests/testdata/line_spectra.dfsu"
dfs = Dfsu(fn)
dfs
ds = dfs.read()
spec = np.squeeze(ds[0][0,3,:,:]) # note first 3 points are outside domain
dfs.plot_spectrum(spec, cmap="Greys", rmax=8, r_as_periods=True);
# ### Plot Hm0 on a line
Hm0 = dfs.calc_Hm0_from_spectrum(ds[0])
timestep = 0
plt.plot(dfs.node_coordinates[:,0],Hm0[timestep,:])
plt.title('Hm0 on a line crossing the domain')
plt.xlabel("Longitude [degrees]")
plt.ylabel("Hm0 [m]");
# ## Dfsu area spectrum
fn = "../tests/testdata/area_spectra.dfsu"
dfs = Dfsu(fn)
dfs
ds = dfs.read()
# ### Plot map of Hm0 calculated from spectrum
Hm0 = dfs.calc_Hm0_from_spectrum(ds[0])
Hm0.shape
ax = dfs.plot(Hm0[-1,:], label="Hm0 [m]") # last time step
ax.set_xlabel("Longitude")
ax.set_ylabel("Latitude");
# ### Interactive widget for exploring spectra in different points
from ipywidgets import interact
from datetime import timedelta
@interact
def plot_element(id=(0,dfs.n_elements-1), step=(0,dfs.n_timesteps-1)):
spec = np.squeeze(ds[0][step,id])
time = dfs.start_time + timedelta(seconds=(step*dfs.timestep))
dfs.plot_spectrum(spec, vmax=0.04, vmin=0, rmax=8, title=f"Wave spectrum, {time}, element: {id}");
plt.show()
| notebooks/Dfsu - Spectral data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Finding the Maximum Margin
# The separating hyperplane has the form $w^Tx+b$.
# From the point-to-line distance formula:
# $$
# d=\frac{|Ax_0+By_0+C|}{\sqrt{A^2+B^2}}
# $$
# the distance from a point $A$ to the separating hyperplane (the length of the normal from the point to the separating plane) is:
# $$
# \frac{|w^TA+b|}{\|w\|}
# $$
#
# The goal is therefore to find the $w$ and $b$ that maximize this margin for the points closest to the hyperplane:
# $$
# \arg\max_{w,b}\left\{ \min_n\left(\text{label}\cdot(w^Tx+b)\right)\cdot\frac{1}{\|w\|}\right\}
# $$
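# As a quick check of the distance formula above, here is a minimal sketch; the hyperplane normal `w_demo`, bias `b_demo` and point `A_demo` are made-up illustrative values, not taken from the dataset used later in this notebook.
# +
import numpy as np
# hypothetical hyperplane w^T x + b = 0 and a point A
w_demo = np.array([2.0, -1.0])
b_demo = 0.5
A_demo = np.array([1.0, 3.0])
# geometric distance |w^T A + b| / ||w||
dist = abs(float(w_demo @ A_demo + b_demo)) / np.linalg.norm(w_demo)
print(dist)
# -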
# ## The Efficient SMO Optimization Algorithm
# +
'''Helper functions for the SMO algorithm
'''
import numpy as np
def loadDataSet(fileName):
dataMat = []; labelMat = []
fr = open(fileName)
for line in fr.readlines():
lineArr = line.strip().split('\t')
dataMat.append([float(lineArr[0]), float(lineArr[1])])
labelMat.append(float(lineArr[2]))
return dataMat, labelMat
def selectJrand(i, m):
j = i
while (j==i):
j = int(np.random.uniform(0, m))
return j
def clipAlpha(aj, H, L):
if aj > H:
aj = H
if L > aj:
aj = L
return aj
# -
dataArr, labelArr = loadDataSet('dataset/svm/testSet.txt')
# +
'''Simplified SMO algorithm
'''
def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
dataMatrix = np.mat(dataMatIn)
labelMat = np.mat(classLabels).transpose()
b = 0; m, n = np.shape(dataMatrix)
alphas = np.mat(np.zeros((m, 1)))
iter = 0
while (iter < maxIter):
alphaPairsChanged = 0
for i in range(m):
fXi = float(np.multiply(alphas, labelMat).T*\
(dataMatrix*dataMatrix[i,:].T)) + b
Ei = fXi - float(labelMat[i])
if ((labelMat[i]*Ei < -toler) and (alphas[i] < C)) or \
((labelMat[i]*Ei > toler) and \
(alphas[i] > 0)):
j = selectJrand(i ,m)
fXj = float(np.multiply(alphas, labelMat).T*\
(dataMatrix*dataMatrix[j,:].T)) + b
Ej = fXj - float(labelMat[j])
alphaIold = alphas[i].copy()
alphaJold = alphas[j].copy()
if (labelMat[i] != labelMat[j]):
                    L = max(0, alphas[j] - alphas[i])
                    H = min(C, C + alphas[j] - alphas[i])
else:
L = max(0, alphas[j] + alphas[i] - C)
                    H = min(C, alphas[j] + alphas[i])
if L==H:
print("L==H")
continue
eta = 2.0 * dataMatrix[i, :] * dataMatrix[j, :].T - \
dataMatrix[i, :] * dataMatrix[i, :].T - \
dataMatrix[j, :] * dataMatrix[j, :].T
if eta >= 0:
print("eta>=0")
continue
alphas[j] -= labelMat[j] * (Ei - Ej) / eta
alphas[j] = clipAlpha(alphas[j], H, L)
if (abs(alphas[j] - alphaJold) < 0.00001):
print("j not moving enough")
continue
alphas[i] += labelMat[j] * labelMat[i] *\
(alphaJold - alphas[j])
b1 = b - Ei - labelMat[i] * (alphas[i] - alphaIold) * \
dataMatrix[i, :] * dataMatrix[i, :].T - \
labelMat[j] * (alphas[j] - alphaJold) * \
dataMatrix[i, :] * dataMatrix[j, :].T
b2 = b - Ej - labelMat[i] * (alphas[i] - alphaIold) * \
dataMatrix[i, :] * dataMatrix[j, :].T - \
labelMat[j] * (alphas[j] - alphaJold) * \
dataMatrix[j, :] * dataMatrix[j, :].T
if (0 < alphas[i]) and (C > alphas[i]):
b = b1
elif (0 < alphas[j]) and (C > alphas[j]):
b = b2
else:
b = (b1 + b2) / 2.0
alphaPairsChanged += 1
print("iter: %d i: ^d, pairs changed %d" %\
(iter, i, alphaPairsChanged))
if(alphaPairsChanged == 0):
iter += 1
else:
iter = 0
print("iteration number: %d") % iter
return b, alphas
# +
'''Support functions for the full Platt SMO
'''
class optStruct:
def __init__(self, dataMatIn, classLabels, C, toler):
self.X = dataMatIn
self.labelMat = classLabels
self.C = C
self.tol = toler
self.m = np.shape(dataMatIn)[0]
self.alphas = np.mat(np.zeros((self.m, 1)))
self.b = 0
self.eCache = np.mat(np.zeros((self.m, 2)))
def calcEk(oS, k):
    fXk = float(np.multiply(oS.alphas, oS.labelMat).T*\
                (oS.X*oS.X[k, :].T)) + oS.b
Ek = fXk - float(oS.labelMat[k])
return Ek
def selectJ(i, oS, Ei):
maxK = -1; maxDeltaE = 0; Ej = 0
oS.eCache[i] = [1, Ei]
validEcacheList = np.nonzero(oS.eCache[:, 0].A)[0]
    if (len(validEcacheList)) > 1:
        # choose the j that gives the largest change in E
        for k in validEcacheList:
            if k == i:
                continue
            Ek = calcEk(oS, k)
            deltaE = abs(Ei - Ek)
            if (deltaE > maxDeltaE):
                maxK = k; maxDeltaE = deltaE; Ej = Ek
        return maxK, Ej
    else:
        # no valid cache entries yet: pick j at random
        j = selectJrand(i, oS.m)
        Ej = calcEk(oS, j)
    return j, Ej
# -
| supportVecMachine.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#
# ---
# # VIDEO: Algebraic and geometric interpretations
# ---
#
# +
# 2-dimensional vector
v2 = [ 3, -2 ]
# 3-dimensional vector
v3 = [ 4, -3, 2 ]
# row to column (or vice-versa):
v3t = np.transpose(v3)
# plot them
plt.plot([0,v2[0]],[0,v2[1]])
plt.axis('equal')
plt.plot([-4, 4],[0, 0],'k--')
plt.plot([0, 0],[-4, 4],'k--')
plt.grid()
plt.axis((-4, 4, -4, 4))
plt.show()
# plot the 3D vector
fig = plt.figure(figsize=plt.figaspect(1))
ax = fig.gca(projection='3d')
ax.plot([0, v3[0]],[0, v3[1]],[0, v3[2]],linewidth=3)
# make the plot look nicer
ax.plot([0, 0],[0, 0],[-4, 4],'k--')
ax.plot([0, 0],[-4, 4],[0, 0],'k--')
ax.plot([-4, 4],[0, 0],[0, 0],'k--')
plt.show()
# -
#
# ---
# # VIDEO: Vector addition/subtraction
# ---
#
# +
# two vectors in R2
v1 = np.array([ 3, -1 ])
v2 = np.array([ 2, 4 ])
v3 = v1 + v2
# plot them
plt.plot([0, v1[0]],[0, v1[1]],'b',label='v1')
plt.plot([0, v2[0]]+v1[0],[0, v2[1]]+v1[1],'r',label='v2')
plt.plot([0, v3[0]],[0, v3[1]],'k',label='v1+v2')
plt.legend()
plt.axis('square')
plt.axis((-6, 6, -6, 6 ))
plt.grid()
plt.show()
# -
#
# ---
# # VIDEO: Vector-scalar multiplication
# ---
#
# +
# vector and scalar
v1 = np.array([ 3, -1 ])
l = 2.3
v1m = v1*l # scalar-modulated
# plot them
plt.plot([0, v1[0]],[0, v1[1]],'b',label='v_1')
plt.plot([0, v1m[0]],[0, v1m[1]],'r:',label='\lambda v_1')
plt.axis('square')
axlim = max([max(abs(v1)),max(abs(v1m))])*1.5 # dynamic axis lim
plt.axis((-axlim,axlim,-axlim,axlim))
plt.grid()
plt.show()
# -
#
# ---
# # VIDEO: Vector-vector multiplication: the dot product
# ---
#
# +
## many ways to compute the dot product
v1 = np.array([ 1, 2, 3, 4, 5, 6 ])
v2 = np.array([ 0, -4, -3, 6, 5, 4 ])  # must have the same length as v1
# method 1
dp1 = sum( np.multiply(v1,v2) )
# method 2
dp2 = np.dot( v1,v2 )
# method 3
dp3 = np.matmul( v1,v2 )
# method 4
dp4 = 0 # initialize
# loop over elements
for i in range(len(v1)):
# multiply corresponding element and sum
dp4 = dp4 + v1[i]*v2[i]
print(dp1,dp2,dp3,dp4)
# -
# # VIDEO: Dot product properties: associative and distributive
# +
## Distributive property
# create random vectors
n = 10
a = np.random.randn(n)
b = np.random.randn(n)
c = np.random.randn(n)
# the two results
res1 = np.dot( a , (b+c) )
res2 = np.dot(a,b) + np.dot(a,c)
# compare them
print([ res1,res2 ])
# +
## Associative property
# create random vectors
n = 5
a = np.random.randn(n)
b = np.random.randn(n)
c = np.random.randn(n)
# the two results
res1 = np.dot( a , np.dot(b,c) )
res2 = np.dot( np.dot(a,b) , c )
# compare them
print(res1)
print(res2)
### special cases where associative property works!
# 1) one vector is the zeros vector
# 2) a==b==c
# -
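# A quick numerical check of the two special cases noted above (the vectors generated here are illustrative random values):
# +
# case 1: one vector is the zeros vector -> both sides are the zero vector
a = np.random.randn(5)
b = np.zeros(5)
c = np.random.randn(5)
print( np.dot( a , np.dot(b,c) ) )
print( np.dot( np.dot(a,b) , c ) )
# case 2: all three vectors are identical -> both sides give the same vector
a = np.random.randn(5)
print( np.dot( a , np.dot(a,a) ) )
print( np.dot( np.dot(a,a) , a ) )
# -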
#
# ---
# # VIDEO: Vector length
# ---
#
# +
# a vector
v1 = np.array([ 1, 2, 3, 4, 5, 6 ])
# methods 1-4, just like with the regular dot product, e.g.:
vl1 = np.sqrt( sum( np.multiply(v1,v1)) )
# method 5: take the norm
vl2 = np.linalg.norm(v1)
print(vl1,vl2)
# -
#
# ---
# # VIDEO: The dot product from a geometric perspective
# ---
#
# +
# two vectors
v1 = np.array([ 2, 4, -3 ])
v2 = np.array([ 0, -3, -3 ])
# compute the angle (radians) between two vectors
ang = np.arccos( np.dot(v1,v2) / (np.linalg.norm(v1)*np.linalg.norm(v2)) )
# draw them
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot([0, v1[0]],[0, v1[1]],[0, v1[2]],'b')
ax.plot([0, v2[0]],[0, v2[1]],[0, v2[2]],'r')
plt.axis((-6, 6, -6, 6))
plt.title('Angle between vectors: %s rad.' %ang)
plt.show()
# +
## equivalence of algebraic and geometric dot product formulas
# two vectors
v1 = np.array([ 2, 4, -3 ])
v2 = np.array([ 0, -3, -3 ])
# algebraic
dp_a = np.dot( v1,v2 )
# geometric
dp_g = np.linalg.norm(v1)*np.linalg.norm(v2)*np.cos(ang)
# print dot product to command
print(dp_a)
print(dp_g)
# -
#
# ---
# # VIDEO: Vector Hadamard multiplication
# ---
#
# +
# create vectors
w1 = [ 1, 3, 5 ]
w2 = [ 3, 4, 2 ]
w3 = np.multiply(w1,w2)
print(w3)
# -
#
# ---
# # VIDEO: Vector outer product
# ---
#
# +
v1 = np.array([ 1, 2, 3 ])
v2 = np.array([ -1, 0, 1 ])
# outer product
np.outer(v1,v2)
# terrible programming, but helps conceptually:
op = np.zeros((len(v1),len(v1)))
for i in range(0,len(v1)):
for j in range(0,len(v2)):
op[i,j] = v1[i] * v2[j]
print(op)
# -
#
# ---
# # VIDEO: Vector cross product
# ---
#
# +
# create vectors
v1 = [ -3, 2, 5 ]
v2 = [ 4, -3, 0 ]
# Python's cross-product function
v3a = np.cross( v1,v2 )
# "manual" method
v3b = [ [v1[1]*v2[2] - v1[2]*v2[1]],
[v1[2]*v2[0] - v1[0]*v2[2]],
[v1[0]*v2[1] - v1[1]*v2[0]] ]
print(v3a,v3b)
fig = plt.figure()
ax = fig.gca(projection='3d')
# draw plane defined by span of v1 and v2
xx, yy = np.meshgrid(np.linspace(-10,10,10),np.linspace(-10,10,10))
z1 = (-v3a[0]*xx - v3a[1]*yy)/v3a[2]
ax.plot_surface(xx,yy,z1,alpha=.2)
## plot the two vectors
ax.plot([0, v1[0]],[0, v1[1]],[0, v1[2]],'k')
ax.plot([0, v2[0]],[0, v2[1]],[0, v2[2]],'k')
ax.plot([0, v3a[0]],[0, v3a[1]],[0, v3a[2]],'r')
ax.view_init(azim=150,elev=45)
plt.show()
# -
#
# ---
# # VIDEO: Hermitian transpose (a.k.a. conjugate transpose)
# ---
#
# +
# create a complex number
z = np.complex(3,4)
# magnitude
print( np.linalg.norm(z) )
# by transpose?
print( np.transpose(z)*z )
# by Hermitian transpose
print( np.transpose(z.conjugate())*z )
# complex vector
v = np.array( [ 3, 4j, 5+2j, np.complex(2,-5) ] )
print( v.T )
print( np.transpose(v) )
print( np.transpose(v.conjugate()) )
# -
#
# ---
# # VIDEO: Unit vector
# ---
#
# +
# vector
v1 = np.array([ -3, 6 ])
# mu
mu = 1/np.linalg.norm(v1)
v1n = v1*mu
# plot them
plt.plot([0, v1n[0]],[0, v1n[1]],'r',label='v1-norm',linewidth=5)
plt.plot([0, v1[0]],[0, v1[1]],'b',label='v1')
# axis square
plt.axis('square')
plt.axis(( -6, 6, -6, 6 ))
plt.grid()
plt.legend()
plt.show()
# -
#
# ---
# # VIDEO: Span
# ---
#
# +
# set S
S1 = np.array([1, 1, 0])
S2 = np.array([1, 7, 0])
# vectors v and w
v = np.array([1, 2, 0])
w = np.array([3, 2, 1])
# draw vectors
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot([0, S1[0]],[0, S1[1]],[.1, S1[2]+.1],'r',linewidth=3)
ax.plot([0, S2[0]],[0, S2[1]],[.1, S2[2]+.1],'r',linewidth=3)
ax.plot([0, v[0]],[0, v[1]],[.1, v[2]+.1],'g',linewidth=3)
ax.plot([0, w[0]],[0, w[1]],[0, w[2]],'b')
# now draw plane
xx, yy = np.meshgrid(range(-15,16), range(-15,16))
cp = np.cross(S1,S2)
z1 = (-cp[0]*xx - cp[1]*yy)*1./cp[2]
ax.plot_surface(xx,yy,z1)
plt.show()
| matlab/udemy/Vectors/linalg_vectors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.5
# language: python
# name: python3
# ---
# ## The Battle of the Neighborhoods - Week 1
# ### Introduction & Business Problem :
# ### Problem Background:
# The City of New York is the most populous city in the United States. It is diverse, multicultural, and the financial capital of the USA. It provides many business opportunities and a business-friendly environment, and it has attracted many different players into the market. It is a global hub of business and commerce: the city is a major center for banking and finance, retailing, world trade, transportation, tourism, real estate, new media, traditional media, advertising, legal services, accountancy, insurance, theater, fashion, and the arts in the United States.
#
# This also means that the market is highly competitive. As it is a highly developed city, the cost of doing business is also one of the highest.
# Thus, any new business venture or expansion needs to be analysed carefully. The insights derived from the analysis will give a good understanding
# of the business environment, which helps in strategically targeting the market. This reduces risk and keeps the return on investment reasonable.
# ### Problem Description:
# A restaurant is a business which prepares and serves food and drink to customers in return for money, either paid before the meal, after the meal, or with an open account. The City of New York is famous for its excellent cuisine. Its food culture includes an array of international cuisines influenced by the city's immigrant history.<Br>
# 1. Central and Eastern European immigrants, especially Jewish immigrants - bagels, cheesecake, hot dogs, knishes, and delicatessens<Br>
# 2. Italian immigrants - New York-style pizza and Italian cuisine<Br>
# 3. Jewish immigrants and Irish immigrants - pastrami and corned beef<Br>
# 4. Chinese and other Asian restaurants, sandwich joints, trattorias, diners, and coffeehouses are ubiquitous throughout the city<Br>
# 5. mobile food vendors - Some 4,000 licensed by the city<Br>
# 6. Middle Eastern foods such as falafel and kebabs examples of modern New York street food<Br>
# 7. It is famous for not just Pizzerias, Cafe's but also for fine dining Michelin starred restaurants.The city is home to "nearly one thousand of the finest and most diverse haute cuisine restaurants in the world", according to Michelin.
#
# So it is evident that to survive in such a competitive market it is very important to plan strategically. Various factors need to be studied in order to decide on the location, such as: <Br>
# 1. New York Population <Br>
# 2. New York City Demographics <Br>
# 3. Are there any Farmers Markets, Wholesale Markets, etc. nearby so that the ingredients can be purchased fresh to maintain quality and cost?<Br>
# 4. Are there any venues like Gyms, Entertainment zones, Parks, etc. nearby where the floating population is high? <Br>
# 5. Who are the competitors in that location? <Br>
# 6. Cuisine served / Menu of the competitors <Br>
# 7. Segmentation of the Borough <Br>
# 8. Untapped markets <Br>
# 9. Saturated markets, etc.<Br>
# The list can go on...
#
# Even though it is well funded, XYZ Company Ltd. needs to choose the correct location to start its first venture. If this is successful, it can replicate the same in other locations. The first move is very important, and therefore so is the choice of location.
# ### Target Audience:
# To recommend the correct location, XYZ Company Ltd has appointed me to lead the Data Science team. The objective is to locate and recommend to the management which neighborhood of New York City will be the best choice to start a restaurant. The management also expects to understand the rationale behind the recommendations made.
#
# This would interest anyone who wants to start a new restaurant in New York City.
# ### Success Criteria:
# The success criterion of the project will be a good recommendation of a borough/neighborhood choice to XYZ Company Ltd, based on the lack of such restaurants in that location and the proximity of ingredient suppliers.
| The Battle of Neighborhoods-Week1/The Battle of Neighborhoods-Week1-Part-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# %matplotlib inline
import os
import sys
from sklearn.metrics import precision_recall_curve, roc_curve, roc_auc_score
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import natsort
import torch
import torch.nn.functional as F
from monai.config import print_config
from monai.metrics import (
get_confusion_matrix,
compute_meandice,
compute_confusion_matrix_metric
)
print_config()
root_dir = "C:/Users/Karolinah/Documents/AICourse/testSet"
print(root_dir)
cnfmx = None
dice = torch.zeros(4)
metric_name = ["sensitivity", "specificity", "precision", "negative predictive value",
"miss rate", "fall out", "false discovery rate", "false omission rate",
"prevalence threshold", "threat score", "accuracy", "balanced accuracy",
"f1 score", "matthews correlation coefficient", "fowlkes mallows index",
"informedness", "markedness"]
dicecnt = torch.zeros(4)
for i in range(358):
#print('Working on test slice ' + str(i))
# loads the data
p = Path(root_dir)
files = []
files.append(list(p.glob('image'+str(i)+'.npz')))
files.append(list(p.glob('predArg'+str(i)+'.npz')))
files.append(list(p.glob('y'+str(i)+'.npz')))
image = np.load(files[0][0])
image = image['arr_0']
predArg = np.load(files[1][0])
predArg = predArg['arr_0']
y = np.load(files[2][0])
y = y['arr_0']
y = torch.from_numpy(y)
y = torch.cat((torch.sum(y,-1,True)==0,y),3)
y = y.permute((0,3,1,2))
predArg = F.one_hot(torch.from_numpy(predArg),4)
predArg = predArg.permute((0,3,1,2))
if cnfmx is None:
cnfmx = get_confusion_matrix(predArg,y)
continue
cnfmx = torch.vstack((cnfmx,get_confusion_matrix(predArg,y)))
tmpdice = torch.nanmean(compute_meandice(predArg,y),0)
for k, tmp in enumerate(tmpdice):
if ~torch.isnan(tmp):
dice[k] += tmp
dicecnt[k] += 1
print('confusion matrix')
print('each column is true positive, false positive, true negative and false negative')
print('each row is background, ET, edema, nonET')
cnfmx = torch.mean(cnfmx,0)
print(cnfmx)
print('\nfor each metric it follows: background, ET, edema, nonET')
for metnm in metric_name:
tmp = compute_confusion_matrix_metric(metnm,cnfmx)
print(metnm+": "+str(tmp))
dice = dice.float()
dicecnt = dicecnt.float()
dice /= dicecnt
print('\ndice')
print(dice)
print(torch.mean(dice))
print('done confusion matrix')
batch_plt_nb = 5
auc = np.zeros(4)
auccnt = np.zeros(4)
fig1 = plt.figure()
ax1 = plt.axes()
fig2 = plt.figure()
ax2 = plt.axes()
for i in range(358):
# for some reason 210 predSoft won't load, maybe corrupted
if i == 210:
continue
#print('Working on test slice ' + str(i))
# loads the data
p = Path(root_dir)
files = []
files.append(list(p.glob('predSoft'+str(i)+'.npz')))
files.append(list(p.glob('y'+str(i)+'.npz')))
predSoft = np.load(files[0][0])
predSoft = predSoft['arr_0']
y = np.load(files[1][0])
y = y['arr_0']
y = np.concatenate((np.sum(y,axis=-1,keepdims=True)==0,y),3)
y = y.transpose((0,3,1,2))
for j in range(y.shape[1]):
if np.count_nonzero(y[:,j,:,:].flatten()) != 0:
auc[j] += roc_auc_score(y[:,j,:,:].flatten(),predSoft[:,j,:,:].flatten())
auccnt[j] += 1
if i == int(batch_plt_nb):
print('plotted ' + str(i))
for j in range(y.shape[1]):
precision, recall, _ = precision_recall_curve(y[:,j,:,:].flatten(),
predSoft[:,j,:,:].flatten())
ax1.plot(recall,precision)
ax1.legend(('background','ET','Edema','NonET'))
ax1.set_ylabel('Precision')
ax1.set_xlabel('Recall')
for j in range(y.shape[1]):
fpr, tpr, _ = roc_curve(y[:,j,:,:].flatten(),
predSoft[:,j,:,:].flatten())
ax2.plot(fpr,tpr)
ax2.legend(('background','ET','Edema','NonET'))
ax2.set_ylabel('True Positive Rate')
ax2.set_xlabel('False Positive Rate')
auc /= auccnt
print(auc)
| ConfusionMatrix.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Sampling Implementation of QGRNN in Pennylane
import numpy
import math
import random
import numpy as np
import scipy
from matplotlib import pyplot as plt
from tqdm import tqdm
from scipy.optimize import minimize
import networkx as nx
import pennylane as qml
# +
# Initialize the device on which the simulation is run
qubit_number = 3
qubits = range(0, 3)
vqe_dev = qml.device("default.qubit", wires=qubit_number)
# Creates the graph structure of the quantum system
ising_graph = nx.Graph()
ising_graph.add_nodes_from(range(0, qubit_number))
ising_graph.add_edges_from([(0, 1), (1, 2), (2, 0)])
'''
nx.draw(ising_graph)
plt.show()
'''
# Creates parameters
matrix_params = [[random.randint(-200, 200)/100 for i in range(0, 3)] for j in range(0, 1)]
matrix_params.append([1, 1, 1])
print(matrix_params)
def draw(graph, pos, weights, biases, title):
label = {i:'{}'.format(i) for i in graph.nodes}
edge_options = {
"edge_color": weights,
"width": 4,
"edge_cmap": plt.cm.RdBu,
"edge_vmin" : -2,
"edge_vmax" : 2,
}
node_options = {
"node_color": biases,
"cmap": plt.cm.BrBG,
"vmin" : -2,
"vmax" : 2,
}
nx.draw_networkx_labels(graph, pos, label, font_color="w")
nodes = nx.draw_networkx_nodes(graph, pos, **node_options)
edges = nx.draw_networkx_edges(graph, pos, **edge_options)
edges.set_cmap(plt.cm.RdBu)
edges.set_clim(-2, 2)
plt.title(title)
plt.colorbar(nodes)
plt.colorbar(edges)
plt.show()
pos = nx.circular_layout(ising_graph)
draw(ising_graph, pos, matrix_params[0], matrix_params[1], 'Target Ising model')
# +
# Defines the RZZ gate, in terms of gates in the standard basis set
def RZZ(param, qubit1, qubit2):
qml.CNOT(wires=[qubit1, qubit2])
qml.RZ(param, wires=qubit2)
qml.CNOT(wires=[qubit1, qubit2])
# Defines a method that creates an even superposition of basis states
def even_superposition(qubits):
for i in qubits:
qml.Hadamard(wires=int(i.val))
# Method that prepares the QAOA/time-evolution layer (to be used later)
def qaoa_layer(param1, param2, qubits, ising_graph):
# Applies a layer of coupling gates (based on the graph)
for count, i in enumerate(ising_graph):
RZZ(param1[count], int(i[0]), int(i[1]))
# Applies a layer of RX gates
for count, i in enumerate(qubits):
qml.RX(param2[count], wires=int(i.val))
# Method that prepares a low-energy state
def decoupled_layer(param1, param2, qubits):
# Applies a layer of RZ and RX gates
for count, i in enumerate(qubits):
qml.RZ(param1[count], wires=int(i.val))
qml.RX(param2[count], wires=int(i.val))
# Method that creates the decoupled VQE ansatz
def vqe_circuit(parameters, qubits, depth):
even_superposition(qubits)
for i in range(0, int(depth.val)):
decoupled_layer(parameters[0], parameters[1], qubits)
# +
# Creates the Hamiltonian that we are attempting to learn
def create_hamiltonian_matrix(n, graph):
# Defines Pauli matrices
pauli_x = np.array([[0, 1], [1, 0]])
pauli_y = np.array([[0, -1j], [1j, 0]])
pauli_z = np.array([[1, 0], [0, -1]])
identity = np.array([[1, 0], [0, 1]])
matrix = np.zeros((2**n, 2**n))
# Creates the interaction component of the Hamiltonian
for count, i in enumerate(graph):
m = 1
for j in range(0, n):
if (i[0] == j or i[1] == j):
m = np.kron(m, pauli_z)
else:
m = np.kron(m, identity)
matrix = np.add(matrix, matrix_params[0][count]*m)
# Creates the "bias" component of the matrix
for i in range(0, n):
m = 1
for j in range(0, n):
if (j == i):
m = np.kron(m, pauli_x)
else:
m = np.kron(m, identity)
matrix = np.add(matrix, matrix_params[1][i]*m)
return matrix
def create_hamiltonian_matrix_param(n, graph, params):
# Defines Pauli matrices
pauli_x = np.array([[0, 1], [1, 0]])
pauli_y = np.array([[0, -1j], [1j, 0]])
pauli_z = np.array([[1, 0], [0, -1]])
identity = np.array([[1, 0], [0, 1]])
matrix = np.zeros((2**n, 2**n))
# Creates the interaction component of the Hamiltonian
for count, i in enumerate(graph):
m = 1
for j in range(0, n):
if (i[0] == j or i[1] == j):
m = np.kron(m, pauli_z)
else:
m = np.kron(m, identity)
matrix = np.add(matrix, params[0][count].val*m)
# Creates the "bias" component of the matrix
for i in range(0, n):
m = 1
for j in range(0, n):
if (j == i):
m = np.kron(m, pauli_x)
else:
m = np.kron(m, identity)
matrix = np.add(matrix, params[1][i].val*m)
return matrix
print(create_hamiltonian_matrix(qubit_number, ising_graph.edges))
# +
# Finds the eigenvector corresponding to the lowest energy state
val, vec = np.linalg.eig(create_hamiltonian_matrix(qubit_number, ising_graph.edges))
m = []
min_ind = list(val).index(min(val))
print(val[min_ind])
# +
# Defines the circuit that we will use to perform VQE on our Hamiltonian
def create_circuit(params1, params2, qubits, depth):
vqe_circuit([params1, params2], qubits, depth)
return qml.expval(qml.Hermitian(create_hamiltonian_matrix(qubit_number, ising_graph.edges), wires=range(qubit_number)))
qnode = qml.QNode(create_circuit, vqe_dev)
resulting_circuit = qnode([1, 1, 1], [1, 1, 1], qubits, 2)
print(resulting_circuit)
print(qnode.draw())
# +
def cost_function(params):
return qnode(params[0:3], params[3:6], qubits, 2)
# Creates the optimizer for VQE
optimizer = qml.GradientDescentOptimizer(stepsize=1.2)
optimizer2 = qml.AdamOptimizer(stepsize=0.8)
steps = 300
vqe_params = list([random.randint(-100, 100)/10 for i in range(0, 6)])
for i in range(0, steps):
vqe_params = optimizer2.step(cost_function, vqe_params)
if (i%50 == 0):
print("Cost Step "+str(i)+": "+str(cost_function(vqe_params)))
print(vqe_params)
# +
initial_graph = nx.Graph()
initial_graph.add_nodes_from(range(qubit_number, 2*qubit_number))
initial_graph.add_edges_from([(3, 4), (5, 3), (4, 5)])
# Creates the Trotterized time-evolution circuit
def state_evolve(qubits, time, depth, edges, par1, par2):
'''
#vqe_circuit(params, qubits, vqe_depth)
for j in range(0, int(depth.val)):
qaoa_layer([i*time.val/depth.val for i in par1], [i*time.val/depth.val for i in par2], qubits, edges)
'''
H = create_hamiltonian_matrix_param(qubit_number, edges, [par1, par2])
U = scipy.linalg.expm(complex(0,-1)*H*time.val)
qml.QubitUnitary(U, wires=[int(i.val) for i in qubits])
# +
# Implements the SWAP test between two qubit registers
def swap_test(control, register1, register2):
qml.Hadamard(wires=control)
for i in range(0, len(register1)):
qml.CSWAP(wires=[int(control), int(register1[i].val), int(register2[i].val)])
qml.Hadamard(wires=control)
# +
# Creates the device on which the program will be run
qgrnn_dev = qml.device("default.qubit", wires=2*qubit_number+1)
# Creates the ansatz for the quantum graph neural network
control = 2*qubit_number
def qgrnn(time, depth, reg1, reg2, vqe_depth, params1, params2, matrix_params):
vqe_circuit([vqe_params[0:3], vqe_params[3:6]], reg1, vqe_depth)
vqe_circuit([vqe_params[0:3], vqe_params[3:6]], reg2, vqe_depth)
state_evolve(reg1, time, depth, ising_graph.edges, matrix_params[0], matrix_params[1])
state_evolve(reg2, time, depth, ising_graph.edges, params1, params2)
swap_test(control, reg1, reg2)
return qml.expval(qml.PauliZ(control))
batch = 15
cap = 20
qubits1 = [0, 1, 2]
qubits2 = [3, 4, 5]
nn_qnode = qml.QNode(qgrnn, qgrnn_dev)
def nn_cost_function(params):
params = list(params)
params.append(1)
params.append(1)
params.append(1)
params = np.array(params)
#times_sampled = [np.random.uniform() * cap for i in range(0, batch)]
times_sampled = range(1, cap+1)
total_cost = 0
for i in times_sampled:
res = nn_qnode(i, 10, qubits1, qubits2, 2, params[0:3], params[3:6], matrix_params)
total_cost += 1 - res
print(1 - total_cost / len(times_sampled))
return (total_cost / len(times_sampled))
# +
import copy
# Optimizes the cost function
optimizer = qml.GradientDescentOptimizer(stepsize=1.2)
optimizer2 = qml.AdamOptimizer(stepsize=0.5)
steps = 60
v = [matrix_params[0][0], matrix_params[0][1], matrix_params[0][2]]
qgrnn_params = list([random.randint(-20, 20)/10 for i in range(0, 3)])
qgrnn_params = [2 for i in range(0, 3)]
i = copy.deepcopy(qgrnn_params)
#qgrnn_params = list([2 for i in range(0, 3)])
#qgrnn_params = [1.15, -1.13, -1.5, -1.76, 0.38, -1.5]
#qgrnn_params = [-1.26, 1.52, 0.74, 1.77, 1.35, 1.19]
nn_cost_function(v)
print("-----")
print(nn_qnode.draw())
def create_density_matrix(arr):
array = np.array(arr)
plt.matshow(array)
plt.colorbar()
plt.show()
out = minimize(nn_cost_function, x0=qgrnn_params, method="COBYLA", options={'maxiter':300, 'tol':1e-14})
print(out)
nn_cost_function(out['x'])
#create_density_matrix(np.real(np.outer(qgrnn_dev._state, np.conj(qgrnn_dev._state))))
'''
for i in range(0, steps):
qgrnn_params = optimizer.step(nn_cost_function, qgrnn_params)
print("Fidelity "+str(i)+": "+str(1 - nn_cost_function(qgrnn_params)))
print(qgrnn_params)
'''
draw(ising_graph, pos, matrix_params[0], matrix_params[1], 'Target Ising model')
draw(ising_graph, pos, i, [1, 1, 1], 'Initial Ising model')
draw(ising_graph, pos, out['x'], [1, 1, 1], 'Final Ising model')
'''
test = [i/10 for i in range(10, 50)]
y = []
for i in test:
y.append(nn_cost_function([i, 2.2, 3.0, 2.3, 5.0, 2.0]))
print(i)
plt.plot(test, y)
plt.show()
'''
# -
| notebooks/qgnn_pennylane_new.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from copy import deepcopy
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
# # Import Dataset
df_api = pd.read_csv("../db/cdv/cordova_API_xml.csv")
df_plugin_declaration = pd.read_csv("../db/cdv/cordova_PLUGIN_DECLARATION_xml.csv")
df_plugin_permission_declaration = pd.read_csv("../db/cdv/cordova_PLUGIN_PERMISSION_DECLARATION_xml.csv")
# df_feature = pd.read_csv("../db/fcordova/eatures.csv")
df_api.columns
l_api = list(df_api.columns)
l_api
df_api
df_plugin_declaration
df_plugin_permission_declaration
# # Analyse API calls
# ## The occurrences
# ### The occurrences of functions detected for each plugin in each APK
df_plugins_only = df_api.drop(columns=["apk_name"])
df_plugins_only
# ### The occurrence of plugins for the entire dataset
total_apk = df_plugins_only.shape[0]
print(f"Total APKs: {total_apk}")
df_cnt = df_plugins_only.astype(bool).sum(axis=0).sort_values(ascending=True)
df_cnt
# percentage of apks using each plugin
df_pct = df_cnt.apply(lambda x: round(x/total_apk*100, 2))
df_pct
# +
plt.figure(figsize=(14, 8))
sns.set(font_scale=1.5) # font size 2
sns_pct = sns.barplot(x=df_pct.values, y=df_pct.index)
# sns_pct.set_xticklabels(sns_pct.get_xticklabels(), rotation=45, horizontalalignment='right')
sns_pct.set_xticks(range(0, 101, 10))
plt.xlabel("Plugin Usage")
plt.ylabel("Plugin")
for p in sns_pct.patches:
# print(p)
sns_pct.annotate(
"{:.1%}".format(p.get_width()/100),
(p.get_width(), p.get_y() + p.get_height()),
fontsize=15,
color='black',
xytext=(2, 5),
textcoords='offset points')
plt.show()
# -
# ### Plugins declaration
df_plugin_declaration_only = df_plugin_declaration.drop(columns=["apk_name"])
df_plugin_dc = df_plugin_declaration_only
total_apk = df_plugin_dc.shape[0]
print(f"Total APKs: {total_apk}")
df_plugin_dc_cnt = df_plugin_dc.astype(bool).sum(axis=0).sort_values(ascending=True)
df_plugin_dc_cnt
# percentage of apks using each plugin
df_plugin_dc_pct = df_plugin_dc_cnt.apply(lambda x: round(x/total_apk*100, 2))
df_plugin_dc_pct
# +
plt.figure(figsize=(14, 8))
sns.set(font_scale=1.5) # font size 2
sns_pct = sns.barplot(x=df_plugin_dc_pct.values, y=df_plugin_dc_pct.index)
# sns_pct.set_xticklabels(sns_pct.get_xticklabels(), rotation=45, horizontalalignment='right')
sns_pct.set_xticks(range(0, 101, 10))
plt.xlabel("Plugin Declration")
plt.ylabel("Plugin")
for p in sns_pct.patches:
# print(p)
sns_pct.annotate(
"{:.1%}".format(p.get_width()/100),
(p.get_width(), p.get_y() + p.get_height()),
fontsize=15,
color='black',
xytext=(2, 5),
textcoords='offset points')
plt.show()
# -
# ### Plugins permission
df_plugin_permission_declaration_only = df_plugin_permission_declaration.drop(columns=["apk_name"])
df_plugin_permission_dc = df_plugin_permission_declaration_only
total_apk = df_plugin_permission_dc.shape[0]
print(f"Total APKs: {total_apk}")
df_plugin_permission_dc_cnt = df_plugin_permission_dc.astype(bool).sum(axis=0).sort_values(ascending=True)
df_plugin_permission_dc_cnt
# percentage of apks using each plugin
df_plugin_permission_dc_pct = df_plugin_permission_dc_cnt.apply(lambda x: round(x/total_apk*100, 2))
df_plugin_permission_dc_pct
# +
plt.figure(figsize=(14, 8))
sns.set(font_scale=1.5) # font size 2
sns_pct = sns.barplot(x=df_plugin_permission_dc_pct.values, y=df_plugin_permission_dc_pct.index)
# sns_pct.set_xticklabels(sns_pct.get_xticklabels(), rotation=45, horizontalalignment='right')
sns_pct.set_xticks(range(0, 101, 10))
plt.xlabel("Plugin Declration")
plt.ylabel("Plugin")
for p in sns_pct.patches:
# print(p)
sns_pct.annotate(
"{:.1%}".format(p.get_width()/100),
(p.get_width(), p.get_y() + p.get_height()),
fontsize=15,
color='black',
xytext=(2, 5),
textcoords='offset points')
plt.show()
# -
# ### Plugin Usage VS. Plugin Declaration
df_usage_cnt = df_plugins_only.astype(bool).sum(axis=0)
df_usage_cnt = df_usage_cnt.apply(lambda x: round(x/total_apk*100, 2))
df_declaration_cnt = df_plugin_dc.astype(bool).sum(axis=0)
df_declaration_cnt = df_declaration_cnt.apply(lambda x: round(x/total_apk*100, 2))
d_plugin = {"plugin": list(df_usage_cnt.axes)}
d_usage = {"usage": list(df_usage_cnt.values)}
d_declaration = {"usage": list(df_declaration_cnt.values)}
d_plugin_vs = {
"plugin": list(df_plugins_only.columns),
"usage": list(df_usage_cnt.values),
"declaration": list(df_declaration_cnt.values)
}
# print(d_plugin_vs)
df_plugin_vs = pd.DataFrame(data=d_plugin_vs)
df_plugin_vs
# order by value in declaration
df_plugin_vs_order = df_plugin_vs.sort_values(by=['declaration'])
df_plugin_vs_order
# ax = sns.barplot(x="Percentage", y="Plugin", hue="plugin", data=df_plugin_vs_order)
fig, ax1 = plt.subplots(figsize=(18, 15))
tidy = df_plugin_vs_order.melt(id_vars='plugin').rename(columns=str.title)
# print(tidy)
sns_pct = sns.barplot(x='Value', y='Plugin', hue='Variable', data=tidy, ax=ax1)
# sns.despine(fig)
for p in sns_pct.patches:
# print(p)
sns_pct.annotate(
"{:.1%}".format(p.get_width()/100),
(p.get_width(), p.get_y() + p.get_height()),
fontsize=15,
color='black',
xytext=(2, 5),
textcoords='offset points')
plt.xlabel("")
plt.ylabel("")
plt.title(f'Plugin Usage VS. Plugin Declaration for {total_apk} APKs')
plt.show()
# # Plugin Usage VS Plugin Declaration VS Plugin Permission Declaration
# ## Ignore plugins that don't require any permission
df_usage_cnt = df_plugins_only.astype(bool).sum(axis=0)
df_usage_cnt = df_usage_cnt.apply(lambda x: round(x/total_apk*100, 2))
df_declaration_cnt = df_plugin_dc.astype(bool).sum(axis=0)
df_declaration_cnt = df_declaration_cnt.apply(lambda x: round(x/total_apk*100, 2))
df_permission_cnt = df_plugin_permission_dc.astype(bool).sum(axis=0)
df_permission_cnt = df_permission_cnt.apply(lambda x: round(x/total_apk*100, 2))
d_plugin = {"plugin": list(df_usage_cnt.axes)}
d_usage = {"usage": list(df_usage_cnt.values)}
d_declaration = {"usage": list(df_declaration_cnt.values)}
df_permission = {"usage": list(df_permission_cnt.values)}
d_plugin_vs = {
"plugin": list(df_plugins_only.columns),
"permission": list(df_permission_cnt.values),
"usage": list(df_usage_cnt.values),
"declaration": list(df_declaration_cnt.values),
}
# print(d_plugin_vs)
df_plugin_vs = pd.DataFrame(data=d_plugin_vs)
df_plugin_vs
# ## Ignore the plugins that don't require any permission
from cdv_plugins import get_plugin_permission_require_l
l_plugins_require = get_plugin_permission_require_l()
l_plugins_require
df_plugin_vs
update_df_plugin_vs = df_plugin_vs[df_plugin_vs["plugin"].isin(l_plugins_require)]
update_df_plugin_vs
# ax = sns.barplot(x="Percentage", y="Plugin", hue="plugin", data=df_plugin_vs_order)
fig, ax1 = plt.subplots(figsize=(18, 15))
tidy = update_df_plugin_vs.melt(id_vars='plugin').rename(columns=str.title)
# print(tidy)
sns_pct = sns.barplot(x='Value', y='Plugin', hue='Variable', data=tidy, ax=ax1)
# sns.despine(fig)
for p in sns_pct.patches:
# print(p)
sns_pct.annotate(
"{:.1%}".format(p.get_width()/100),
(p.get_width(), p.get_y() + p.get_height()),
fontsize=15,
color='black',
xytext=(2, 5),
textcoords='offset points')
plt.xlabel("")
plt.ylabel("")
plt.title(f'Plugin Usage VS. Plugin Declaration VS Plugin Permission Declaration for {total_apk} APKs')
plt.show()
| apk_analysis/data_analysis/cdv_plugins_declaration_xml-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/oumaima61/my-machine-learning-projects/blob/master/CNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="jN5KK75rzoc1"
# How Computers Perceive Images— Images as Data Points
# + [markdown] id="EC5aaBbkz5fp"
# Grayscale Image
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="BYR83Wp-x8Wg" outputId="2e03b376-8dd2-4d54-ec20-1d2022039adc"
from skimage import data
import numpy as np
import matplotlib.pyplot as plt
image = data.binary_blobs()
plt.imshow(image, cmap='gray')
print(f'The shape of the given image is: ',image.shape)
# + [markdown] id="VFvbLuNLz7_P"
# Colored Image
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="C1FJ6TEVzhQV" outputId="c46123e9-eaa6-43cd-ce27-96617ed05866"
color_image = data.astronaut()
plt.imshow(color_image)
# calculate shape
print(f'The shape of the given image is: ',color_image.shape)
# + [markdown] id="F8UhNwBr2-VP"
# Loading the Fashion-MNIST dataset in Keras
# + id="IcYnI-c70B4n"
from tensorflow import keras
from tensorflow.keras import layers
# + colab={"base_uri": "https://localhost:8080/"} id="Wukg6gvQ2vAB" outputId="0b5eaaa2-27bf-45b4-9482-53fb4e2ba86f"
from keras.datasets import fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
np.random.seed(42)
# + id="bvUnjof323rJ"
#creating label names
label_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# + [markdown] id="hW2J7RTC3J1M"
# Data exploration
#
# + colab={"base_uri": "https://localhost:8080/"} id="ZO8UaHu63Pen" outputId="3e3b0b30-6745-42e5-a6d3-b4666c927df9"
#Training Data
print(train_images.shape)
print(len(train_labels)) # Total no. of training images
# + colab={"base_uri": "https://localhost:8080/"} id="Px-yT8__3Voy" outputId="6287a0bb-8012-486d-c1e6-b125dd69d0a6"
#Testing Data
print(test_images.shape)
print(len(test_labels)) # Total no. of testing images
# + colab={"base_uri": "https://localhost:8080/"} id="JzFI9hRH3fWy" outputId="328f0cdd-f023-4be9-f86d-91ad679a836c"
test_labels
# + [markdown] id="rew92AV73p-f"
# Preprocessing the Data
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="_Bwk2OOP3lWy" outputId="b5d9b872-148d-4f3f-bc3f-0382fc430a2d"
plt.imshow(train_images[1],cmap='gray')
plt.grid(False)
plt.colorbar()
plt.show()
# + id="QOULKB9L31Ec"
#Rescaling the test and train images
train_images = train_images / 255.0
test_images = test_images / 255.0
# + colab={"base_uri": "https://localhost:8080/", "height": 459} id="71qPMq6u38VI" outputId="5059b5ca-8276-40c1-84a3-14bb751dac4b"
plt.figure(figsize=(8,10))
for i in range(20):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap='gray')
plt.xlabel(label_names[train_labels[i]])
# + colab={"base_uri": "https://localhost:8080/"} id="aMvOKFx34DF4" outputId="8371b06a-f400-4d8e-b74e-6b30edabd80e"
# Reshaping the test and train images
train_images = train_images.reshape(60000, 28, 28, 1)
test_images = test_images.reshape(10000, 28, 28, 1)
print(train_images.shape)
print(test_images.shape)
# + [markdown] id="kmeivR4U4Q35"
# Building the Network architecture
#
# We will configure the layers of the model first and then proceed with compiling the model.
# Layers
#
# A layer is a core building block of a neural network. It acts as a kind of data processing module. Layers extract representations out of the input data that is fed into them. Inherently, deep learning consists of stacking up these layers to form a model. We already learned about the various layers used in a CNN in the section above.
# Model
#
# A model is a linear stack of layers. It is like a sieve for data processing made of a succession of increasing refined data filters called layers. The simplest model in Keras is sequential, which is built by stacking layers sequentially.
# + id="LEl8QfZY4Kl3"
model = keras.Sequential([
keras.layers.Conv2D(32, (3,3), padding='same', activation='relu',
input_shape=(28, 28, 1)),
keras.layers.MaxPooling2D((2, 2), strides=2),
#Add another convolution
keras.layers.Conv2D(64, (3,3), padding='same', activation='relu'),
keras.layers.MaxPooling2D((2, 2), strides=2),
#Flatten the output.
keras.layers.Flatten(),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
# + colab={"base_uri": "https://localhost:8080/"} id="p_4ZGXCS5Tlg" outputId="c3845223-4471-4630-e302-1dcdc0865bf9"
model.summary()
# + [markdown] id="TTsaDcOv5s3s"
# Let's now look at how we defined the model architecture in detail
# Convolution Layer
#
# We started with a convolutional layer, specifying the number of convolutions that we want to generate. Here we have chosen '32'.
# We have also specified the size of the convolutional matrix, in this case a 3X3 grid. We have also used padding to retain the size of the original image.
# We have used relu (rectified linear unit) as the activation function. A rectified linear unit has an output of 0 if the input is less than 0, and raw output otherwise. That is, if the input is greater than 0, the output is equal to the input. An activation function is the non-linear transformation that we do over the input signal. This transformed output is then sent to the next layer of neurons as input.
# Finally, we enter the shape of the input data.
#
# Pooling Layer
#
# Every convolution layer is then followed by a max-pooling layer. A max-pooling layer will downsample an image but will retain the features.
# Flattened Layer
#
# Finally, we will flatten the images into a one-dimensional vector.
# Dense
#
# This layer consists of a 128-neuron dense layer, followed by a 10-node softmax layer. Each node represents a class of clothing. The final layer takes input from the 128 nodes in the layer before it, and outputs a value in the range [0, 1], representing the probability that the image belongs to that class. The sum of all 10 node values is 1. We'll also include activation functions in the network to introduce non-linearity. Here we have used ReLU. The last layer is a 10-way softmax layer which will return an array of 10 probability scores. Each score will denote the probability that the current image belongs to one of the 10 given classes.
#
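# + [markdown]
# As a small illustration of the ReLU behaviour described above, the sketch below applies it to a few made-up numbers (these values are illustrative only, not taken from the dataset):
# +
# ReLU keeps positive inputs unchanged and maps negative inputs to 0
relu_demo = np.maximum(0, np.array([-2.0, -0.5, 0.0, 1.5, 3.0]))
print(relu_demo)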
# + [markdown] id="zFmvFe6D53Ts"
# Compile the Model
#
# After the model has been built, we enter the compilation phase, which primarily consists of three essential elements:
#
# Loss Function: loss (Predicted — Actual value) is the quantity that we try to minimize during the training of a neural network.
# Optimizer: determines how the network will be updated based on the loss function. Optimizers could be the RMSProp optimizer, SGD with momentum, and so on.
# Metrics: to measure the accuracy of the model. In this case, we will use accuracy.
#
# + id="9M64G80z5yBO"
model.compile(optimizer='adam',
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="O8frZw0B6Dve" outputId="cefb6d54-bca5-49c9-c874-689ae88003a8"
model.fit(train_images, train_labels, epochs=3,batch_size=32)
# + [markdown] id="njq0UO_67Qly"
# Model Evaluation
# + colab={"base_uri": "https://localhost:8080/"} id="WUiRRCRL7CJp" outputId="8881f604-5b06-40a1-d379-a3893515c529"
test_loss, test_accuracy = model.evaluate(test_images, test_labels)
print('Accuracy on test dataset:', test_accuracy)
# + id="adI4QM7j7cCT"
predictions = model.predict(test_images)
# + colab={"base_uri": "https://localhost:8080/"} id="6J3nhMqH7npQ" outputId="9a92d998-a5f3-4bac-dd8a-c54359a12eae"
predictions[10]
# + colab={"base_uri": "https://localhost:8080/"} id="uk8oLuDB7se_" outputId="fceb9c5d-7fee-4739-ccbd-7d15a4b65370"
np.argmax(predictions[10])
# + colab={"base_uri": "https://localhost:8080/"} id="qjCVpE8P7yCk" outputId="f4d89a92-4df8-41aa-ddde-a3041e622d59"
test_labels[10]
| CNN.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import networkx as nx
import matplotlib.pyplot as plt
import time
# modules specific to this project
import network as nw
import physics
import timemarching as tm
import plotter
import logger
# %% [markdown]
# ### 1. Define the broadcasting channels of the network
# This is done by creating a list of the channel names. The names are arbitrary and can be set by the user, such as 'positive', 'negative' or explicit wavelengths like '870 nm', '700 nm'. Here I chose the colors 'red' and 'blue', as well as a bias with the same wavelength as 'blue'.
# %%
channel_list = ['red', 'blue','green']
# Automatically generate the object that handles them
channels = {channel_list[v] : v for v in range(len(channel_list))}
# %% [markdown]
# ### 2. Define the layers
# Define the layers of nodes in terms of how they are connected to the channels. Layers and weights are organized in dictionaries. The input and output layers do not need to be changed, but for the hidden layer we need to specify the number of nodes N and assign the correct channels to the input/output of the node.
# %%
# Create layers ordered from 0 to P organized in a dictionary
layers = {}
Nreservoir = 100
# An input layer automatically creates one node for each channel that we define
layers[0] = nw.InputLayer(input_channels=channels)
# Forward signal layer
layers[1] = nw.HiddenLayer(Nreservoir//2, output_channel='blue',excitation_channel='blue',inhibition_channel='red')
# Inhibiting memory layer
layers[2] = nw.HiddenLayer(Nreservoir//2, output_channel='red' ,excitation_channel='blue',inhibition_channel='red')
layers[3] = nw.OutputLayer(output_channels=channels) # similar to input layer
# %% [markdown]
# ### 3. Define existing connections between layers
# The weights are set in two steps.
# First the connections between layers are defined. This should be done using the keys defined for each layer above, i.e. 0, 1, 2 ... for input, hidden and output layers, respectively. The `connect_layers` function returns a weight matrix object that we store under a chosen key, for example `'inp->hid'`.
# Second, the specific connections on the node-to-node level are specified using the node index in each layer
# %%
# Define the overall connectivity
weights = {}
# The syntax is connect_layers(from_layer, to_layer, layers, channels)
# Connections into the reservoir from input layer
weights['inp->hd0'] = nw.connect_layers(0, 1, layers, channels)
weights['inp->hd1'] = nw.connect_layers(0, 2, layers, channels)
# Connections between reservoir nodes
weights['hd0->hd1'] = nw.connect_layers(1, 2, layers, channels)
weights['hd1->hd0'] = nw.connect_layers(2, 1, layers, channels)
# Intralayer connections
weights['hd0->hd0'] = nw.connect_layers(1, 1, layers, channels)
weights['hd1->hd1'] = nw.connect_layers(2, 2, layers, channels)
# Connections to output
weights['hd0->out'] = nw.connect_layers(1, 3, layers, channels)
weights['hd1->out'] = nw.connect_layers(2, 3, layers, channels)
# Connections back into reservoir from output
weights['out->hd0'] = nw.connect_layers(3, 1, layers, channels)
weights['out->hd1'] = nw.connect_layers(3, 2, layers, channels)
# %% [markdown]
# Setup parameters for the network
# %%
sparsity = 0.90
spectral_radius = 1.0
# One number per channel
input_scaling = [1.0,1.0,1.0]
output_scaling= [1.0,1.0,1.0]
Nreservoir = 100
# %% [markdown]
# #### Setup the input weights
# %%
# We will generate some random numbers
import numpy as np
rng = np.random.RandomState(42)
# Input weights to all of the input units
W_in = rng.rand(Nreservoir, len(channels))
# Ask the weights object which dimensions to be in
weights['inp->hd0'].ask_W()
# Put each weight column in a specific channel
for key in channels :
k = channels[key]
W_key = np.zeros_like(W_in)
W_key[:,k] = W_in[:,k]
weights['inp->hd0'].set_W(key,input_scaling[k]*W_key[:Nreservoir//2]) # first half
weights['inp->hd1'].set_W(key,input_scaling[k]*W_key[Nreservoir//2:]) # second half
# %% [markdown]
# #### Setup the reservoir weights
# %%
W_partition = {'hd0->hd0':(0,Nreservoir//2,0,Nreservoir//2),
'hd0->hd1':(Nreservoir//2,Nreservoir,0,Nreservoir//2),
'hd1->hd1':(Nreservoir//2,Nreservoir,Nreservoir//2,Nreservoir),
'hd1->hd0':(0,Nreservoir//2,Nreservoir//2,Nreservoir)}
# Generate a large matrix of values for each reservoir channel (red and blue)
W_res = rng.rand(2, Nreservoir, Nreservoir)
# Delete the fraction of connections given by sparsity:
W_res[rng.rand(*W_res.shape) < sparsity] = 0
# Delete any remaining diagonal elements
for k in range(0,Nreservoir) :
W_res[:,k,k] = 0.
# Normalize this to have the chosen spectral radius, once per channel
for k in range(0,2) :
radius = np.max(np.abs(np.linalg.eigvals(W_res[k])))
# rescale them to reach the requested spectral radius:
W_res[k] = W_res[k] * (spectral_radius / radius)
weights['hd0->hd1'].ask_W()
for connection in W_partition :
for key in list(channels.keys())[:2] :
k=channels[key]
A,B,C,D = W_partition[connection]
weights[connection].set_W(key,W_res[k,A:B,C:D])
# %% [markdown]
# #### Setup the output weights
# %%
# Output weights from reservoir to the output units
W_out = rng.rand(len(channels),Nreservoir)
# Ask the weights object which dimensions to be in
weights['hd0->out'].ask_W()
# Put each weight column in a specific channel
for key in channels :
if key != 'green' :
k = channels[key]
W_key = np.zeros_like(W_out)
W_key[k] = W_out[k]
weights['hd0->out'].set_W(key,output_scaling[k]*W_key[:,:Nreservoir//2])
weights['hd1->out'].set_W(key,output_scaling[k]*W_key[:,Nreservoir//2:])
# Output weights back into reservoir
# %% [markdown]
# ### 4. Visualize the network
# The `plotter` module supplies functions to visualize the network structure. The nodes are named by the layer type (Input, Hidden or Output) and the index. To suppress the printing of weight values on each connection, please supply `show_edge_labels=False`.
#
# #### Available layouts:
# **multipartite**: Standard neural network appearance. Hard to see recurrent couplings within layers.
# **circular**: Nodes drawn as a circle
# **shell**: Layers drawn as concentric circles
# **kamada_kawai**: Optimization to minimize weighted internode distance in graph
# **spring**: Spring layout which is standard in `networkx`
#
# #### Shell layout
# This is my current favorite. It is configured to plot the input and output nodes on the outside of the hidden layer circle, in a combined outer concentric circle.
# %%
plotter.visualize_network(layers, weights, exclude_nodes={3:['O2']},node_size=100,layout='shell', show_edge_labels=False)
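# %% [markdown]
# For comparison, the same network can be drawn with any of the other layouts listed above; this is the identical call with only the `layout` keyword changed to `kamada_kawai`.
# %%
plotter.visualize_network(layers, weights, exclude_nodes={3:['O2']}, node_size=100, layout='kamada_kawai', show_edge_labels=False)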
# %% [markdown]
# ### 5. Specify the physics of the nodes
# Before running any simulations, we need to specify the input currents and the physics of the hidden layer nodes. Parameters can either be specified directly or coupled from the `physics` module.
# %%
# Specify an exciting current square pulse and a constant inhibition
# Pulse train of 1 ns pulses
t_blue = [(6.0,7.0), (11.0,12.0), (16.0,17.0)] # at 6 ns, 11 ns, and 16 ns
t_blue = [(5.0,15.0)]#, (11.0,12.0), (16.0,17.0)] # at 6 ns, 11 ns, and 16 ns
I_blue = 100 # nA
# Try to modulate the nodes with red input
t_red = [(8.0,9.0), (12.0,13.0)] # at 6 ns, 11 ns, and 16 ns
# Constant inhibition to stabilize circuit
I_red = 0.0 # nA
# Use the square pulse function and specify which node in the input layer gets which pulse
layers[0].set_input_func(channel='blue',func_handle=physics.square_pulse, func_args=(t_blue, I_blue))
# Use the costant function to specify the inhibition from I0 to H0
#layers[0].set_input_func(channel='red', func_handle=physics.constant, func_args=I_red)
layers[0].set_input_func(channel='red', func_handle=physics.square_pulse, func_args=(t_red, I_red))
# %%
# Specify two types of devices for the hidden layer
# 1. Propagator (standard parameters)
propagator = physics.Device('device_parameters.txt')
propagator.print_parameter('Cstore')
#propagator.set_parameter('Rstore',1e6)
# 2. Memory (modify the parameters)
memory = physics.Device('device_parameters.txt')
#memory.set_parameter('Rstore',1e6)
#memory.set_parameter('Cstore',2e-15)
# a 3e-15 F capacitor can be build by 800x900 plates 20 nm apart
memory.print_parameter('Cstore')
# %%
# Specify the internal dynamics by supplying the RC constants to the hidden layer (six parameters)
layers[1].assign_device(propagator)
layers[2].assign_device(memory)
# Tweak the threshold voltage
Vthres=0.27
layers[1].Vthres=Vthres
layers[2].Vthres=Vthres
# Calculate the unity_coeff to scale the weights accordingly
unity_coeff, _ = propagator.inverse_gain_coefficient(propagator.eta_ABC, Vthres)
print(f'Unity coupling coefficient calculated as unity_coeff={unity_coeff:.4f}')
# %% [markdown]
# ### 6. Evolve in time
# %%
# Start time t, end time T
t = 0.0
T = 25.0 # ns
# To sample result over a fixed time-step, use savetime
savestep = 0.1
savetime = savestep
# These parameters are used to determine an appropriate time step each update
dtmax = 0.1 # ns
dVmax = 0.005 # V
nw.reset(layers)
# Create a log over the dynamic data
time_log = logger.Logger(layers,channels) # might need some flags
start = time.time()
while t < T:
# evolve by calculating derivatives, provides dt
dt = tm.evolve(t, layers, dVmax, dtmax )
# update with explicit Euler using dt
# supplying the unity_coeff here to scale the weights
tm.update(dt, t, layers, weights, unity_coeff)
t += dt
# Log the progress
if t > savetime :
# Put log update here to have (more or less) fixed sample rate
# Now this is only to check progress
print(f'Time at t={t} ns')
savetime += savestep
time_log.add_tstep(t, layers, unity_coeff)
end = time.time()
print('Time used:',end-start)
# This is a large pandas data frame of all system variables
result = time_log.get_timelog()
# %% [markdown]
# ### 7. Visualize results
# Plot results specific to certain nodes
# %%
#nodes = ['H0','H1','H2','H3','H4']
nodes = ['H0','K0']
plotter.plot_nodes(result, nodes)
# %% [markdown]
# For this system it's quite elegant to use the `plot_chainlist` function, taking as arguments a graph object, the source node (I1 for blue) and a target node (O1 for blue)
# %%
# Variable G contains a graph object describing the network
G = plotter.retrieve_G(layers, weights)
plotter.plot_chainlist(result,G,'I1','K0')
# %% [markdown]
# Plot specific attributes
# %%
attr_list = ['Vgate','Vexc']
plotter.plot_attributes(result, attr_list)
# %% [markdown]
# We can be totally specific if we want. First we list the available columns to choose from
# %%
print(result.columns)
# %%
plotter.visualize_dynamic_result(result, ['I0-Iout-red','I1-Iout-blue'])
# %%
plotter.visualize_dynamic_result(result, ['H0-Iout','H0-Pout','K0-Iout','K0-Pout'])
# %%
plotter.visualize_transistor(propagator.transistorIV,propagator.transistorIV_example())
# %%
plotter.visualize_LED_efficiency(propagator.eta_example(propagator.eta_ABC))
# %%
| echostatenetwork/ReservoirNetwork.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# from IPython.core.display import HTML
# def css_styling():
# styles = open("./styles/custom.css", "r").read()
# return HTML(styles)
# css_styling()
# + [markdown] slideshow={"slide_type": "slide"}
# ### BEFORE YOU DO ANYTHING...
# In the terminal:
# 1. Navigate to __inside__ your ILAS_Python repository.
# 2. __COMMIT__ any un-commited work on your personal computer.
# 3. __PULL__ any changes *you* have made using another computer.
# 4. __PULL__ textbook updates (including homework answers).
# + [markdown] slideshow={"slide_type": "slide"}
# 1. __Open Jupyter notebook:__ Start >> Programs (すべてのプログラム) >> Programming >> Anaconda3 >> JupyterNotebook
# 1. __Navigate to the ILAS_Python folder__.
# 1. __Open today's seminar__ by clicking on 8_Plotting.
# + [markdown] slideshow={"slide_type": "slide"}
# # Plotting
#
# # Lesson Goal
#
# To use the Matplotlib Python package to visualise results.
# + [markdown] slideshow={"slide_type": "slide"}
# # Objectives
#
# Matplotlib is the most widely used Python module for plotting.
#
# Its functionality is huge.
#
# Today's objective is to:
# - introduce the core functionality of Matplotlib as a basis that you can build on.
# - focus on tasks where plotting *programmatically* can be advantageous over other *computational* plotting methods.
#
# Use online resources including the Matplotlib Gallery (http://matplotlib.org/gallery.html), or Github (http://gree2.github.io/python/2015/04/10/python-matplotlib-plotting-examples-and-exercises), as a source of tutorials and sample code for plotting.
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Why we are studying this
#
# Plotting is one of the most important computational tasks used by engineers and scientists.
#
# Plots are used for:
#
# - checking computed results visually
# - understanding data
# - presenting data
#
#
#
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Packages
# We will be using:
# - Numpy for numerical computation.
# - Matplotlib to produce figures and animations.
# - Ipywidgets to make our plots interactive
# + slideshow={"slide_type": "slide"}
import numpy as np
# Plotting and animation
import matplotlib
from matplotlib import animation, rc
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# %matplotlib inline
# Interactive plotting
try:
import ipywidgets
except ImportError:
try:
# !{sys.executable} -m pip -q install ipywidgets
import ipywidgets
except ImportError:
# !{sys.executable} -m pip -q --user install ipywidgets
finally:
# !jupyter nbextension enable --py widgetsnbextension
print("You will need to refresh your browser page")
from ipywidgets import interact
# Viewing animations in the notebook
from IPython.display import HTML
# Plotting in 3D
from mpl_toolkits.mplot3d import axes3d
# Viewing .gif animations in the notebook
from IPython.display import Image
from IPython.display import display
# + [markdown] slideshow={"slide_type": "slide"}
# ## Line and Scatter Plots
# In the last seminar you learnt to use basic line and scatter plots.
#
# This section will discuss different ways that you can manipulate the appearance of these plots.
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# For a sample data set of values of $x$ and corresponding values of $f$:
# -
x = [-1, 3, 4, 8 , 10]
f = [-1, -2, 7, 13 , 1]
# We can produce a line plot...
# + slideshow={"slide_type": "subslide"}
plt.plot(x, f)
# + [markdown] slideshow={"slide_type": "subslide"}
# ...or a scatter plot by adding the `formatstring 'o'`...
# -
plt.plot(x, f, 'o')
# + [markdown] slideshow={"slide_type": "subslide"}
# ...or a line plot with data points by adding the `formatstring '-o'`.
# -
plt.plot(x, f, '-o')
# + [markdown] slideshow={"slide_type": "subslide"}
# Notice the statement `[<matplotlib.lines.Line2D at 0x30990b0>]` appears each time (the numbers on your computer may look different).
#
# This is a handle to the line that is created by the last command in the code block (in this case `plt.plot(x, f)`).
#
# You can avoid printing this by putting a semicolon after the last command in the code block (so type `plt.plot(x, f);`).
#
# (It can be useful to store this handle in a variable - we will discuss this later).
# + [markdown] slideshow={"slide_type": "subslide"}
# The `plot` function can take many arguments.
#
# Summary so far, `plot` can take:
# - One argument `plot(y)`, which plots `y` values along the vertical axis and enumerates the horizontal axis starting at 0.
# - Two arguments, `plot(x, y)`, which plots `y` vs `x`.
# - Three arguments `plot(x, y, formatstring)`, which plots `y` vs `x` using colors and markers defined in `formatstring`.
#
#
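# -
# For example, using the x and f lists defined above (a quick illustration of the three call forms):
plt.plot(f)            # one argument: f against its index (0, 1, 2, ...)
plt.plot(x, f)         # two arguments: f against x
plt.plot(x, f, 'o');   # three arguments: f against x, drawn with circular markers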
# + [markdown] slideshow={"slide_type": "subslide"}
# Using the `formatstring` argument we can change:
# - the colour of the plot
# - the style of the markers
# - the style of the line
# + [markdown] slideshow={"slide_type": "subslide"}
# In the example below:
# - `r` makes the plot red
# - `x` makes the markers crosses
# - `-` makes the line solid
# -
plt.plot(x, f, '-xr');
# Note the semi-colon when calling the `plot` function.
# + [markdown] slideshow={"slide_type": "subslide"}
# Available colours, marker styles and line styles can be found in the matplotlib documentation.
# - Colour: https://matplotlib.org/2.0.2/api/colors_api.html
# - Marker style: https://matplotlib.org/api/markers_api.html
# - Line style: https://matplotlib.org/devdocs/gallery/lines_bars_and_markers/line_styles_reference.html
# + [markdown] slideshow={"slide_type": "subslide"}
# The `plot` function also takes a large number of keyword arguments.
#
# For example, to plot a line with width 6 (the default is 1):
# -
plt.plot(x, f, '-xr', linewidth=6);
# + [markdown] slideshow={"slide_type": "subslide"}
# Remember that keyword arguments must always appear after regular arguments when calling the function.
#
# Documentation of all available keyword arguments can be found here: https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html#matplotlib.pyplot.plot
# + [markdown] slideshow={"slide_type": "subslide"}
# Other methods such as `legend`, `xlabel`, `ylabel` and `title` can be used to add useful information to a plot.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Adding labels, legend and title.
# The location of the legend can be set to 'best', which places it so as to obscure as little data as possible.
#
# The position of the legend can also be set explicitly instead. The location codes can be found here: https://matplotlib.org/api/legend_api.html
#
# The $ sign can be used to italicise (part of) a string.
#
# The font size of all strings in the figure can be selected.
# + slideshow={"slide_type": "subslide"}
# Use the label kwarg to create a name to use in the figure legend.
plt.plot(x, f, '-xr', label="data 1")
# Legend
plt.legend(loc='best', fontsize=12)
# Axes labels
plt.xlabel('$x$', fontsize=20)
plt.ylabel('$f$', fontsize=20)
# Title
plt.title("Simple plot of $f$ against $x$", fontsize=18);
# -
# Note the semi-colon when calling the `title` function.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Setting the axis limits
# Example: plot the function $\sin(x)$ for 100 equally spaced values of $x$ between $0$ and $4 \pi$ (inclusive).
# + slideshow={"slide_type": "subslide"}
num_points = 100
x = np.linspace(0, 4*np.pi, num_points)
f = np.sin(x)
# Plot graph
plt.plot(x, f);
# Label axis
plt.xlabel('$x$')
plt.ylabel('$\sin(x)$')
# + [markdown] slideshow={"slide_type": "subslide"}
# Notice that the curve does not start exactly at the left edge of the axes; matplotlib adds a small margin by default.
#
# We can improve the appearance of the plot by specifying the $x$ limits:
# + slideshow={"slide_type": "subslide"}
# Plot graph
plt.plot(x, f);
# Label axis
plt.xlabel('$x$')
plt.ylabel('$\sin(x)$')
# Use the start and end values in x as x limits
plt.xlim(x[0], x[-1])
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Multiple Plots
# Plots can be overlaid by calling `plt.plot(...)` multiple times.
# +
# Plot sin(x) and cos(x), and add label for each
plt.plot(x, np.sin(x), label="$\sin(x)$");
plt.plot(x, np.cos(x), label="$\cos(x)$");
# Label the x-axis
plt.xlabel('$x$');
# Add legend
plt.legend();
# Use the start and end values in x as x limits
plt.xlim(x[0], x[-1]);
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Saving a Plot
# A plot can be saved as a file for you to use e.g. in a report.
#
# A file name should be given.
#
# The file type is specified using the file extension.
#
# `/` can be used to separate the names of nested folders.
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# In the example below, the graph is saved as a pdf and a png file in the `img` folder in your directory.
#
# Run the code in the cell below then open the folder `img` using the file browser on your computer to verify that the files were generated by noting the time stamp shown for each file.
# + slideshow={"slide_type": "-"}
# Save plot to file
plt.savefig("img/my-plot.pdf")
plt.savefig("img/my-plot.png")
# + [markdown] slideshow={"slide_type": "subslide"}
# __Try it yourself__
# <br>In the cell below:
# - create a green line plot of the function $sin^2(x)$ (or $(sin(x))^2$) using the range of $x$ from the last example.
# - add a blue triangular marker at $x=\frac{\pi}{2}, \pi, \frac{3\pi}{2}, 2\pi, \frac{5\pi}{2}, 3\pi, \frac{7\pi}{2}$ and $4\pi$.
# - label the axes
# - save the figure as a .jpg file in the `img` folder in your repository.
#
# +
# line and scatter graphs
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Bar charts and Histograms
# ### Bar charts
# To represent data as a bar chart, for example, the number of students in each year of a degree program:
# -
year_groups = ('B1', 'B2', 'B3', 'M1', 'M2')
num_students = (500, 332, 425, 300, 200)
# + slideshow={"slide_type": "subslide"}
# Create an array with the position of each bar along the x-axis
x_pos = np.arange(len(year_groups))
# Produce bar plot
plt.bar(x_pos, num_students);
# Replace the x ticks with the year group name
# Rotate labels 30 degrees
plt.xticks(x_pos, year_groups, rotation=30);
# Add axis labels
plt.xlabel('year group');
plt.ylabel('number of students');
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Histograms
#
# We can visualise the distribution of the values using a histogram.
#
# In a histogram, data is sorted into intervals (bins) along the horizontal axis.
#
# The number of values that fall within a 'bin' is then displayed on the vertical axis.
# + [markdown] slideshow={"slide_type": "subslide"}
# To create a histogram we first need a data set.
#
# For example we can use Numpy to generate a data set with:
# - 4000 random values
# - a mean value of zero
# - a standard deviation of 1
# - a __normal__ (Gaussian) distribution (bell curve with 68% of values within one standard deviation of the mean)
#
#
# + slideshow={"slide_type": "-"}
# loc=mean, scale=stddev, size=num samples
x = np.random.normal(loc=0.0, scale=1.0, size=4000)
# + [markdown] slideshow={"slide_type": "subslide"}
# We can visualise how `x` is distributed, using 20 bins.
#
# Matplotlib creates 20 bins of equal width, and computes the number of entries in each bin.
# +
# Produce histogram with 20 bins
n, bins, patches = plt.hist(x, 20, facecolor='green');
# Add label
plt.xlabel('value')
plt.ylabel('frequency')
# + [markdown] slideshow={"slide_type": "subslide"}
# To view a list of:
# - the bin intervals
# - the number of values in each bin.
# + slideshow={"slide_type": "subslide"}
print(bins)
# + slideshow={"slide_type": "subslide"}
print(n)
# + [markdown] slideshow={"slide_type": "subslide"}
# __Try it yourself__
# <br>In the example above try changing the number of bins and run the cells again to see the change in the bin intervals and the number of values in each bin.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Interactive plots
# By using interactive plots we can explore the influence that changing different parameters has on an output.
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# As a simple example, we will plot
#
# $$
# f(t) = t^{\alpha} \sin(\omega t)
# $$
#
# where $\alpha$ and $\omega$ are parameters.
#
# We will create two sliders allowing us to change the value of $\alpha$ and $\omega$ and observe the change in $f(t)$
# + [markdown] slideshow={"slide_type": "subslide"}
# Interactive plots can be created using the `ipywidgets` module, which we imported at the start of the notebook.
# >```Python
# try:
# import ipywidgets
# except ImportError:
# try:
# # !{sys.executable} -m pip -q install ipywidgets
# import ipywidgets
# except ImportError:
# # !{sys.executable} -m pip -q --user install ipywidgets
# finally:
# # !jupyter nbextension enable --py widgetsnbextension
# print("You will need to refresh your browser page")
# from ipywidgets import interact
# ```
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# We write the code to generate the plot as usual.
#
# (To generate the symbols ω or α, type `\omega` or `\alpha` and press the `Tab` key.)
# +
ω=1
α=0
t = np.linspace(0, 2*np.pi, 200)
plt.plot(t, (t**α)*np.sin(ω*t))
plt.xlabel('$t$')
plt.ylabel('$f$')
plt.title(r"$\alpha$ = {}, $\omega$ = {}".format(α, ω))
# + [markdown] slideshow={"slide_type": "subslide"}
# We encapsulate the code to generate the plot within a function.
#
# The parameters we want to vary are given as function arguments.
# +
def plot(ω=1, α=0):
"A plot of the function f(t)= (t**α)*np.sin(ω*t)"
t = np.linspace(0, 2*np.pi, 200)
plt.plot(t, (t**α)*np.sin(ω*t))
plt.xlabel('$t$')
plt.ylabel('$f$')
plt.title(r"$\alpha$ = {}, $\omega$ = {}".format(α, ω))
# + [markdown] slideshow={"slide_type": "subslide"}
# The `interact` function takes the following arguments:
# - the plotting function
# - a tuple for each variable parameter:
#     - minimum value (inclusive)
#     - maximum value (inclusive)
#     - step size between each possible value
# -
interact(plot, ω=(-10, 10, 0.25), α=(0, 2, 0.25));
# You can now adjust the values by moving the sliders.
#
# The new values appear as the title to the plot.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Animated Plots
#
# Plotting can be a very useful way to visualise what is happening in a dynamic physical system.
#
# Matplotlib can be used to create animated plots showing the change in a system over time.
# + [markdown] slideshow={"slide_type": "subslide"}
# We will use the `animation` and `rc` subpackages.
#
# >from matplotlib import animation, rc
#
# We will also use a package to allow us to view the animated output within jupyter notebook.
#
# >from IPython.display import HTML
#
# These packages were imported at the start of this notebook.
# + [markdown] slideshow={"slide_type": "subslide"}
# Let's start with a simple example to learn how to build an animation.
#
# We will then move onto an example of a physical model that you may study in one of your other subjects.
#
#
# -
# The set of steps to build an animated plot is:
# 1. Create a figure window
# 1. Create axes within the window
# 1. Create object(s) to animate e.g. a line or point
# 1. Define an animation function for the change you want to see at each timestep
# 1. Use the function `animation.FuncAnimation` to create your animation and give it a name.
# 1. Call the animation name to play it.
# 1. (Save the animation)
# Example: An animated sine wave.
# +
# 1. Create a figure window.
fig = plt.figure()
# 2. Creates axes within the window
ax = plt.axes(xlim=(0, 2), ylim=(-2, 2))
# 3. Empty object (no data points) to animate e.g. a line
# The trailing `,` unpacks the single-element list returned by ax.plot.
line, = ax.plot([], [], lw=2)
# + slideshow={"slide_type": "subslide"}
# 4. Animation function: called sequentially
# i = frame number.
# Sine wave generated, phase shift proportional to i
def animate(i):
x = np.linspace(0, 2, 1000)
y = np.sin(2 * np.pi * (x - 0.01 * i))
line.set_data(x, y)
# single return arguments should be given as a tuple with one value
return (line,)
# -
# When returning a single argument (e.g. `line`) it is returned as a tuple with one value i.e. `(line, )`.
# + slideshow={"slide_type": "subslide"}
# 5. Animates the data; 100 frames, 20ms delay between frames, blit=True : only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, frames=100, interval=20, blit=True)
# Set the animation display format to html which the non-Python parts of this notebook are written in.
rc('animation', html='html5')
# 6. Play the animation
anim
# 7. Save the animation as a .mp4 file
# 15 frames per second
# 1800 bits of data processed/stored per second
writer = animation.writers['ffmpeg'](fps=15, bitrate=1800)
anim.save('img/sin_movie.mp4', writer=writer)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Simulating Physical Systems: Spring-Mass System
# If you are studying an engineering-related subject, you will most likely study simple harmonic motion: a type of periodic (oscillatory) motion.
#
# For this oscillation to happen, the restoring force is:
# - directly proportional to the displacement
# - in the direction opposite to the displacement.
#
#
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# A typical example of this is a mass attached to a spring.
#
# <img src="img/spring_mass_system.gif" alt="Drawing" style="width: 500px;"/>
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# If we assume that:
# - the spring is ideal (it has no weight, mass, or damping losses)
# - there is no friction
#
# we can use a simple equation to give the position of the mass, $x$, as a function of time, $t$:
#
# $x(t) = A \cos(\omega t - \phi)$
#
# where:
# <br>$A$: Maximum amplitude (displacement from the initial position), defined by the initial conditions of the system.
# <br>$\phi$ : Phase (the initial angle of a sinusoidal function at its origin)
# <br>$\omega$ : Angular frequency (frequency of oscillation expressed in radians)
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# Angular frequency
#
# $\omega=2\pi f = \sqrt{\frac{k}{m}}$
#
# where
# <br>$k$ : spring constant
# <br>$m$ : mass (kg)
# <br>$f$ : frequency (Hz)
# + slideshow={"slide_type": "subslide"}
# 1. Create a figure window.
fig = plt.figure()
# 2. Create axes within the window
ax = plt.axes(xlim=(-2, 2), ylim=(-3.5,3.5))
# 3. Two objects to animate
line, = ax.plot([2,1], [4,3], marker="" , ls="-") # a line
point, = ax.plot([1], [1], marker='o', ms=40) # a point
# Spring data
k = 100
m = 20
w = np.sqrt(k/m)
phi = 2
A = 2
# Position of mass as function of time
def fun(t):
return A*np.sin(w * t + phi)
# 4. Animation function
def animate(i):
x = fun(i/10)
line.set_data([0,0], [4, -x])
point.set_data(0, -x)
return line, point
# 5. Create animation; 500 frames, 50ms delay between frames, blit=True : only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, frames=500, interval=50, blit=True)
# 6. Play animation
anim
# 7. Save animation
# writer = animation.writers['ffmpeg'](fps=15, bitrate=1800)
# anim.save('img/spring_movie.mp4', writer=writer)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Visualising 2D Arrays
#
# Two-dimensional arrays can be visualized with the `plt.matshow` function.
#
# Each square cell represents one element of the array.
#
# A colour scale (`plt.colorbar`) is used to indicate the value of each cell.
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# Note that the first row of the array (index 0) is plotted at the top, matching the way the matrix is written out.
# +
x = np.array([[8, 7, 6, 8],
[8, 7, 6, 3],
[6, 6, 5, 2],
[4, 3, 2, 1]])
plt.matshow(x)
plt.colorbar()
# + [markdown] slideshow={"slide_type": "subslide"}
# The default colour map is used (it is called `viridis`).
#
# The highest value in the array is mapped to yellow.
# <br>The lowest value is mapped to purple.
# <br>Values in between are mapped to colours ranging from blue to green.
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# There are many other colour maps available.
#
# Documentation of available colour maps can be found here: http://matplotlib.org/users/colormaps.html.
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# To change the colour map, the `matplotlib.cm` package is used, which we imported and renamed at the start of the notebook.
# >`import matplotlib.cm as cm`
#
# Specify the name of any of the available color maps with the `cmap` keyword.
# + slideshow={"slide_type": "subslide"}
plt.matshow(x, cmap=cm.Wistia)
plt.colorbar();
# + [markdown] slideshow={"slide_type": "subslide"}
# The keywords `vmin` and `vmax` can be used to explicitly set the maximum and minimum value for the colour scale.
# -
plt.matshow(x, cmap=cm.bone, vmin=0, vmax=20)
plt.colorbar();
# + [markdown] slideshow={"slide_type": "subslide"}
# The standard colormaps also all have reversed versions. They have the same names with `_r` tacked on to the end.
# -
plt.matshow(x, cmap=cm.bone_r, vmin=0, vmax=20)
plt.colorbar();
# + [markdown] slideshow={"slide_type": "subslide"}
# __Try it yourself__
# <br>Find a colour map you like using the link provided. Change the colours of the examples above to colours that you prefer.
# -
# If you have a specific range for the x and y axis of the plot, it can be more convenient to use the plot types `pcolor` and `contourf`.
#
# +
import numpy as np
x = np.array([[8, 7, 6, 8],
[8, 7, 6, 3],
[6, 6, 5, 2],
[4, 3, 2, 1]])
y = range(25, 29)
z = range(31, 35)
# -
# For example, if you want to overlay the data with another type of plot.
plt.pcolor(y, z, x, cmap=cm.Reds, vmin=3, vmax=7)
plt.colorbar();
plt.contourf(y, z, x, cmap=cm.Reds)
plt.plot(25.5, 32, 'bo')
plt.plot(27, 33, 'yo')
plt.colorbar();
# + [markdown] slideshow={"slide_type": "slide"}
# ## 3D Plotting
# An advantage of plotting computationally is that we can easily produce more complex figures such as 3D plots.
#
# This can be useful, for example, when visualising the form of a function with two independent variables.
#
#
# + [markdown] slideshow={"slide_type": "subslide"}
# Many different styles of 3D plot are available.
#
# A number of examples can be found here: https://matplotlib.org/examples/mplot3d/index.html
# + [markdown] slideshow={"slide_type": "subslide"}
# Today, we will go through just one example.
#
# You can then explore further, using today's seminar as a basis.
# + [markdown] slideshow={"slide_type": "subslide"}
# We will plot the function:
# $$
# f(x, y) = \sin(x) \cos\left(\frac{xy^2}{10}\right)
# $$
# as a 3D surface.
# + [markdown] slideshow={"slide_type": "subslide"}
# Plotting in 3D requires another package which was imported at the start of this notebook.
# >`from mpl_toolkits.mplot3d import axes3d`
# + [markdown] slideshow={"slide_type": "subslide"}
# First we create points in the $x$ and $y$ directions at which we want to evaluate the function:
# -
N = 50 # 50 points in each direction
x = np.linspace(-np.pi, np.pi, N)
y = np.linspace(-np.pi, np.pi, N)
# + [markdown] slideshow={"slide_type": "subslide"}
# Next, we use NumPy to create a 'grid' of $(x, y$) points:
# -
X, Y = np.meshgrid(x, y)
# + [markdown] slideshow={"slide_type": "subslide"}
# When printing you may want to round the value of each element.
#
# You can do this using the `np.around` function.
#
# Example:
# <br>`print(np.around(X,2), np.around(Y,2))`
# <br>rounds each value to 2 decimal places.
# + [markdown] slideshow={"slide_type": "subslide"}
# __Try it yourself__
# <br>The easiest way to visualise how `meshgrid` works is to:
# - try making `N` smaller (fewer points along each axis) in the cell above.
#
# <br>
# In the cell below:
# - print `X` and `Y` using <br>`print(np.around(X,2))`<br>`print(np.around(Y,2))`<br><br>
# - print the shape of `X` and `Y` using<br>`print(X.shape)`<br>`print(Y.shape)`<br><br>
# - What happens if N is different for `X` than for `Y`?
# +
# How does meshgrid work?
# + [markdown] slideshow={"slide_type": "subslide"}
# Hopefully, you can see that by overlaying `X` and `Y` we can describe the x,y coordinates of every point on the 3D surface.
# + [markdown] slideshow={"slide_type": "subslide"}
# Change the value of `N` back to its original value `N = 50`.
# + [markdown] slideshow={"slide_type": "subslide"}
# We now evaluate the function $f$ at each point:
# -
f = np.sin(X)*np.cos((X*Y**2)/10)
# + [markdown] slideshow={"slide_type": "subslide"}
# The 3D figure is plotted using:
# +
# Create a figure
fig = plt.figure()
# Specify 3D axes
ax = plt.axes(projection='3d')
# Plot the function as a surface
surf = ax.plot_surface(X, Y, f ,cmap=cm.Reds)
# Label the axes
ax.set_xlabel('X', fontsize=20)
ax.set_ylabel('Y', fontsize=20)
ax.set_zlabel('Z', fontsize=20)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Review Exercises
# Below are some exercises for you to practise producing plots and figures.
# <br>Some of the examples require you to generate data using methods we have studied in previous seminars.
#
#
# -
# ### Review Exercise: Simple Plot
# In the cell below, write a program to:
# - Plot $y=x^{3}-x^{2}-4x+4$ for $x = -3$ to $x= 3$.
# - Use a red, dashed line.
# - On the same figure, plot a black square at every point where $y$ equals zero.
# - Set the size of the markers to 10.
# - Label the axes as 'x-axis' and 'y-axis'.
# - Add a title of your choice.
#
# *Hint: You may have to search online for the function arguments needed for some of these manipulations.*
# +
# Simple Plot
# +
# Example Solution
x = np.linspace(-3, 3, 100)
y = x**3 - x**2 - 4*x + 4
plt.plot(x, y, 'r--')
r = np.roots([1, -1, -4, 4])
z = np.zeros(len(r))
plt.plot(r, z, 'ks', markersize=10)
plt.xlim(x[0], x[-1]);
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.title('First Graph');
# -
# ### Review Exercise: Plotting with Conditionals
# Create an array, `x` with 100 values from 0 to 20, inclusive.
#
# Compute $y=\sin(x)$.
#
# Plot $y$ vs. $x$ with a blue line.
#
# Replace all values of $y$ that are larger than 0.5 with 0.5.
#
# Replace all values of $y$ that are smaller than $-$0.75 with $-0.75$.
#
# Plot $x$ vs. $y$ using a red line on the same graph.
# +
# Plotting with conditionals
# -
# Example Solution
x = np.linspace(0, 20, 100)
y = np.sin(x)
plt.plot(x, y, 'b')
y[y > 0.5] = 0.5
y[y < -0.75] = -0.75
plt.plot(x, y, 'r');
plt.xlim(x[0], x[-1])
# ### Review Exercise: Bar Chart
#
# Write a Python program to display a bar chart of the popularity of programming languages.
#
# Set the bar colour green.
#
# Sample data:
# <br>Programming languages: Java, Python, PHP, JavaScript, C#, C++
# <br>Popularity: 22.2, 17.6, 8.8, 8, 7.7, 6.7
#
# Label the x and y axes and add a title.
# +
# Bar Chart
# +
# Example Solution
languages = ['Java', 'Python', 'PHP', 'JavaScript', 'C#', 'C++']
popularity = [22.2, 17.6, 8.8, 8, 7.7, 6.7]
# Create an array with the position of each bar along the x-axis
x_pos = np.arange(len(languages))
# Produce bar plot
plt.bar(x_pos, popularity, color='green')
# Replace the x ticks with the language names
plt.xticks(x_pos, languages)
# Title and axes labels
plt.xlabel("Languages")
plt.ylabel("Popularity")
plt.title("Popularity of\n Programming Languages" );
# -
# ### Review Exercise: Plotting Functions Interactively
#
# Consider the function we used earlier:
#
# $x(t) = A cos(\omega t - \phi)$
#
# The position of a mass $x$ (relative to its start position), attached to a spring, as a function of time, depends on the angular frequency $\omega$ and the phase $\phi$ of the system.
#
# 1. Plot the function from $t = 0$ to $t = 10$, when $A$ = 5 and $\omega = \phi = 2$. Label the axes.
# 1. Create an interactive plot with sliders for $\omega$ and $\phi$, varying from 0 to 4.
# 1. Label the axes
# 1. Observe the change in the position of $x$ as a function of time $t$ as you change $\omega$ and $\phi$
#
#
# +
# Interactive Plot
# +
# Example Solution
A = 5
def plot(ω=2, ϕ=2):
t = np.linspace(0, 10, 200)
plt.xlabel('$t$')
plt.ylabel('$x$')
x = A * np.cos(ω * t - ϕ)
plt.plot(t, x)
plt.xlim(t[0], t[-1]);
interact(plot, ω=(0, 4, 0.25), ϕ=(0, 4, 0.25));  # keyword names must match the function's parameter names
# -
# ### Review Exercise: Plotting Functions Interactively
#
# Consider the function
# $$
# f(x) = e^{x/10} \sin(ax)\cos(bx)
# $$
# from $x = -4\pi$ to $x = 4\pi$.
#
# 1. Plot the function when $a = b = 1$. Label the axes.
# 1. Create an interactive plot with sliders for $a$ and $b$, varying from 0 to 2.
# +
# Interactive plot
# +
# Example Solution
def plot(a=1, b=1):
x = np.linspace(-4*np.pi, 4*np.pi, 200)
plt.xlabel('$x$')
plt.ylabel('$f$')
f = np.exp(x/10) * np.sin(a * x) * np.cos(b * x)
plt.plot(x, f)
plt.xlim(x[0], x[-1]);
interact(plot, a=(0, 2, 0.25), b=(0, 2, 0.25));
# -
# ### Review Exercise: Visualising 2D Arrays
# Create:
# - a Numpy 30 by 30 array
# - with randomly selected integer values for all elements
# - with an allowable range for each element of 1 to 50
#
# Represent the array using `matshow`:
# - Use a colour map of your choice
# - Set the colour scale minimum to 1
# - Set the colour scale maximum to 25
# - Display a colour scale on the plot
# +
# 2D Array plot
# +
# Example Solution
a = np.random.randint(1, 51, size=(30,30))  # upper bound is exclusive, so 51 gives values 1 to 50
plt.matshow(a, cmap=cm.BuPu_r, vmin=1, vmax=25)
plt.colorbar();
# -
# ### Review Exercise: Animated Plot - Projectile Trajectory
#
# A projectile is launched with initial velocity $v$, at an angle of $\theta$.
#
# If we neglect the force of drag on the projectile, it will travel in an arc.
#
# Its position in the horizontal (x) and vertical (y) directions at time $t$ can be found from:
#
# $x= vt\cos(\theta)$
# <br>$y= vt\sin(\theta) - \frac{1}{2}gt^2$
#
# where gravity, $g=9.81$ms$^{-2}$
#
# <img src="img/projectile2.png" alt="Drawing" style="width: 300px;"/>
#
# In the cell below, write a program to animate the motion of an ideal projectile when launched:
# - from an angle of $\theta = \pi/4$
# - with an initial velocity of $v=10$ms$^{-1}$
#
# *Hint*
# 1. Create a figure window
# 1. Create axes within the figure window.
# 1. Create a point to animate
# 1. Create variables for $g$, $v$, and $\theta$
# 1. Write a function, `fun` to find x and y position as a function of time, t (function argument = t)
# 1. To animate the motion of the particle create a function, `animate`. Within `animate`, call the function `fun`. Use a timestep of i/10.
# 1. Use the function `animation.FuncAnimation` to create the animation. Use 50 frames and an interval of 50ms between each frame.
#
#
# +
# Trajectory Plot
# +
# Example Solution
# Creates a figure window.
fig = plt.figure()
# Creates axes within the window
ax = plt.axes(xlim=(0, 10), ylim=(0, 5))
# Object to animate
point, = ax.plot([1], [1], marker='o', ms=40) # for points
g = 9.81
v = 10
theta = np.pi/4
# Position of mass as function of time
def fun(t):
x = v * t * np.cos(theta)
y = v * t * np.sin(theta) - (0.5 * g * t**2)
return x, y
def animate(i):
x, y = fun(i/10)
point.set_data(x, y)
return (point,)
# Animates the data; 50 frames, 50ms delay between frames, blit=True : only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, frames=50, interval=50, blit=True)
anim
# -
# ### Review Exercise: 3D plotting
# ### Extension Exercise: Plotting System of Simultaneous Equations Interactively.
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Summary
#
# - Matplotlib can be used to produce a wide range of styles of figures and plots of your data.
# - Simple line and scatter plots can be customised using a `formatstring`
# - Additional features such as a figure legend and axis labels can be set as arguments when creating plots.
# - We can "vectorise" other functions so that they accept data structures as arguments.
# - Broadcasting is a useful tool for applying information in one numpy array to another without having to repeat or reshape the arrays to match one another.
# - The matplotlib library can be used to quickly produce simple plots to visualise and check your solutions to mathematical problems.
# - This includes fitting a curve or a relationship to a dataset.
# + [markdown] slideshow={"slide_type": "slide"}
# # Homework
#
# 1. __PULL__ the changes you made in-class today to your personal computer.
# 1. __COMPLETE__ any unfinished Review Exercises.
# 1. __PUSH__ the changes you make at home to your online repository.
| 8_Plotting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Numpy
# ## working with arrays in Python
#
# ![](https://upload.wikimedia.org/wikipedia/commons/thumb/1/1a/NumPy_logo.svg/640px-NumPy_logo.svg.png)
#
#
# ### Prof. <NAME>
# © 2018 <NAME>
# + [markdown] slideshow={"slide_type": "slide"}
# ## In this tutorial you will...
# -
# * create 1- and 2-dimensional `numpy` arrays
# * see how to access individual elements and array slices
# * perform basic mathematical operations on each array element
# * learn about some of the most useful `numpy` functions
# * get a quick introduction to `numpy` structured arrays
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Full documentation
# -
# * http://www.numpy.org/
# * [numpy tutorial](https://docs.scipy.org/doc/numpy/user/quickstart.html)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Motivation
# +
list1 = [1, 2, 3, 4, 5]
list2 = [3, 4, 2, 6, 4]
# compute the item-wise list sum
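# one way with plain lists (a sketch; numpy makes this a one-liner below)
print([a + b for a, b in zip(list1, list2)])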
# + [markdown] slideshow={"slide_type": "slide"}
# ## Load the Numpy package
# -
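# the conventional alias, assumed by the rest of this notebook
import numpy as np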
# + [markdown] slideshow={"slide_type": "slide"}
# ## `numpy` array operations
# -
a1 = np.array([1, 2, 3, 4, 5])
a2 = np.array([3, 4, 2, 6, 4])
# + slideshow={"slide_type": "fragment"}
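# example: arithmetic on numpy arrays is element-wise
print(a1 + a2)
print(a1 * a2)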
# + [markdown] slideshow={"slide_type": "slide"}
# ## Array indexing and slicing
# -
print(a1)
# + slideshow={"slide_type": "fragment"}
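# example: individual elements and slices
print(a1[0], a1[-1])   # first and last elements
print(a1[1:4])         # a slice containing elements 1, 2 and 3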
# + [markdown] slideshow={"slide_type": "slide"}
# ## 2-D arrays
# -
# + slideshow={"slide_type": "fragment"}
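# a small example 2-D array (values chosen arbitrarily)
m = np.array([[1, 2, 3],
              [4, 5, 6]])
print(m[1, 2])   # element in row 1, column 2
print(m[:, 0])   # the first column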
# + [markdown] slideshow={"slide_type": "slide"}
# ## `numpy` arrays are mutable
# -
a = np.array([1, 2, 3, 4, 5])
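# example: assigning to an element or a slice changes the array in place
a[0] = 99
a[1:3] = 0
print(a)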
# + [markdown] slideshow={"slide_type": "slide"}
# ## Useful `numpy` functions
# -
np.arange(0, 10, 2)    # example arguments: start, stop (exclusive), step
# + slideshow={"slide_type": "subslide"}
np.linspace(0, 1, 5)   # example arguments: start, stop (inclusive), number of points
# + slideshow={"slide_type": "subslide"}
np.zeros(5)            # example argument: length of the array of zeros
# + slideshow={"slide_type": "fragment"}
np.ones((2, 3))        # example argument: shape of the array of ones
# + [markdown] slideshow={"slide_type": "slide"}
# ## Trig functions
# -
np.cos(a1)             # example: applied element-wise to the array a1
# + [markdown] slideshow={"slide_type": "slide"}
# ## Useful `numpy` array attributes
# -
a2.shape
a2.size
# + [markdown] slideshow={"slide_type": "slide"}
# ## Useful `numpy` array methods
# -
a1.mean()
# + slideshow={"slide_type": "subslide"}
print()
# + slideshow={"slide_type": "subslide"}
a2.sum()
# + slideshow={"slide_type": "fragment"}
a2.min()
# + slideshow={"slide_type": "fragment"}
a2.argmin()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Truth arrays
# -
a1 > 3
# + slideshow={"slide_type": "fragment"}
# + slideshow={"slide_type": "fragment"}
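# example: a truth array can be used as a mask to select elements
print(a1[a1 > 3])      # elements of a1 greater than 3
print(a2[a1 > 3])      # the same mask applied to another array of equal length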
# + [markdown] slideshow={"slide_type": "slide"}
# ## Structured arrays
# -
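# A minimal sketch of a structured array; the field names here are just an example
data = np.array([('Alice', 25, 1.68), ('Bob', 31, 1.80)],
                dtype=[('name', 'U10'), ('age', 'i4'), ('height', 'f8')])
print(data['name'])    # access a whole field by name
print(data[0])         # access a single record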
# + [markdown] slideshow={"slide_type": "subslide"}
# For more on `numpy` structured arrays see:
# * https://docs.scipy.org/doc/numpy/user/basics.rec.html#structured-arrays
# + slideshow={"slide_type": "slide"}
dir(a2)
# + slideshow={"slide_type": "slide"}
help(a2)
| numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="ABs6xC-hWIjM"
# #Exercise 1, Neural Networks
# + [markdown] id="92G6ZHQXWJQe"
# ##A. Team 15 Members
# Διολετη Ίλια 03115055 <br>
# Σουμπασάκου Αρτεμησία 03115061 <br>
# Κυριάκου Αθηνά 03117405
# + [markdown] id="pC6HjyoKWOit"
# ##B. Loading the Dataset
# + [markdown] id="c0sVZQ1aLtiH"
# The spambase dataset is a collection of "spam" e-mails, i.e. advertising, financial scams, chain messages, pornography, etc. <br> The non-spam messages come from work and personal e-mails that were contributed. These contain names and area codes and are therefore useful for building personalised spam filters. <br><br>
# It contains **4601 samples and 57 features**. More specifically: <br>
# * 48 word_freq_WORD features (continuous real variables WORD in [0,100]) = the percentage of words in the message that match WORD.
# <br>
# * 6 char_freq_CHAR features (continuous real variables CHAR in [0,100]) = the percentage of characters in the message that match that CHAR.
# <br>
# * 1 capital_run_length_average feature (continuous real variable [1,...]) = average length of uninterrupted sequences of capital letters.
# <br>
# * 1 capital_run_length_total feature (continuous real variable [1,...]) = total number of capital letters in the message.
# <br>
# * 1 spam attribute with values {0,1} = defines the class of the sample, spam (1) or not (0)
#
# All the features are numeric and ordered.
# + id="h71yuLf8WHou" executionInfo={"status": "ok", "timestamp": 1576840036822, "user_tz": -120, "elapsed": 27856, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04503647683853951774"}} outputId="bde79b3f-2e80-4482-d59e-b51795c258ba" colab={"base_uri": "https://localhost:8080/", "height": 343}
# !pip install --upgrade pip #upgrade pip package installer
# !pip install scikit-learn --upgrade #upgrade scikit-learn package
# !pip install numpy --upgrade #upgrade numpy package
# !pip install pandas --upgrade #--upgrade #upgrade pandas package
# + id="vOzBXafqYohv" executionInfo={"status": "ok", "timestamp": 1576840037708, "user_tz": -120, "elapsed": 28660, "user": {"displayName": "ilia di", "photoUrl": "", "userId": "04503647683853951774"}} outputId="852230f1-ca9d-4711-b512-419da4c25e5a" colab={"base_uri": "https://localhost:8080/", "height": 272}
import pandas as pd
df=pd.read_csv("http://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.data", header=None)
print(df)
df.shape
# + [markdown] id="wvNUs2wVbL_L"
# We see that the file has no row numbering and no column headers. As mentioned, the **labels** are in the last (58th) column, and there are 57 feature columns per sample. <br> <br>
# Note also that no conversion was needed for this text file.
# + id="bHmm5YwndL8X" executionInfo={"status": "ok", "timestamp": 1576840037714, "user_tz": -120, "elapsed": 28600, "user": {"displayName": "ilia di", "photoUrl": "", "userId": "04503647683853951774"}} outputId="292cc85b-352d-4cba-b162-58060ca91d98" colab={"base_uri": "https://localhost:8080/", "height": 34}
df.isnull().values.any()
# + [markdown] id="txXzjRWLkFxY"
# Although the dataset description states that there are missing values, the result of the code above makes it clear that this is not the case for this particular set.
# + id="ob4dMfRihttC" executionInfo={"status": "ok", "timestamp": 1576840037721, "user_tz": -120, "elapsed": 28539, "user": {"displayName": "ilia di", "photoUrl": "", "userId": "04503647683853951774"}} outputId="145d817d-aa42-440d-b3b6-8df264ca2541" colab={"base_uri": "https://localhost:8080/", "height": 68}
import numpy as np
np_data=df.values # all features are numeric, so we convert directly to a numpy array
features=np_data[:, 0:-1]
labels = np_data[:,-1].astype(int)
frequencies = np.bincount(labels)
print("class frequencies: ", frequencies)
total_samples = frequencies.sum()
print("total samples: ", total_samples)
percentage = (frequencies / total_samples) * 100
print("class percentage: ", percentage)
# + [markdown] id="fY8mUwyOjRSc"
# The dataset is **not balanced**: 60.6% of the samples belong to class 0 (not spam) and 39.4% to class 1 (spam).
# + id="2s7uza19kIaK"
from sklearn.model_selection import train_test_split
train, test, train_labels, test_labels = train_test_split(features, labels, test_size=0.3) #30% test set
# + [markdown] id="8T1D7tlTmxBS"
# Above, the required split into train and test sets (30% test) was performed.
# + [markdown] id="cncZbPFmkaZU"
# ##C. Baseline classification
# + id="hngb3LdWfKVy"
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
spam_accuracy = {}
cnf_matrix={}
fmicro={}
fmacro={}
def get_scores(classifier, prediction):
spam_accuracy[classifier] = accuracy_score(test_labels, prediction)
cnf_matrix[classifier] = confusion_matrix(test_labels, prediction)
fmicro[classifier]=precision_recall_fscore_support(test_labels, prediction, average='micro')
fmacro[classifier]=precision_recall_fscore_support(test_labels, prediction, average='macro')
# + id="4UIMzbzryNOC" executionInfo={"status": "ok", "timestamp": 1576844873531, "user_tz": -120, "elapsed": 2977, "user": {"displayName": "ilia di", "photoUrl": "", "userId": "04503647683853951774"}} outputId="5e686a5d-7ab5-405e-88d5-22228a43ad31" colab={"base_uri": "https://localhost:8080/", "height": 1000}
#DUMMY
from sklearn.dummy import DummyClassifier
dc_uniform = DummyClassifier(strategy="uniform")
dc_constant_0 = DummyClassifier(strategy="constant", constant=0)
dc_constant_1 = DummyClassifier(strategy="constant", constant=1)
dc_most_frequent = DummyClassifier(strategy="most_frequent")
dc_stratified = DummyClassifier(strategy="stratified")
model = dc_uniform.fit(train, train_labels)
pred= dc_uniform.predict(test)
get_scores('uniform (random)', pred)
model = dc_constant_0.fit(train, train_labels)
pred= dc_constant_0.predict(test)
get_scores('constant 0', pred)
model = dc_constant_1.fit(train, train_labels)
pred= dc_constant_1.predict(test)
get_scores('constant 1', pred)
model = dc_most_frequent.fit(train, train_labels)
pred=dc_most_frequent.predict(test)
get_scores('most frequent label', pred)
model = dc_stratified.fit(train, train_labels)
pred = dc_stratified.predict(test)
get_scores('stratified', pred)
#GAUSSIAN NAIVE BAYES
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
model = gnb.fit(train, train_labels)
pred=gnb.predict(test)
get_scores('gaussian naive bayes', pred)
#KNN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier() #default k = 5
knn.fit(train, train_labels)
pred = knn.predict(test)
get_scores('knn', pred)
#MLP
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(hidden_layer_sizes=1)
mlp.fit(train, train_labels)
pred = mlp.predict(test)
get_scores('mlp', pred)
print("Accuracy score:\n")
for k in spam_accuracy:
print(k, "\n", spam_accuracy[k])
print("\n")
print("Confusion matrix:\n")
for k in cnf_matrix:
print(k, "\n", cnf_matrix[k])
print("\n")
print("precision,recall,f1 micro average:\n")
for k in fmicro:
print(k, fmicro[k])
print("\n")
print("precision,recall,f1 macro average:\n")
for k in fmacro:
print(k, fmacro[k])
print("\n")
# + id="omeJ9JYQ_dhH"
# %matplotlib inline
import matplotlib.pyplot as plt
def plot_bar_chart(label_to_value, title, x_label, y_label, index): # adapted from https://www.programcreek.com/python/example/102292/matplotlib.pyplot.bar
n = len(label_to_value)
labels = sorted(label_to_value.keys())
values = [(label_to_value[label])[index] for label in labels]
plt.title(title)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.bar(range(n), values, align='center')
plt.xticks(range(n), labels, rotation='vertical', fontsize='7')
plt.gcf().subplots_adjust(bottom=0.2) # make room for x-axis labels
plt.show()
# + id="iIHb_h3A_ug_" executionInfo={"status": "ok", "timestamp": 1576844886589, "user_tz": -120, "elapsed": 1373, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04503647683853951774"}} outputId="bb39aa53-7989-4a17-dd4f-806e2fa2787b" colab={"base_uri": "https://localhost:8080/", "height": 661}
plot_bar_chart(fmicro, 'f1 scores (micro average)', 'classifier', 'f1 score', 2)
plot_bar_chart(fmacro, 'f1 scores (macro average)', 'classifier', 'f1 score', 2)
# + [markdown] id="FAbg2zko1ZpS"
# We see that even with default parameters the mlp classifier often, though not always, achieves the best performance on both f1 metrics (even with only one hidden layer). The gnb, and then the knn, also consistently score highly.
# The dummy classifiers cannot compete with the knn and gnb, which is what we expected on a dataset of this size.
#
#
# + [markdown] id="mbWnuFur7MEI"
# ##D. Classifier optimization
# + [markdown] id="c_FanZ2S7hZz"
# ###knn,gnb optimization (&dummy)
# + [markdown] id="Qw6rbMmh82RM"
# Successive search spaces, and the best hyperparameter values found:
#
#
# * vthreshold = [0, 600, 1200],n_components = [13],k = [1, 5, 11, 21, 31, 41]
# * gnb: vthres=0, f1 micro=0.81 , accuracy=0.81
# * knn: vthres=0, k=11, f1micro=0.91, accuracy=0.91
#
#
# * vthreshold=[0], n_components=range(2,13), k = odd values in range(1,50)
#
# * gnb: pca=3, f1 micro=0.87
# * knn: pca=12, k=19, f1 micro=0.92
#
#
#
#
#
#
#
#
#
#
#
#
# + [markdown] id="cWpjGoGKTEDy"
# We initialize the transformers for preprocessing and the dummy, knn, and gnb classifiers.
# We create pipelines (including one for the mlp) and define the parameter values to optimize over.
# + id="9liELsWouzgm" executionInfo={"status": "ok", "timestamp": 1576840213742, "user_tz": -120, "elapsed": 8082, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04503647683853951774"}} outputId="16139cd1-7a5f-4684-fc82-158bbb16a8a6" colab={"base_uri": "https://localhost:8080/", "height": 102}
# !pip install --upgrade imbalanced-learn
from imblearn.pipeline import Pipeline
from sklearn.metrics import classification_report
from sklearn.neighbors import KNeighborsClassifier
from sklearn.dummy import DummyClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
# import the familiar preprocessing classes
from sklearn.feature_selection import VarianceThreshold
from sklearn.preprocessing import StandardScaler # or a MinMax scaler, which would go before feature selection
from imblearn.over_sampling import RandomOverSampler # or undersampling
from sklearn.decomposition import PCA
# initialize the estimators (transformers and classifier) without parameters
selector = VarianceThreshold()
scaler = StandardScaler()
ros = RandomOverSampler()
pca = PCA()
# Initialize the classifiers
dc_uniform = DummyClassifier(strategy="uniform")
dc_constant_0 = DummyClassifier(strategy="constant", constant=0)
dc_constant_1 = DummyClassifier(strategy="constant", constant=1)
dc_most_frequent = DummyClassifier(strategy="most_frequent")
dc_stratified = DummyClassifier(strategy="stratified")
gnb = GaussianNB()
knn = KNeighborsClassifier(n_jobs=-1) # n_jobs=-1 uses all of the computer's CPU cores
clf = MLPClassifier(hidden_layer_sizes=1)
# define the pipelines
Pipelines = {}
Pipelines['dc_uniform'] = Pipeline([('selector',selector), ('scaler', scaler), ('sampler', ros), ('pca', pca), ('dc_uniform',dc_uniform)])
Pipelines['dc_constant_0'] = Pipeline([('selector',selector), ('scaler', scaler), ('sampler', ros), ('pca', pca), ('dc_constant_0',dc_constant_0)])
Pipelines['dc_constant_1'] = Pipeline([('selector',selector), ('scaler', scaler), ('sampler', ros), ('pca', pca), ('dc_constant_1',dc_constant_1)])
Pipelines['dc_most_frequent'] = Pipeline([('selector',selector), ('scaler', scaler), ('sampler', ros), ('pca', pca), ('dc_most_frequent',dc_most_frequent)])
Pipelines['dc_stratified'] = Pipeline([('selector',selector), ('scaler', scaler), ('sampler', ros), ('pca', pca), ('dc_stratified',dc_stratified)])
Pipelines['naive_bayes'] = Pipeline([('selector',selector), ('scaler', scaler), ('sampler', ros), ('pca', pca), ('naive_bayes',gnb)])
Pipelines['knn'] = Pipeline([('selector',selector), ('scaler', scaler), ('sampler', ros), ('pca', pca), ('knn',knn)])
Pipelines['mlp'] = Pipeline(steps=[('selector',selector), ('scaler', scaler), ('sampler', ros), ('pca', pca),('mlp', clf)], memory='mlp')
# Declare the parameters and the ranges they are searched over
# Check the variance of the features
#train_variance = train.var(axis=0)
#print(train_variance)
#print(np.max(train_variance))
# Selector parameter
vthreshold = [0]
# PCA parameter
n_components = list(range(2,13))
# knn parameters
myList=list(range(1,50))
k = list(filter(lambda x: x % 2 != 0, myList)) # the classifier's hyperparameter (odd values only)
weight_options=['uniform', 'distance']
metric_options=['euclidean','manhattan','chebyshev','minkowski','wminkowski','seuclidean','mahalanobis']
#scores
f_scores = ['f1_micro', 'f1_macro']
# + id="SNBi-uUgBtwO"
# mlp parameters
import numpy as np
activation_options=['identity', 'logistic', 'tanh', 'relu']
solver_options=['lbfgs', 'sgd', 'adam']
max_iter_options=list(range(230,350,20))
learning_rate_options=['constant', 'invscaling', 'adaptive']
alpha_options=list(np.arange(0.0001,0.0003, 0.00003))
# + [markdown] id="oAjI8mg6V7MU"
# We define the GridSearchCV for each classifier.
# + id="EFCi2NIj8GgO"
from sklearn.model_selection import GridSearchCV
cv_num=5
estimators = {}
# Optimization loop over the classifiers
for fscore in f_scores:
estimators['uniform (random)'] = GridSearchCV(Pipelines['dc_uniform'], dict(selector__threshold=vthreshold, pca__n_components=n_components),
cv=cv_num, scoring=fscore, refit=fscore, n_jobs=-1)
estimators['constant 0'] = GridSearchCV(Pipelines['dc_constant_0'], dict(selector__threshold=vthreshold, pca__n_components=n_components),
cv=cv_num, scoring=fscore, refit=fscore, n_jobs=-1)
estimators['constant 1'] = GridSearchCV(Pipelines['dc_constant_1'], dict(selector__threshold=vthreshold, pca__n_components=n_components),
cv=cv_num, scoring=fscore, refit=fscore, n_jobs=-1)
estimators['most frequent label'] = GridSearchCV(Pipelines['dc_most_frequent'], dict(selector__threshold=vthreshold, pca__n_components=n_components),
cv=cv_num, scoring=fscore, refit=fscore, n_jobs=-1)
estimators['stratified'] = GridSearchCV(Pipelines['dc_stratified'], dict(selector__threshold=vthreshold, pca__n_components=n_components),
cv=cv_num, scoring=fscore, refit=fscore, n_jobs=-1)
estimators['gaussian naive bayes'] = GridSearchCV(Pipelines['naive_bayes'], dict(selector__threshold=vthreshold, pca__n_components=n_components),
cv=cv_num, scoring=fscore, refit=fscore, n_jobs=-1)
estimators['knn'] = GridSearchCV(Pipelines['knn'], dict(selector__threshold=vthreshold, pca__n_components=n_components, knn__n_neighbors=k, knn__weights=weight_options,
knn__metric=metric_options),cv=cv_num, scoring=fscore, refit=fscore, n_jobs=-1)
estimators['mlp'] = GridSearchCV(Pipelines['mlp'], dict(selector__threshold=vthreshold, pca__n_components=n_components, mlp__activation=activation_options, mlp__solver=solver_options, mlp__max_iter=max_iter_options, mlp__learning_rate=learning_rate_options, mlp__alpha=alpha_options), cv=5, scoring=fscore, n_jobs=-1)
# + [markdown] id="q5rgZuVqZQX5"
# For each estimator (except mlp):
#
# * the time taken by the final fit & predict is measured
# * the confusion matrix, f1-micro average and f1-macro average are computed (and then printed)
# + id="QtQunsqfYz4D"
# Train the classifiers on the dataset (all except the mlp)
import time
preds={}
# + id="9GYWeQJg8ufz" executionInfo={"status": "ok", "timestamp": 1576841389910, "user_tz": -120, "elapsed": 947115, "user": {"displayName": "ilia di", "photoUrl": "", "userId": "04503647683853951774"}} outputId="87993119-4f38-4f0a-b9f3-3a86e0f0b3e5" colab={"base_uri": "https://localhost:8080/", "height": 1000}
for est in estimators:
if est=='mlp':
continue
else:
start = time.time()
print (est)
estimators[est].fit(train, train_labels)
end = time.time()
print ("Fit took %s seconds" % (end-start))
start= time.time()
preds[est]=estimators[est].predict(test)
end=time.time()
print("Predict took %s seconds" % (end-start))
get_scores(est,preds[est])
print(estimators[est].best_estimator_)
print(estimators[est].best_params_)
print("Accuracy score:\n")
print(est, "\n", spam_accuracy[est])
print("Confusion matrix:\n")
print(est, "\n", cnf_matrix[est])
print("\n")
print("precision,recall,f1 micro average:\n")
print(est, fmicro[est])
print("\n")
print("precision,recall,f1 macro average:\n")
print(est, fmacro[est])
print("\n")
# + [markdown] id="edz8TmDjskrU"
# Because the mlp has more parameters it requires more time, so we examine it separately.
# + [markdown] id="nR3mn8K7JOT_"
# ###mlp optimization
# + [markdown] id="pDhCzkxlBqHr"
# For the mlp:
#
# * the time taken by the final fit & predict is measured
# * the confusion matrix, f1-micro average and f1-macro average are computed (and then printed)
# Note: the bar plots already printed mix values from non-optimized and optimized classifiers, so we do not examine them (we also do not delete them, since the fit takes 8.5 hours). The bar plots of interest are shown further below.
# + id="jSQSBwWib8r5" executionInfo={"status": "ok", "timestamp": 1576778742953, "user_tz": -120, "elapsed": 5847733, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04503647683853951774"}} outputId="54694434-4e05-45e1-8793-771692991f67" colab={"base_uri": "https://localhost:8080/", "height": 1000}
import time
est='mlp'
start = time.time()
print (est)
estimators[est].fit(train, train_labels)
end = time.time()
print ("Fit took %s seconds" % (end-start))
start= time.time()
preds[est]=estimators[est].predict(test)
end=time.time()
print("Predict took %s seconds" % (end-start))
get_scores(est,preds[est])
print(estimators[est].best_estimator_)
print(estimators[est].best_params_)
print("Accuracy score:\n")
print(est, "\n", spam_accuracy[est])
print("Confusion matrix:\n")
print(est, "\n", cnf_matrix[est])
print("\n")
print("precision,recall,f1 micro average:\n")
print(est, fmicro[est])
print("\n")
print("precision,recall,f1 macro average:\n")
print(est, fmacro[est])
print("\n")
# + [markdown] id="lcW8m5KTHg5E"
# ###Table of execution times (in sec)
# + id="GbBC0udILTJZ" executionInfo={"status": "ok", "timestamp": 1576844031494, "user_tz": -120, "elapsed": 972, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04503647683853951774"}} outputId="05df0e29-4e1a-45af-f778-25310cc8069c" colab={"base_uri": "https://localhost:8080/", "height": 170}
data = {'Fit':[1.195209, 1.175358, 1.230974, 937.7526, 31022.48, 1.187389, 1.202533, 1.148648], 'Predict':[0.013617, 0.002025, 0.002629, 0.216660, 0.002927, 0.002120, 0.007130, 0.002316]}
df = pd.DataFrame(data, index =['constant 0', 'constant 1', 'gaussian naive bayes', 'knn', 'mlp', 'most frequent label', 'stratified', 'uniform'])
print(df)
# + [markdown] id="iFzt3XDWOpMx"
# Naturally the GridSearchCV **fit** takes considerably longer than predict, since a fit is performed for every combination of hyperparameters. The more parameters we have (hence the larger the grid), the more time is required. </br>
# It is therefore expected that the mlp is slowest, followed by the knn, since besides the preprocessing parameters they also have their own parameters to optimize.
# </br> **Predict** is done on the test set with the estimator using the best parameters found during the fit, so it takes less than 1 sec.
# + [markdown] id="r3LB1CQzpdzX"
# ###Bar plot comparing each f1 score for all the optimized classifiers.
# + id="XFsPybHapkso" executionInfo={"status": "ok", "timestamp": 1576841953107, "user_tz": -120, "elapsed": 1806, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04503647683853951774"}} outputId="41bc92f6-c081-475e-f5c3-bd11ccc668d4" colab={"base_uri": "https://localhost:8080/", "height": 661}
plot_bar_chart(fmicro, 'f1 scores (micro average)', 'classifier', 'f1 score', 2)
plot_bar_chart(fmacro, 'f1 scores (macro average)', 'classifier', 'f1 score', 2)
# + [markdown] id="ypzU4bjSGxxh"
# We observe that the knn has the best performance on both averaged f1 scores, followed by the mlp and the gnb. The dummy classifiers have considerably lower f1 values.
# + [markdown] id="bo5pg5IeQmWj"
# ###Table of performance change (accuracy score)
#
# + id="av1yw_BAQuJ4" executionInfo={"status": "ok", "timestamp": 1576845330020, "user_tz": -120, "elapsed": 6385, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04503647683853951774"}} outputId="0b2638bb-5b1b-4deb-dc4e-64034e87fac7" colab={"base_uri": "https://localhost:8080/", "height": 170}
data = {'Before':[0.5952208544532948, 0.4047791455467053, 0.8153511947863866, 0.8030412744388125, 0.5930485155684286, 0.5952208544532948, 0.5148443157132513, 0.5162925416364953],
'After':[0.5952208544532948, 0.4047791455467053, 0.8682114409847936, 0.9203475742215785, 0.8957277335264301, 0.5952208544532948, 0.4887762490948588, 0.49312092686459086]}
df = pd.DataFrame(data, index =['constant 0', 'constant 1', 'gaussian naive bayes', 'knn', 'mlp', 'most frequent label', 'stratified', 'uniform'])
print(df)
# + [markdown] id="pFP_rqetTtMf"
# Naturally, the performance of constant 0, constant 1 and most frequent label does not change, since these classifiers always make the same prediction; their performance therefore depends only on the number of samples of each class in the dataset. </br>
# Note also that the classifiers do not achieve exactly the same performance on every fit. </br>
# For gnb and knn we see a considerable improvement in prediction accuracy, which underlines the importance both of preprocessing and of choosing the most suitable hyperparameters for each dataset (the latter applies to knn). </br>
# We see a very large improvement for the mlp with one hidden layer; the gap would not always be this large, however, since the performance of the default configuration varies considerably.
#
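# To make the before/after comparison easier to read, a small sketch (reusing the `df`
# accuracy table built above) can add the absolute improvement per classifier:

# +
df['Improvement'] = df['After'] - df['Before']
print(df.sort_values('Improvement', ascending=False))
# -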
| Neural Networks/ex1_Supervised Learning - Classification/implementation_big_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 17Lands Comparing users and non-users via simulation
#
# __Data source__ - 17Lands individual card ratings (example https://www.17lands.com/card_ratings)<br>
# Data is scraped from the website by:
# 1. Manually visiting the site
# 2. Setting desired parameters
# 3. Manually copying the whole table, pasting into excel and saving as an MS-DOS CSV
import pandas as pd
card_ratings = pd.read_csv("AFR_CardRankings_Aug.csv")
card_ratings.dropna(inplace=True)
card_ratings.reset_index(drop=True,inplace=True)
card_ratings.head()
# ### Cleaning fields of interest
#
# Percentage fields all need to drop the % sign, and optionally divide by 100<br>
# IWD needs to drop the "pp" text<br>
# All of the "#" fields are integers, so they can drop the decimal and be recast as int
# +
#card_ratings.dtypes
card_ratings["# Seen"] = card_ratings["# Seen"].astype(int)
card_ratings["# Picked"] = card_ratings["# Picked"].astype(int)
card_ratings["# GP"] = card_ratings["# GP"].astype(int)
card_ratings["GP WR"] = card_ratings["GP WR"].str.replace("%","").astype(float) / 100
card_ratings["# OH"] = card_ratings["# OH"].astype(int)
card_ratings["OH WR"] = card_ratings["OH WR"].str.replace("%","").astype(float) / 100
card_ratings["# GD"] = card_ratings["# GD"].astype(int)
card_ratings["GD WR"] = card_ratings["GD WR"].str.replace("%","").astype(float) / 100
card_ratings["# GIH"] = card_ratings["# GIH"].astype(int)
card_ratings["GIH WR"] = card_ratings["GIH WR"].str.replace("%","").astype(float) / 100
card_ratings["# GND"] = card_ratings["# GND"].astype(int)
card_ratings["GND WR"] = card_ratings["GND WR"].str.replace("%","").astype(float) / 100
card_ratings["IWD"] = card_ratings["IWD"].str.replace("pp","").astype(float)
#card_ratings.dtypes
card_ratings.head()
# -
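# The same cleaning can also be expressed as a loop over column groups. The sketch below is an
# equivalent alternative to the cell above (it assumes the same column names) and is defined as a
# function so it is not run a second time on the already-cleaned frame:

# +
def clean_card_ratings(raw):
    """Loop-based version of the cleaning cell above (sketch)."""
    count_cols = ["# Seen", "# Picked", "# GP", "# OH", "# GD", "# GIH", "# GND"]
    pct_cols = ["GP WR", "OH WR", "GD WR", "GIH WR", "GND WR"]
    for col in count_cols:
        raw[col] = raw[col].astype(int)
    for col in pct_cols:
        raw[col] = raw[col].str.replace("%", "").astype(float) / 100
    raw["IWD"] = raw["IWD"].str.replace("pp", "").astype(float)
    return raw
# -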
# ## Super-simple comparison - what are the top commons?
# +
df = card_ratings.copy() # for ease of coding and prevents us from accidentally polluting the original
# Gameplay ranking can be estimated via GIH WR (game in hand winrate)
# 17Lands users ranking can be seen via ATA (average taken at)
# Non-17Lands users ranking can be seen via ALSA (average last seen at)
df = df[df.Color.isin(["W","U","B","R","G"])]
gih = df[df.Rarity == "C"].groupby(["Color"])[["Name", "Color", "GIH WR"]].apply(lambda x: x.nlargest(5, columns="GIH WR"))
ata = df[df.Rarity == "C"].groupby(["Color"])[["Name", "Color", "ATA"]].apply(lambda x: x.nsmallest(5, columns="ATA"))
alsa = df[df.Rarity == "C"].groupby(["Color"])[["Name", "Color", "ALSA"]].apply(lambda x: x.nsmallest(5, columns="ALSA"))
commons = pd.DataFrame()
commons["Color"] = gih["Color"].values
commons["GIH"] = gih["Name"].values
commons["17LandsUsers"] = ata["Name"].values
commons["Non17LandsUsers"] = alsa["Name"].values
gih = df[df.Rarity == "U"].groupby(["Color"])[["Name", "Color", "GIH WR"]].apply(lambda x: x.nlargest(5, columns="GIH WR"))
ata = df[df.Rarity == "U"].groupby(["Color"])[["Name", "Color", "ATA"]].apply(lambda x: x.nsmallest(5, columns="ATA"))
alsa = df[df.Rarity == "U"].groupby(["Color"])[["Name", "Color", "ALSA"]].apply(lambda x: x.nsmallest(5, columns="ALSA"))
uncommons = pd.DataFrame()
uncommons["Color"] = gih["Color"].values
uncommons["GIH"] = gih["Name"].values
uncommons["17LandsUsers"] = ata["Name"].values
uncommons["Non17LandsUsers"] = alsa["Name"].values
# -
commons
# ## And uncommons
uncommons
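# As a quick follow-up comparison (a sketch using the `commons` and `uncommons` tables built above),
# we can count, per color, how many of the top-5 GIH cards also appear in each group's top-5 picks:

# +
def top5_overlap(table):
    """Per color: overlap between the GIH top 5 and each group's top-5 picks (sketch)."""
    rows = {}
    for color, grp in table.groupby("Color"):
        gih_set = set(grp["GIH"])
        rows[color] = {
            "17LandsUsers": len(gih_set & set(grp["17LandsUsers"])),
            "Non17LandsUsers": len(gih_set & set(grp["Non17LandsUsers"])),
        }
    return pd.DataFrame(rows).T

print(top5_overlap(commons))
print(top5_overlap(uncommons))
# -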
| 17Lands Comparing users and non-users via simulation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <center>
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/Logos/organization_logo/organization_logo.png" width="300" alt="cognitiveclass.ai logo" />
# </center>
#
# # Model Evaluation and Refinement
#
# Estimated time needed: **30** minutes
#
# ## Objectives
#
# After completing this lab you will be able to:
#
# - Evaluate and refine prediction models
#
# <h1>Table of Contents</h1>
# <ul>
# <li><a href="#ref1">Model Evaluation </a></li>
# <li><a href="#ref2">Over-fitting, Under-fitting and Model Selection </a></li>
# <li><a href="#ref3">Ridge Regression </a></li>
# <li><a href="#ref4">Grid Search</a></li>
# </ul>
#
# This dataset was hosted on IBM Cloud Object Storage. Click <a href="https://cocl.us/DA101EN_object_storage">HERE</a> for free storage.
#
# +
import pandas as pd
import numpy as np
# Import clean data
path = 'https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DA0101EN/module_5_auto.csv'
df = pd.read_csv(path)
# -
df.to_csv('module_5_auto.csv')
# First, let's use only the numeric data
#
df=df._get_numeric_data()
df.head()
# Libraries for plotting
#
# %%capture
# ! pip install ipywidgets
from ipywidgets import interact, interactive, fixed, interact_manual
# <h2>Functions for plotting</h2>
#
def DistributionPlot(RedFunction, BlueFunction, RedName, BlueName, Title):
width = 12
height = 10
plt.figure(figsize=(width, height))
ax1 = sns.distplot(RedFunction, hist=False, color="r", label=RedName)
ax2 = sns.distplot(BlueFunction, hist=False, color="b", label=BlueName, ax=ax1)
plt.title(Title)
plt.xlabel('Price (in dollars)')
plt.ylabel('Proportion of Cars')
plt.show()
plt.close()
def PollyPlot(xtrain, xtest, y_train, y_test, lr,poly_transform):
width = 12
height = 10
plt.figure(figsize=(width, height))
#training data
#testing data
# lr: linear regression object
#poly_transform: polynomial transformation object
xmax=max([xtrain.values.max(), xtest.values.max()])
xmin=min([xtrain.values.min(), xtest.values.min()])
x=np.arange(xmin, xmax, 0.1)
plt.plot(xtrain, y_train, 'ro', label='Training Data')
plt.plot(xtest, y_test, 'go', label='Test Data')
plt.plot(x, lr.predict(poly_transform.fit_transform(x.reshape(-1, 1))), label='Predicted Function')
plt.ylim([-10000, 60000])
plt.ylabel('Price')
plt.legend()
# <h1 id="ref1">Part 1: Training and Testing</h1>
#
# <p>An important step in testing your model is to split your data into training and testing data. We will place the target data <b>price</b> in a separate dataframe <b>y</b>:</p>
#
y_data = df['price']
# drop price data in x data
#
x_data=df.drop('price',axis=1)
# Now we randomly split our data into training and testing data using the function <b>train_test_split</b>.
#
# +
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.10, random_state=1)
print("number of test samples :", x_test.shape[0])
print("number of training samples:",x_train.shape[0])
# -
# The <b>test_size</b> parameter sets the proportion of data that is split into the testing set. In the above, the testing set is set to 10% of the total dataset.
#
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #1):</h1>
#
# <b>Use the function "train_test_split" to split up the data set such that 40% of the data samples will be utilized for testing, set the parameter "random_state" equal to zero. The output of the function should be the following: "x_train_1" , "x_test_1", "y_train_1" and "y_test_1".</b>
#
# </div>
#
# Write your code below and press Shift+Enter to execute
x_train1, x_test1, y_train1, y_test1 = train_test_split(x_data, y_data, test_size=0.40, random_state=0)
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# x_train1, x_test1, y_train1, y_test1 = train_test_split(x_data, y_data, test_size=0.4, random_state=0)
# print("number of test samples :", x_test1.shape[0])
# print("number of training samples:",x_train1.shape[0])
#
# -->
#
# Let's import <b>LinearRegression</b> from the module <b>linear_model</b>.
#
from sklearn.linear_model import LinearRegression
# We create a Linear Regression object:
#
lre=LinearRegression()
# we fit the model using the feature horsepower
#
lre.fit(x_train[['horsepower']], y_train)
# Let's Calculate the R^2 on the test data:
#
lre.score(x_test[['horsepower']], y_test)
# We can see the R^2 is much smaller on the test data than on the training data, computed below:
#
lre.score(x_train[['horsepower']], y_train)
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #2): </h1>
# <b>
# Find the R^2 on the test data using 90% of the data for training data
# </b>
# </div>
#
# Write your code below and press Shift+Enter to execute
x_train1, x_test1, y_train1, y_test1 = train_test_split(x_data, y_data, test_size=0.1, random_state=0)
lre.fit(x_train1[['horsepower']],y_train1)
lre.score(x_test1[['horsepower']],y_test1)
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# x_train1, x_test1, y_train1, y_test1 = train_test_split(x_data, y_data, test_size=0.1, random_state=0)
# lre.fit(x_train1[['horsepower']],y_train1)
# lre.score(x_test1[['horsepower']],y_test1)
#
# -->
#
# Sometimes you do not have sufficient testing data; as a result, you may want to perform Cross-validation. Let's go over several methods that you can use for Cross-validation.
#
# <h2>Cross-validation Score</h2>
#
# Let's import <b>cross_val_score</b> from the module <b>model_selection</b>.
#
from sklearn.model_selection import cross_val_score
# We input the object, the feature (in this case 'horsepower') and the target data (y_data). The parameter 'cv' determines the number of folds; in this case 4.
#
Rcross = cross_val_score(lre, x_data[['horsepower']], y_data, cv=4)
# The default scoring is R^2; each element in the array is the R^2 value of one fold:
#
Rcross
# We can calculate the average and standard deviation of our estimate:
#
print("The mean of the folds are", Rcross.mean(), "and the standard deviation is" , Rcross.std())
# We can use the negative mean squared error as a score by setting the 'scoring' parameter to 'neg_mean_squared_error'.
#
-1 * cross_val_score(lre,x_data[['horsepower']], y_data,cv=4,scoring='neg_mean_squared_error')
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #3): </h1>
# <b>
# Calculate the average R^2 using two folds, find the average R^2 for the second fold utilizing the horsepower as a feature :
# </b>
# </div>
#
# Write your code below and press Shift+Enter to execute
Rcross1 = cross_val_score(lre, x_data[['horsepower']], y_data, cv=2)
Rcross1.mean()
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# Rc=cross_val_score(lre,x_data[['horsepower']], y_data,cv=2)
# Rc.mean()
#
# -->
#
# You can also use the function 'cross_val_predict' to predict the output. The function splits up the data into the specified number of folds, using one fold for testing and the other folds are used for training. First import the function:
#
from sklearn.model_selection import cross_val_predict
# We input the object, the feature (in this case <b>'horsepower'</b>) and the target data <b>y_data</b>. The parameter 'cv' determines the number of folds; in this case 4. We can produce an output:
#
yhat = cross_val_predict(lre,x_data[['horsepower']], y_data,cv=4)
yhat[0:5]
# <h1 id="ref2">Part 2: Overfitting, Underfitting and Model Selection</h1>
#
# <p>It turns out that the test data sometimes referred to as the out of sample data is a much better measure of how well your model performs in the real world. One reason for this is overfitting; let's go over some examples. It turns out these differences are more apparent in Multiple Linear Regression and Polynomial Regression so we will explore overfitting in that context.</p>
#
# Let's create Multiple linear regression objects and train the model using <b>'horsepower'</b>, <b>'curb-weight'</b>, <b>'engine-size'</b> and <b>'highway-mpg'</b> as features.
#
lr = LinearRegression()
lr.fit(x_train[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']], y_train)
# Prediction using training data:
#
yhat_train = lr.predict(x_train[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
yhat_train[0:5]
# Prediction using test data:
#
yhat_test = lr.predict(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
yhat_test[0:5]
# Let's perform some model evaluation using our training and testing data separately. First we import the seaborn and matplotlib libraries for plotting.
#
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
# Let's examine the distribution of the predicted values of the training data.
#
Title = 'Distribution Plot of Predicted Value Using Training Data vs Training Data Distribution'
DistributionPlot(y_train, yhat_train, "Actual Values (Train)", "Predicted Values (Train)", Title)
# Figure 1: Plot of predicted values using the training data compared to the training data.
#
# So far the model seems to be doing well in learning from the training dataset. But what happens when the model encounters new data from the testing dataset? When the model generates new values from the test data, we see the distribution of the predicted values is much different from the actual target values.
#
Title='Distribution Plot of Predicted Value Using Test Data vs Data Distribution of Test Data'
DistributionPlot(y_test,yhat_test,"Actual Values (Test)","Predicted Values (Test)",Title)
# Figure 2: Plot of predicted values using the test data compared to the test data.
#
# <p>Comparing Figure 1 and Figure 2, it is evident that the predicted distribution in Figure 1 fits the actual data much better. The difference in Figure 2 is most apparent in the range from 5,000 to 15,000, where the distribution shapes differ markedly. Let's see if polynomial regression also exhibits a drop in prediction accuracy when analysing the test dataset.</p>
#
from sklearn.preprocessing import PolynomialFeatures
# <h4>Overfitting</h4>
# <p>Overfitting occurs when the model fits the noise, not the underlying process. Therefore when testing your model using the test-set, your model does not perform as well as it is modelling noise, not the underlying process that generated the relationship. Let's create a degree 5 polynomial model.</p>
#
# Let's use 55 percent of the data for training and the rest for testing:
#
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=0.45, random_state=0)
# We will perform a degree 5 polynomial transformation on the feature <b>'horsepower'</b>.
#
pr = PolynomialFeatures(degree=5)
x_train_pr = pr.fit_transform(x_train[['horsepower']])
x_test_pr = pr.fit_transform(x_test[['horsepower']])
pr
# Now let's create a linear regression model "poly" and train it.
#
poly = LinearRegression()
poly.fit(x_train_pr, y_train)
# We obtain the output of our model using the method "predict" and assign the values to "yhat".
#
yhat = poly.predict(x_test_pr)
yhat[0:5]
# Let's take the first four predicted values and compare them to the actual targets.
#
print("Predicted values:", yhat[0:4])
print("True values:", y_test[0:4].values)
# We will use the function "PollyPlot" that we defined at the beginning of the lab to display the training data, testing data, and the predicted function.
#
PollyPlot(x_train[['horsepower']], x_test[['horsepower']], y_train, y_test, poly,pr)
# Figure 4: A polynomial regression model; red dots represent training data, green dots represent test data, and the blue line represents the model prediction.
#
# We see that the estimated function appears to track the data but around 200 horsepower, the function begins to diverge from the data points.
#
# R^2 of the training data:
#
poly.score(x_train_pr, y_train)
# R^2 of the test data:
#
poly.score(x_test_pr, y_test)
# We see the R^2 for the training data is 0.5567 while the R^2 on the test data was -29.87. The lower the R^2, the worse the model, a Negative R^2 is a sign of overfitting.
#
# Let's see how the R^2 changes on the test data for different order polynomials and plot the results:
#
# +
Rsqu_test = []
order = [1, 2, 3, 4]
for n in order:
pr = PolynomialFeatures(degree=n)
x_train_pr = pr.fit_transform(x_train[['horsepower']])
x_test_pr = pr.fit_transform(x_test[['horsepower']])
lr.fit(x_train_pr, y_train)
Rsqu_test.append(lr.score(x_test_pr, y_test))
plt.plot(order, Rsqu_test)
plt.xlabel('order')
plt.ylabel('R^2')
plt.title('R^2 Using Test Data')
plt.text(3, 0.75, 'Maximum R^2 ')
# -
# We see the R^2 gradually increases until an order three polynomial is used. Then the R^2 dramatically decreases at four.
#
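# To confirm this programmatically, here is a small sketch using the `order` and `Rsqu_test` lists computed above:
#

# +
best_order = order[np.argmax(Rsqu_test)]
print("Polynomial order with the highest test R^2:", best_order)
# -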
# The following function will be used in the next section; please run the cell.
#
def f(order, test_data):
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=test_data, random_state=0)
pr = PolynomialFeatures(degree=order)
x_train_pr = pr.fit_transform(x_train[['horsepower']])
x_test_pr = pr.fit_transform(x_test[['horsepower']])
poly = LinearRegression()
poly.fit(x_train_pr,y_train)
PollyPlot(x_train[['horsepower']], x_test[['horsepower']], y_train,y_test, poly, pr)
# The following interface allows you to experiment with different polynomial orders and different amounts of data.
#
interact(f, order=(0, 6, 1), test_data=(0.05, 0.95, 0.05))
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #4a):</h1>
#
# <b>We can perform polynomial transformations with more than one feature. Create a "PolynomialFeatures" object "pr1" of degree two?</b>
#
# </div>
#
pr1 = PolynomialFeatures(degree=2)
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# pr1=PolynomialFeatures(degree=2)
#
# -->
#
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #4b): </h1>
#
# <b>
# Transform the training and testing samples for the features 'horsepower', 'curb-weight', 'engine-size' and 'highway-mpg'. Hint: use the method "fit_transform"
# ?</b>
# </div>
#
# +
x_train_pr1=pr1.fit_transform(x_train[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
x_test_pr1=pr1.fit_transform(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
# -
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# x_train_pr1=pr1.fit_transform(x_train[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
#
# x_test_pr1=pr1.fit_transform(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
#
# -->
#
# <!-- The answer is below:
#
# x_train_pr1=pr.fit_transform(x_train[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
# x_test_pr1=pr.fit_transform(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']])
#
# -->
#
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #4c): </h1>
# <b>
# How many dimensions does the new feature have? Hint: use the attribute "shape"
# </b>
# </div>
#
x_train_pr1.shape
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# x_train_pr1.shape #There are now 15 features:
#
# -->
#
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #4d): </h1>
#
# <b>
# Create a linear regression model "poly1" and train the object using the method "fit" using the polynomial features?</b>
# </div>
#
poly1=LinearRegression().fit(x_train_pr1,y_train)
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# poly1=LinearRegression().fit(x_train_pr1,y_train)
#
# -->
#
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #4e): </h1>
# <b>Use the method "predict" to predict an output on the polynomial features, then use the function "DistributionPlot" to display the distribution of the predicted output vs the test data?</b>
# </div>
#
yhat_test1=poly1.predict(x_test_pr1)
Title='Distribution Plot of Predicted Value Using Test Data vs Data Distribution of Test Data'
DistributionPlot(y_test, yhat_test1, "Actual Values (Test)", "Predicted Values (Test)", Title)
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# yhat_test1=poly1.predict(x_test_pr1)
# Title='Distribution Plot of Predicted Value Using Test Data vs Data Distribution of Test Data'
# DistributionPlot(y_test, yhat_test1, "Actual Values (Test)", "Predicted Values (Test)", Title)
#
# -->
#
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #4f): </h1>
#
# <b>Using the distribution plot above, explain in words the two regions where the predicted prices are less accurate than the actual prices.</b>
#
# </div>
#
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# The predicted values are higher than the actual values for cars in the $10,000 price range; conversely, the predicted prices are lower than the actual prices in the $30,000 to $40,000 range. As such, the model is not as accurate in these ranges.
#
# -->
#
# <h2 id="ref3">Part 3: Ridge regression</h2>
#
# In this section, we will review Ridge Regression and see how the parameter alpha changes the model. Note that our test data will be used as validation data here.
#
# Let's perform a degree two polynomial transformation on our data.
#
pr=PolynomialFeatures(degree=2)
x_train_pr=pr.fit_transform(x_train[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg','normalized-losses','symboling']])
x_test_pr=pr.fit_transform(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg','normalized-losses','symboling']])
# Let's import <b>Ridge</b> from the module <b>linear_model</b>.
#
from sklearn.linear_model import Ridge
# Let's create a Ridge regression object, setting the regularization parameter to 0.1
#
RigeModel=Ridge(alpha=0.1)
# Like regular regression, you can fit the model using the method <b>fit</b>.
#
RigeModel.fit(x_train_pr, y_train)
# Similarly, you can obtain a prediction:
#
yhat = RigeModel.predict(x_test_pr)
# Let's compare the first four predicted samples to our test set
#
print('predicted:', yhat[0:4])
print('test set :', y_test[0:4].values)
# We select the value of alpha that minimizes the test error; for example, we can use a for loop:
#
Rsqu_test = []
Rsqu_train = []
dummy1 = []
Alpha = 10 * np.array(range(0,1000))
for alpha in Alpha:
RigeModel = Ridge(alpha=alpha)
RigeModel.fit(x_train_pr, y_train)
Rsqu_test.append(RigeModel.score(x_test_pr, y_test))
Rsqu_train.append(RigeModel.score(x_train_pr, y_train))
# We can plot out the value of R^2 for different Alphas
#
# +
width = 12
height = 10
plt.figure(figsize=(width, height))
plt.plot(Alpha,Rsqu_test, label='validation data ')
plt.plot(Alpha,Rsqu_train, 'r', label='training Data ')
plt.xlabel('alpha')
plt.ylabel('R^2')
plt.legend()
# -
# **Figure 6**: The blue line represents the R^2 of the validation data, and the red line represents the R^2 of the training data. The x-axis represents the different values of alpha.
#
# The red curve is evaluated on the same data the model was built on, while the blue curve is evaluated on held-out validation data.
#
# The red line in Figure 6 represents the R^2 of the training data.
# As alpha increases, the R^2 decreases.
# Therefore, as alpha increases, the model performs worse on the training data.
#
# The blue line represents the R^2 on the validation data.
# As the value of alpha increases, the R^2 increases and converges at a point.
#
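# To locate that point programmatically, here is a small sketch using the `Alpha` and `Rsqu_test` arrays computed above:
#

# +
best_alpha = Alpha[np.argmax(Rsqu_test)]
print("alpha with the highest validation R^2:", best_alpha)
# -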
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #5): </h1>
#
# Perform Ridge regression and calculate the R^2 using the polynomial features, use the training data to train the model and test data to test the model. The parameter alpha should be set to 10.
#
# </div>
#
# Write your code below and press Shift+Enter to execute
RigeModel = Ridge(alpha=10)
RigeModel.fit(x_train_pr, y_train)
RigeModel.score(x_test_pr, y_test)
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# RigeModel = Ridge(alpha=10)
# RigeModel.fit(x_train_pr, y_train)
# RigeModel.score(x_test_pr, y_test)
#
# -->
#
# <h2 id="ref4">Part 4: Grid Search</h2>
#
# The term alpha is a hyperparameter; sklearn has the class <b>GridSearchCV</b> to make the process of finding the best hyperparameter simpler.
#
# Let's import <b>GridSearchCV</b> from the module <b>model_selection</b>.
#
from sklearn.model_selection import GridSearchCV
# We create a dictionary of parameter values:
#
parameters1 = [{'alpha': [0.001, 0.1, 1, 10, 100, 1000, 10000, 100000]}]
parameters1
# Create a Ridge regression object:
#
RR=Ridge()
RR
# Create a ridge grid search object
#
Grid1 = GridSearchCV(RR, parameters1,cv=4)
# Fit the model
#
Grid1.fit(x_data[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']], y_data)
# The object finds the best parameter values on the validation data. We can obtain the estimator with the best parameters and assign it to the variable BestRR as follows:
#
BestRR=Grid1.best_estimator_
BestRR
# We now test our model on the test data
#
BestRR.score(x_test[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']], y_test)
# <div class="alert alert-danger alertdanger" style="margin-top: 20px">
# <h1> Question #6): </h1>
# Perform a grid search for the alpha parameter and the normalization parameter, then find the best values of the parameters
# </div>
#
# Write your code below and press Shift+Enter to execute
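# One possible answer is sketched below (it mirrors the hidden solution in this lab; note that the `normalize` parameter has been removed from `Ridge` in newer scikit-learn versions, where the scaling step would instead be done separately, e.g. with `StandardScaler`):
#

# +
parameters2 = [{'alpha': [0.001, 0.1, 1, 10, 100, 1000, 10000, 100000], 'normalize': [True, False]}]
Grid2 = GridSearchCV(Ridge(), parameters2, cv=4)
Grid2.fit(x_data[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']], y_data)
Grid2.best_estimator_
# -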
# Double-click <b>here</b> for the solution.
#
# <!-- The answer is below:
#
# parameters2= [{'alpha': [0.001,0.1,1, 10, 100, 1000,10000,100000,100000],'normalize':[True,False]} ]
# Grid2 = GridSearchCV(Ridge(), parameters2,cv=4)
# Grid2.fit(x_data[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']],y_data)
# Grid2.best_estimator_
#
# -->
#
# ### Thank you for completing this lab!
#
# ## Author
#
# <a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank"><NAME></a>
#
# ### Other Contributors
#
# <a href="https://www.linkedin.com/in/mahdi-noorian-58219234/" target="_blank"><NAME> PhD</a>
#
# <NAME>
#
# <NAME>
#
# <NAME>
#
# Parizad
#
# <NAME>
#
# <a href="https://www.linkedin.com/in/fiorellawever/" target="_blank"><NAME></a>
#
# <a href=" https://www.linkedin.com/in/yi-leng-yao-84451275/ " target="_blank" ><NAME></a>.
#
# ## Change Log
#
# | Date (YYYY-MM-DD) | Version | Changed By | Change Description |
# | ----------------- | ------- | ---------- | ----------------------------------- |
# | 2020-10-05 | 2.2 | Lakshmi | Removed unused library imports |
# | 2020-09-14 | 2.1 | Lakshmi | Made changes in OverFitting section |
# | 2020-08-27 | 2.0 | Lavanya | Moved lab to course repo in GitLab |
#
# <hr>
#
# ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/>
#
| 6-Data_Analysis_with_Python/5 Model Evaluation and Refinement/DA0101EN-5-Review-Model-Evaluation-and-Refinement.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# !python --version
# from pandas import Panel  # pandas.Panel was removed in pandas 1.0 and is not used below
# !pip install opencv-python
# +
import cv2
import os
#os.chdir('')
img = cv2.imread('../Movie_Poster_Dataset/1980/tt0079302.jpg')  # OpenCV loads images in BGR order
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# -
type(img)
cv2.imshow('Film', img)
# +
import matplotlib.pyplot as plt
imgplot = plt.imshow(img)
plt.show()
# -
img.shape
# +
# see the highest, lowest and average pixel
for i in range(img.shape[2]):
channel = img[:,:,i]
print(channel.shape)
print(channel.max())
print(channel.min())
print(channel.mean())
# +
# check the ground-truth info to see if it can be matched to the poster ID in the JSON-like text file
import json
filename = '../groundtruth/1980.txt'
n = 0
with open(filename) as fh:
print(fh)
for l in fh:
#print(l)
str_val = l.strip().split(':', 1)
print(str_val)
n+=1
if n == 100:
break
# +
all_dict = {}
n = 0
with open(filename) as fh:
print(fh)
for l in fh:
#print(l)
str_val = l.strip().split(':', 1)
#print(len(str_val))
n+=1
if (len(str_val) == 1) & (str_val[0] == '{'):
film_dict = {}
elif (len(str_val) == 1) & (str_val[0] == '}'):
all_dict[n] = film_dict
elif (len(str_val) == 2):
key = str_val[0]
value = str_val[1]
#print(key.replace("\"", ""))
#print(value.replace("\"", "").replace(",", ""))
film_dict[key.replace("\"", "").strip()] = value.replace("\"", "").replace(",", "").strip()
if n == 201:
break
print(film_dict)
print(all_dict)
# +
# loop through setup of folders to get max/min and average values.
import os
import numpy as np
rootdir = '/Users/MStamp/Documents/2021 Learning/CV Course Continuation/CV-Assessments/Movie_Poster_Dataset/'
os.chdir(rootdir)
folders = os.listdir()
max_height = ['',0]
max_width = ['',0]
average_vals_per_channel = np.array(['', 0, 0, 0, 0])  # image, channel, max, min, mean
print(folders)
for f in folders:
if f == '.DS_Store':
continue
print(f)
os.chdir(rootdir + f)
images = os.listdir()
for i in images:
        img = cv2.imread(i)  # loaded in BGR order; fine for per-channel statistics
#print(img.shape[0])
#print(img.shape[1])
if img.shape[0] > max_height[1]:
max_height = [f + "/" + i, img.shape[0]]
if img.shape[1] > max_width[1]:
max_width = [f + "/" + i, img.shape[1]]
for c in range(img.shape[2]):
channel = img[:,:,c]
channel = np.array([i, c, channel.max(), channel.min(), channel.mean()])
if c == 0:
all_channels = channel
else:
all_channels = np.concatenate((all_channels, channel), axis=0)
average_vals_per_channel = np.concatenate((average_vals_per_channel, all_channels), axis=0)
# -
max_height
max_width
average_vals_per_channel
| Assessment 1 Setup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# name: python385jvsc74a57bd0b4760074067732e81c03fa2f3451350d5e78e45f688639748e9cab7f332de886
# ---
import pandas as pd
import researchpy as rp
import scipy.stats as stats
import numpy as np
from matplotlib import pyplot
from IPython.display import Image
# # Let's start with shellfish (the abalone dataset)
data = pd.read_csv('data/abalone.csv')
data.head(3)
half = len(data) // 2  # integer midpoint so that .loc slicing works
sam1 = data.loc[:half, 'Length']
sam2 = data.loc[half:, 'Length']
stats.ttest_ind(sam2, sam1)
# # Analysis of variance and shells
# +
# Analysis of variance (ANOVA).
from scipy.stats import f_oneway
# Here are some data [3] on a shell measurement (the length of the anterior adductor muscle scar,
# standardized by dividing by length) in the mussel Mytilus trossulus from five locations:
# Tillamook, Oregon; Newport, Oregon; Petersburg, Alaska; Magadan, Russia;
# and Tvarminne, Finland, taken from a much larger dataset used by McDonald et al. (1991).
tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735, 0.0659, 0.0923, 0.0836]
newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,0.0725]
petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764, 0.0689]
tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
F, p = f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
alpha = 0.05  # significance level
print(F, p)
if p > alpha:
print('Same distribution (fail to reject H0)')
else:
print('Different distribution (reject H0)')
# -
# # Can you give an example of when samples are dependent and when they are not?
# +
# The Wilcoxon test for independent samples is also known as the Mann-Whitney U test
from numpy.random import seed
from numpy.random import randn
from scipy.stats import mannwhitneyu
# seed the random number generator
seed(1)
# Generate two independent samples
data1 = 5 * randn(100) + 50
data2 = 5 * randn(100) + 51
# Compare the samples
stat, p = mannwhitneyu(data1, data2)
print('Statistics=%.3f, p=%.3f' % (stat, p))
# Interpret the result
alpha = 0.05  # significance level
if p > alpha:
print('Same distribution (fail to reject H0)')
else:
    print('Different distribution (reject H0)')
# +
# pd.DataFrame(data1).hist()
# +
# pd.DataFrame(data2).hist()
# -
# # Which samples are dependent? Give some examples
# +
# Wilcoxon signed-rank test
from numpy.random import seed
from numpy.random import randn
from scipy.stats import wilcoxon
seed(1)
# Generate two dependent (paired) samples
data1 = 5 * randn(100) + 50
data2 = 5 * randn(100) + 51
# compare samples
stat, p = wilcoxon(data1, data2)
print('Statistics=%.3f, p=%.3f' % (stat, p))
# interpret
alpha = 0.05
if p > alpha:
print('Same distribution (fail to reject H0)')
else:
print('Different distribution (reject H0)')
# -
# # Exercises to solve on your own
def analysis(d1, d2):
    stat, p = wilcoxon(d1, d2)
    print('Statistics=%.3f, p=%.3f' % (stat, p))
    # interpret
    alpha = 0.05
    if p > alpha:
        print('No difference, fail to reject the null hypothesis')
    else:
        print('The effects differ, reject the null hypothesis')
# #### 1. You are given the results of trials of two drugs. Assess whether there is a difference in effect between these patients.
#
# A - results for a sample of people receiving drug A.
#
# B - results for a different sample of people receiving drug B.
np.random.seed(11)
A=stats.norm.rvs(scale=30,loc=11,size=100)
B=A+stats.norm.rvs(scale=54,loc=11,size=100)
analysis(A, B)
# Conclusion: the effects differ
# #### 2. You are given the results of trials of two drugs. Assess whether there is a difference in effect between these patients.
#
# A - the same people, but without the drug.
#
# B - the sample receiving drug B.
np.random.seed(11)
A=stats.norm.rvs(scale=30,loc=11,size=100)
B=A+stats.norm.rvs(scale=54,loc=11,size=100)
analysis(A, B)
# Conclusion: there is an effect; the drug works
# #### 3. Suppose you decide to run a garden experiment: you water one group of cucumbers with plain water, another group with water plus fertilizer, and a third group with mineral water. Using analysis of variance, draw conclusions about the distributions of the results. Is there an effect of the fertilizer compared to the mineral water?
#
#
# water = [1,2,3,4,2,4,2,4,5,2,3,4,2,1,3,4,3,2,5,1]
#
# nutri = [1,2,4,6,5,6,7,5,4,5,6,7,4,3,5,5,6,5,4,3,5]
#
# mineral =[2,1,1,3,2,4,2,4,5,4,3,2,3,2,3,1,3,4,5,1,4]
water = [1,2,3,4,2,4,2,4,5,2,3,4,2,1,3,4,3,2,5,1]
nutri = [1,2,4,6,5,6,7,5,4,5,6,7,4,3,5,5,6,5,4,3,5]
mineral =[2,1,1,3,2,4,2,4,5,4,3,2,3,2,3,1,3,4,5,1,4]
# +
from scipy.stats import f_oneway
F, p = f_oneway(water, nutri, mineral)
alpha = 0.05  # significance level
print(F, p)
if p > alpha:
print('Same distribution (fail to reject H0)')
else:
print('Different distribution (reject H0)')
# +
# Conclusion: watering with fertilizer has an effect
# -
# # Cookie Cats
# Cookie Cats is an extremely popular mobile puzzle game developed by Tactile Entertainment. It is a classic "connect three"-style puzzle game in which the player must connect tiles of the same color to clear the board and win the level. It also features singing cats. Check out this short demo:
Image("data/cookie_cats_video.jpeg")
# As players progress through the levels, they occasionally encounter gates that force them to wait a non-trivial amount of time or make an in-app purchase in order to continue. Besides encouraging in-app purchases, these gates serve an important purpose: they give players a forced break from playing, which will hopefully increase and prolong their enjoyment of the game.
# But where should the gates be placed? Initially the first gate was placed at level 30. In this project you are asked to analyze an A/B test in which the creators moved the first gate in Cookie Cats from level 30 to level 40. In particular, you need to examine the impact of the A/B test on player retention.
Image("data/cc_gates.png")
# # A/B test data
data = pd.read_csv('data/cookie_cats.csv')
data.head()
# ### The data come from 90,189 players who installed the game while the A/B test was running. Variables:
#
# userid - a unique number identifying each player.
#
# version - whether the player was placed in the control group (gate_30 - a gate at level 30) or the test group (gate_40 - a gate at level 40).
#
# sum_gamerounds - the number of game rounds the player completed during the first week after installation.
#
# retention_1 - did the player come back and play 1 day after installing?
#
# retention_7 - did the player come back and play 7 days after installing?
#
# When a player installed the game, they were randomly assigned to either gate_30 or gate_40.
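# As a quick first look (a sketch using the columns described above), the raw retention
# rates per version can be computed directly:

# +
data.groupby('version')[['retention_1', 'retention_7']].mean()
# -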
# Group the test results by game version.
data.groupby('version').count()
# +
# Both versions have a similar number of players
# -
# Count the number of players for each total of game rounds played
plot_df = data.groupby('sum_gamerounds')['userid'].count()
plot_df.hist(bins=30)
plot_df.head()
# +
# Plot the distribution of players who played 0-100 game rounds during their first week in the game.
# %matplotlib inline
ax = plot_df[:100].plot(figsize=(10,6))
ax.set_title("The number of players that played 0-100 game rounds during the first week")
ax.set_ylabel("Number of Players")
ax.set_xlabel('# Game rounds')
# +
# Create a list of bootstrapped means for each A/B group
boot_1d = []
boot_7d = []
for i in range(1000):
data_sample = data.sample(frac = 1,replace = True).groupby('version')
boot_1d_mean = data_sample['retention_1'].mean()
boot_1d.append(boot_1d_mean)
boot_7d_mean = data_sample['retention_7'].mean()
boot_7d.append(boot_7d_mean)
# Convert the lists to DataFrames
boot_1d = pd.DataFrame(boot_1d)
boot_7d = pd.DataFrame(boot_7d)
# A Kernel Density Estimate plot of the bootstrap distributions
boot_1d.plot(kind='density')
boot_7d.plot(kind='density')
# +
# For 1-day retention the effect is ambiguous;
# for 7-day retention the distributions differ, so the effect is not random (we reject the null hypothesis)
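# A small sketch to quantify this, using the bootstrap DataFrames built above
# (their columns are the version labels, gate_30 and gate_40):
boot_7d["diff_pct"] = (boot_7d["gate_30"] - boot_7d["gate_40"]) / boot_7d["gate_40"] * 100
prob_gate30_better = (boot_7d["diff_pct"] > 0).mean()
print("Estimated probability that 7-day retention is higher with the gate at level 30:", prob_gate30_better)
# -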
| analysis_of_variance/Analysis_of_Variance_practice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Implementation
# This section shows how the linear regression extensions discussed in this chapter are typically fit in Python. First let's import the {doc}`Boston housing</content/appendix/data>` dataset.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import datasets
boston = datasets.load_boston()
X_train = boston['data']
y_train = boston['target']
# ## Regularized Regression
# Both Ridge and Lasso regression can be easily fit using `scikit-learn`. A bare-bones implementation is provided below. Note that the regularization parameter `alpha` (which we called $\lambda$) is chosen arbitrarily.
# +
from sklearn.linear_model import Ridge, Lasso
alpha = 1
# Ridge
ridge_model = Ridge(alpha = alpha)
ridge_model.fit(X_train, y_train)
# Lasso
lasso_model = Lasso(alpha = alpha)
lasso_model.fit(X_train, y_train);
# -
# In practice, however, we want to choose `alpha` through cross validation. This is easily implemented in `scikit-learn` by designating a set of `alpha` values to try and fitting the model with `RidgeCV` or `LassoCV`.
# +
from sklearn.linear_model import RidgeCV, LassoCV
alphas = [0.01, 1, 100]
# Ridge
ridgeCV_model = RidgeCV(alphas = alphas)
ridgeCV_model.fit(X_train, y_train)
# Lasso
lassoCV_model = LassoCV(alphas = alphas)
lassoCV_model.fit(X_train, y_train);
# -
# We can then see which values of `alpha` performed best with the following.
print('Ridge alpha:', ridgeCV_model.alpha_)
print('Lasso alpha:', lassoCV_model.alpha_)
# ## Bayesian Regression
# We can also fit Bayesian regression using `scikit-learn` (though another popular package is `pymc3`). A very straightforward implementation is provided below.
from sklearn.linear_model import BayesianRidge
bayes_model = BayesianRidge()
bayes_model.fit(X_train, y_train);
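
# To see what was inferred, a small sketch: after fitting, `BayesianRidge` exposes the estimated precisions `alpha_` and `lambda_`, which correspond to $1/\sigma^2$ and $1/\tau$ respectively.

# +
print('inferred alpha (1/sigma^2):', bayes_model.alpha_)
print('inferred lambda (1/tau):', bayes_model.lambda_)
# -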
# This is not, however, identical to our construction in the previous section since it infers the $\sigma^2$ and $\tau$ parameters, rather than taking those as fixed inputs. More information can be found [here](https://scikit-learn.org/stable/modules/linear_model.html#bayesian-regression). The hidden chunk below demonstrates a hacky solution for running Bayesian regression in `scikit-learn` using known values for $\sigma^2$ and $\tau$, though it is hard to imagine a practical reason to do so.
# ````{toggle}
# By default, Bayesian regression in `scikit-learn` treats $\alpha = \frac{1}{\sigma^2}$ and $\lambda = \frac{1}{\tau}$ as random variables and assigns them the following prior distributions
#
# $$
# \begin{aligned}
# \alpha &\sim \text{Gamma}(\alpha_1, \alpha_2)
# \\
# \lambda &\sim \text{Gamma}(\lambda_1, \lambda_2).
# \end{aligned}
# $$
#
# Note that $E(\alpha) = \frac{\alpha_1}{\alpha_2}$ and $E(\lambda) = \frac{\lambda_1}{\lambda_2}$. To *fix* $\sigma^2$ and $\tau$, we can provide an extremely strong prior on $\alpha$ and $\lambda$, guaranteeing that their estimates will be approximately equal to their expected value.
#
# Suppose we want to use $\sigma^2 = 11.8$ and $\tau = 10$, or equivalently $\alpha = \frac{1}{11.8}$, $\lambda = \frac{1}{10}$. Then let
#
# $$
# \begin{aligned}
# \alpha_1 &= 10000 \cdot \frac{1}{11.8}, \\
# \alpha_2 &= 10000, \\
# \lambda_1 &= 10000 \cdot \frac{1}{10}, \\
# \lambda_2 &= 10000.
# \end{aligned}
# $$
#
# This guarantees that $\sigma^2$ and $\tau$ will be approximately equal to their pre-determined values. This can be implemented in `scikit-learn` as follows
#
# ```{code}
# big_number = 10**5
#
# # alpha
# alpha = 1/11.8
# alpha_1 = big_number*alpha
# alpha_2 = big_number
#
# # lambda
# lam = 1/10
# lambda_1 = big_number*lam
# lambda_2 = big_number
#
# # fit
# bayes_model = BayesianRidge(alpha_1 = alpha_1, alpha_2 = alpha_2, alpha_init = alpha,
# lambda_1 = lambda_1, lambda_2 = lambda_2, lambda_init = lam)
# bayes_model.fit(X_train, y_train);
# ```
#
# ````
# ## Poisson Regression
# GLMs are most commonly fit in Python through the `GLM` class from `statsmodels`. A simple Poisson regression example is given below.
#
# As we saw in the GLM concept section, a GLM is comprised of a random distribution and a link function. We identify the random distribution through the `family` argument to `GLM` (e.g. below, we specify the `Poisson` family). The default link function depends on the random distribution. By default, the Poisson model uses the link function
#
# $$
# \eta_n = g(\mu_n) = \log(\lambda_n),
# $$
#
# which is what we use below. For more information on the possible distributions and link functions, check out the `statsmodels` GLM [docs](https://www.statsmodels.org/stable/glm.html).
# +
import statsmodels.api as sm
X_train_with_constant = sm.add_constant(X_train)
poisson_model = sm.GLM(y_train, X_train_with_constant, family=sm.families.Poisson())
poisson_results = poisson_model.fit()
| content/c2/code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import seaborn as sns
# %matplotlib inline
df_samp = pd.read_csv('./data/reviews/reviews_01_2019.csv')
df_samp.head()
# +
df_samp['date'] = pd.to_datetime(df_samp['date'], yearfirst=True)
df_samp['year'] = df_samp['date'].dt.year
df_samp['month'] = df_samp['date'].dt.month
df_samp2 = df_samp.groupby(['year','month','listing_id']).size().reset_index()
df_samp2.rename({0: 'count'}, axis=1, inplace=True)
df_samp2.head()
# -
df_samp2['count'].unique()
plt.hist(df_samp2['count'])
# +
df_samp3 = df_samp2[(df_samp2.year == 2018) | (df_samp2.year == 2019)]
# df_samp3['monthly_availability'] = df_samp3['count']/30
# df_samp3 = df_samp3.groupby(['month','listing_id']).size().reset_index()
# df_samp3 = df_samp3.groupby(['month', 'listing_id'], as_index=False).mean().groupby('month')['monthly_availability'].mean().reset_index()
df_samp3 = df_samp3.groupby(['month', 'listing_id'], as_index=False).mean()
print(df_samp3[df_samp3['listing_id'] == 2595])
# +
skipList = {
2015: {
2: True,
4: True,
7: True
},
2016: {
3: True
},
2019: {
7: True,
8: True,
9: True,
10: True,
11: True,
12: True
},
}
reviews_df = pd.DataFrame({
"date": [],
"year": [],
"month": [],
"count": []
})
for yr in range(2015, 2020):
for mo in range(1,13):
if yr == 2019 and mo > 6:
break
elif yr in skipList and mo in skipList[yr]:
            # month with no data file; skip it
            print('skipped')
else:
zeroStr = '0' if mo < 10 else ''
df_rev = pd.read_csv('./data/reviews/reviews' + '_' + zeroStr + str(mo) + '_' + str(yr) + '.csv')
dateObj = pd.to_datetime('1/'+str(mo)+'/'+str(yr), dayfirst=True)
df_temp = pd.DataFrame({
"date": [dateObj],
"year": [dateObj.year],
"month": [dateObj.month],
"count": df_rev.shape[0]
})
            reviews_df = pd.concat([reviews_df, df_temp])  # DataFrame.append was removed in pandas 2.0
reviews_df = reviews_df.set_index("date")
print(reviews_df.head())
print(reviews_df.shape)
# -
sns.set(rc={'figure.figsize': (11,4)})
reviews_df['count'].plot(linewidth=2);
plt.title("Total Reviews, NYC")
plt.savefig("total_reviews_time.png")
| Reviews Time Series.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
### ESOL predictor: GCNN, random train/validate/test splits, representation = ConvMol object (from DeepChem)
# -
###load data from CSV in same folder as notebook
from deepchem.utils.save import load_from_disk
dataset_file= "./esol.csv"
dataset = load_from_disk(dataset_file)
print("Columns of dataset: %s" % str(dataset.columns.values))
print("Number of examples in dataset: %s" % str(dataset.shape[0]))
# +
###plot histogram of data to show distribution
# %matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
solubilities = np.array(dataset["measured log solubility in mols per litre"])
n, bins, patches = plt.hist(solubilities, 50, facecolor='green', alpha=0.75)
plt.xlabel('Measured log-solubility in mols/liter')
plt.ylabel('Number of compounds')
plt.title(r'Histogram of solubilities')
plt.grid(True)
plt.show()
# -
###featurize the data as graph convolution (ConvMol) objects; the fingerprint and Weave featurizers are left commented out
import deepchem as dc
#featurizer = dc.feat.CircularFingerprint(size=1024)
#featurizer = dc.feat.graph_features.WeaveFeaturizer()
featurizer = dc.feat.ConvMolFeaturizer()
loader = dc.data.CSVLoader(
tasks=["measured log solubility in mols per litre"], smiles_field="smiles",
featurizer=featurizer)
dataset = loader.featurize(dataset_file)
###randomly split data into train, validation, and test sets
splitter = dc.splits.RandomSplitter(dataset_file)
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(dataset, seed=0)
# +
###normalize all datasets
transformers = [dc.trans.NormalizationTransformer(transform_y=True, dataset=train_dataset)]
# apply the transformers and keep the transformed train/validation/test datasets
for transformer in transformers:
    train_dataset = transformer.transform(train_dataset)
    valid_dataset = transformer.transform(valid_dataset)
    test_dataset = transformer.transform(test_dataset)
# +
###fit the model to the data
#model = dc.models.MPNNModel(n_tasks=1)
model = dc.models.GraphConvModel(n_tasks=1, mode='regression', batch_size=50, random_seed=0, model_dir="./models/esol")
model.fit(train_dataset, nb_epoch=10, deterministic=True)
# -
dir(model)
model.tensorboard_log_frequency
# +
###evaluate the model's performance on train set
from deepchem.utils.evaluate import Evaluator
metric = dc.metrics.Metric(dc.metrics.r2_score)
evaluator = Evaluator(model, train_dataset, transformers)
r2score = evaluator.compute_model_performance([metric])
print(r2score)
### plot of train vs predicted train
predicted_train = model.predict(train_dataset)
true_train = train_dataset.y
plt.scatter(predicted_train, true_train)
plt.xlabel('Predicted esol')
plt.ylabel('Actual esol')
plt.title(r'Predicted esol vs. Actual esol of train set')
plt.xlim([-12,2])
plt.ylim([-12,2])
plt.plot([-12,2], [-12,2], color='k')
plt.show()
# +
###evaluate the model's performance on validation set
from deepchem.utils.evaluate import Evaluator
metric = dc.metrics.Metric(dc.metrics.r2_score)
evaluator = Evaluator(model, valid_dataset, transformers)
r2score = evaluator.compute_model_performance([metric])
print(r2score)
### plot of actual vs predicted validation
predicted_valid = model.predict(valid_dataset)
true_valid = valid_dataset.y
plt.scatter(predicted_valid, true_valid)
plt.xlabel('Predicted esol')
plt.ylabel('Actual esol')
plt.title(r'Predicted esol vs. Actual esol of validation set')
plt.xlim([-12,2])
plt.ylim([-12,2])
plt.plot([-12,2], [-12,2], color='k')
plt.show()
# +
###evaluate the model's performance on test set
from deepchem.utils.evaluate import Evaluator
metric = dc.metrics.Metric(dc.metrics.r2_score)
evaluator = Evaluator(model, test_dataset, transformers)
r2score = evaluator.compute_model_performance([metric])
print(r2score)
### plot of actual vs predicted test
predicted_test = model.predict(test_dataset)
true_test = test_dataset.y
plt.scatter(predicted_test, true_test)
plt.xlabel('Predicted esol')
plt.ylabel('Actual esol')
plt.title(r'Predicted esol vs. Actual esol of test set')
plt.xlim([-12,2])
plt.ylim([-12,2])
plt.plot([-12,2], [-12,2], color='k')
plt.show()
# -
model.model_dir = "./"
model.save()
| models/GroundTruthEsolModel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # An example notebook
# A [Jupyter notebook](http://jupyter.org/) mixes blocks of explanatory text, like the one you're reading now, with cells containing Python code (_inputs_) and the results of executing it (_outputs_). The code and its output—if any—are marked by `In [N]` and `Out [N]`, respectively, with `N` being the index of the cell. You can see an example in the computations below:
def f(x, y):
return x + 2*y
a = 4
b = 2
f(a, b)
# By default, Jupyter displays the result of the last instruction as the output of a cell, like it did above; however, `print` statements can display further results.
print(a)
print(b)
print(f(b, a))
# Jupyter also knows a few specific data types, such as Pandas data frames, and displays them in a more readable way:
import pandas as pd
pd.DataFrame({ 'foo': [1,2,3], 'bar': ['a','b','c'] })
# The index of the cells shows the order of their execution. Jupyter doesn't constrain it; to avoid confusing people, though, it is best to write your notebooks so that the cells are executed in the sequential order displayed. All cells are executed in the global Python scope; this means that, as we execute the code, all variables, functions and classes defined in a cell are available to the ones that follow.
# Notebooks can also include plots, as in the following cell:
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
f = plt.figure(figsize=(10,2))
ax = f.add_subplot(1,1,1)
ax.plot([0, 0.25, 0.5, 0.75, 1.0], np.random.random(5))
# As you might have noted, the cell above also printed a textual representation of the object returned from the plot, since it's the result of the last instruction in the cell. To prevent this, you can add a semicolon at the end, as in the next cell.
f = plt.figure(figsize=(10,2))
ax = f.add_subplot(1,1,1)
ax.plot([0, 0.25, 0.5, 0.75, 1.0], np.random.random(5));
| example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:python-public-policy] *
# language: python
# name: conda-env-python-public-policy-py
# ---
# # Homework 3: Data visualization
#
# 1. Complete the **Coding** exercise below.
# 1. **Tutorial:** Go through the first third of [Time Series Analysis with Pandas](https://www.dataquest.io/blog/tutorial-time-series-analysis-with-pandas/), up until the "Visualizing time series data" section.
#
# ## In-class exercise 1
# ### Step 1
#
# Load the request per capita dataset from https://storage.googleapis.com/python-public-policy/data/311_community_districts.csv.zip as `requests_by_cd` and display it.
# +
# your code here
# -
# ### Step 2
#
# Make a [histogram](https://plotly.com/python/histograms/) of the requests per capita.
# +
# your code here
# -
# ## In-class exercise 2
#
# Take the scatterplot example from [the lecture](https://padmgp-4506001-fall.rcnyu.org/user-redirect/notebooks/class_materials/lecture_3.ipynb) and [add a trendline](https://plotly.com/python/linear-fits/).
# +
# your code here
# -
# ## Coding
#
# We are going to look at the population count of different community districts over time.
# +
import pandas as pd
import plotly.express as px
# boilerplate for allowing PDF export
import plotly.io as pio
pio.renderers.default = "notebook_connected+pdf"
# -
# ### Step 1
#
# Read the data from the [New York City Population By Community Districts](https://data.cityofnewyork.us/City-Government/New-York-City-Population-By-Community-Districts/xi7c-iiu2/data) data set into a DataFrame called `pop_by_cd`. To get the URL:
#
# 1. Visit the page linked above.
# 1. Click `Export`.
# 1. Right-click `CSV`.
# 1. Click `Copy Link Address` (or `Location`, depending on your browser).
# +
# your code here
# -
# ### Step 2
#
# Prepare the data. Use the following code to [reshape](https://pandas.pydata.org/pandas-docs/stable/user_guide/reshaping.html) the DataFrame to have one row per community district per Census year.
# +
# turn the population columns into rows
populations = pd.melt(pop_by_cd, id_vars=['Borough', 'CD Number', 'CD Name'], var_name='year', value_name='population')
# turn the years into numbers
populations.year = populations.year.str.replace(' Population', '').astype(int)
populations
# -
# ### Step 3
#
# Create a line chart of the population over time for each community district in Manhattan. There should be one line for each.
#
# See the Plotly [Line Plot with column encoding color](https://plotly.com/python/line-charts/#line-plot-with-column-encoding-color) examples.
# +
# your code here
# -
# ### Step 4
#
# We are going to do some mapping using the `pop_by_cd` DataFrame from before. To do so, we need `borocd`s. Create that column with the values filled in. (See [Lecture 2](https://padmgp-4506001-fall.rcnyu.org/user-redirect/notebooks/class_materials/lecture_2.ipynb).)
# +
# your code here
# -
# ### Step 5
#
# Let's make a [choropleth map](https://www.data-to-viz.com/graph/choropleth.html) showing the population change from 2000 to 2010 for each community district. Adapt the `.choropleth_mapbox()` example in [Lecture 3](https://padmgp-4506001-fall.rcnyu.org/user-redirect/notebooks/class_materials/lecture_3.ipynb).
#
# If you get an error about `choropleth_mapbox() got an unexpected keyword argument 'featureidkey'`, go back and do the `Setup` above.
# +
# your code here
# -
# ### Step 6
#
# ***Analysis: Washington Heights and Inwood (the tall skinny community district at the top of Manhattan) are "up and coming" neighborhoods. In a few sentences: Why might the population have decreased?***
# YOUR ANSWER HERE
# Then, read the first three paragraphs of the `Demographics` section of [An Economic Snapshot of Washington Heights and Inwood from June 2015](https://www.osc.state.ny.us/osdc/rpt2-2016.pdf#page=2).
# ## Tutorials
#
# 1. Read [how to handle time series data in pandas](https://pandas.pydata.org/pandas-docs/stable/getting_started/intro_tutorials/09_timeseries.html)
# 1. Read the [Data Design Standards](https://xdgov.github.io/data-design-standards/)
# 1. Watch [this talk on audification/sonification](https://www.youtube.com/watch?v=55dIfA7C038). We won't be doing so in this class, but hopefully will provide some inspiration about different ways that data can be represented.
| hw_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:ap-northeast-2:806072073708:image/datascience-1.0
# ---
# # Part 5 : Create an End to End Pipeline
# <a id='overview-5'></a>
#
# ## [Overview](./0-AutoClaimFraudDetection.ipynb)
# * [Notebook 0 : Overview, Architecture and Data Exploration](./0-AutoClaimFraudDetection.ipynb)
# * [Notebook 1: Data Prep, Process, Store Features](./1-data-prep-e2e.ipynb)
# * [Notebook 2: Train, Check Bias, Tune, Record Lineage, and Register a Model](./2-lineage-train-assess-bias-tune-registry-e2e.ipynb)
# * [Notebook 3: Mitigate Bias, Train New Model, Store in Registry](./3-mitigate-bias-train-model2-registry-e2e.ipynb)
# * [Notebook 4: Deploy Model, Run Predictions](./4-deploy-run-inference-e2e.ipynb)
# * **[Notebook 5 : Create and Run an End-to-End Pipeline to Deploy the Model](./5-pipeline-e2e.ipynb)**
# * **[Architecture](#arch-5)**
# * **[Create an Automated Pipeline](#pipelines)**
# * **[Clean up](#cleanup)**
# In this notebook we build a SageMaker Pipeline that automates the entire end-to-end process. So far, we performed every step manually, from a data scientist's point of view.
#
# Now, taking the perspective of an ML engineer or MLOps practitioner, we will look at how to automate each of those steps.
# ### Load stored variables
#
# If you have run this notebook before, you can reuse the resources already created in AWS. Run the cell below to load the previously stored variables; you should see them printed. If nothing is printed, this is probably the first time you are running the notebook.
# %store -r
# #%store
# **<font color='red'>Important</font>: the previous notebooks must have been run so that the variables can be retrieved with the StoreMagic command.**
# ### Import libraries
# +
import json
import boto3
import pathlib
import sagemaker
import numpy as np
import pandas as pd
import awswrangler as wr
import demo_helpers
from sagemaker.xgboost.estimator import XGBoost
from sagemaker.workflow.pipeline import Pipeline
from sagemaker.workflow.steps import CreateModelStep
from sagemaker.sklearn.processing import SKLearnProcessor
from sagemaker.workflow.step_collections import RegisterModel
from sagemaker.workflow.steps import ProcessingStep, TrainingStep
from sagemaker.workflow.parameters import ParameterInteger, ParameterFloat, ParameterString
# -
# ### Set region and boto3 config
# +
# You can change this to a region of your choice
import sagemaker
region = sagemaker.Session().boto_region_name
print("Using AWS Region: {}".format(region))
boto3.setup_default_session(region_name=region)
boto_session = boto3.Session(region_name=region)
s3_client = boto3.client("s3", region_name=region)
sagemaker_boto_client = boto_session.client("sagemaker")
sagemaker_session = sagemaker.session.Session(
boto_session=boto_session, sagemaker_client=sagemaker_boto_client
)
sagemaker_role = sagemaker.get_execution_role()
account_id = boto3.client("sts").get_caller_identity()["Account"]
# +
# ======> Tons of output_paths
training_job_output_path = f"s3://{bucket}/{prefix}/training_jobs"
bias_report_output_path = f"s3://{bucket}/{prefix}/clarify-bias"
explainability_output_path = f"s3://{bucket}/{prefix}/clarify-explainability"
train_data_uri = f"s3://{bucket}/{prefix}/data/train/train.csv"
test_data_uri = f"s3://{bucket}/{prefix}/data/test/test.csv"
train_data_upsampled_s3_path = f"s3://{bucket}/{prefix}/data/train/upsampled/train.csv"
processing_dir = "/opt/ml/processing"
create_dataset_script_uri = f"s3://{bucket}/{prefix}/code/create_dataset.py"
pipeline_bias_output_path = f"s3://{bucket}/{prefix}/clarify-output/pipeline/bias"
deploy_model_script_uri = f"s3://{bucket}/{prefix}/code/deploy_model.py"
# ======> variables used for parameterizing the notebook run
flow_instance_count = 1
flow_instance_type = "ml.m5.4xlarge"
train_instance_count = 1
train_instance_type = "ml.m4.xlarge"
deploy_model_instance_type = "ml.m4.xlarge"
# -
# <a id ='arch-5'> </a>
# ### Architecture : Create a SageMaker Pipeline to Automate All the Steps from Data Prep to Model Deployment
# [overview](#overview-5)
#
# ![End to end pipeline architecture](./images/e2e-5-pipeline-v3b.png)
# <a id='pipelines'></a>
#
# ## SageMaker Pipeline
#
# - [Step 1: Claims Data Wrangler Preprocessing Step](#claims-data-wrangler)
# - [Step 2: Customers Data Wrangler Preprocessing step](#data-wrangler)
# - [Step 3: Dataset and train test split](#dataset-train-test)
# - [Step 4: Train XGboost Model](#pipe-train-xgb)
# - [Step 5: Model Pre-deployment](#pipe-pre-deploy)
# - [Step 6: Use Clarify to Detect Bias](#pipe-detect-bias)
# - [Step 7: Register Model](#pipe-Register-Model)
# - [Step 8: Deploy Model](#pipe-deploy-model)
# - [Combine the Pipeline Steps and Run](#define-pipeline)
#
#
# [back to overview](#overview-5)
#
#
#
# ___
# Now that we have performed each step of the machine learning workflow manually, we can automate those steps in a way that allows faster model experimentation without sacrificing transparency or model tracking. In this section we create a pipeline that trains a new model, persists the model in SageMaker, and then adds it to the model registry.
# ### Pipeline parameters
#
# An important feature of SageMaker Pipelines is that, while the steps are defined up front, the parameters passed to those steps can be changed at execution time without redefining the pipeline. This is achieved by using ParameterInteger, ParameterFloat, or ParameterString to pre-define values that can later be overridden when calling `pipeline.start(parameters=parameters)`. Only parameters defined this way can be overridden.
# +
train_instance_param = ParameterString(
name="TrainingInstance",
default_value="ml.m4.xlarge",
)
model_approval_status = ParameterString(
name="ModelApprovalStatus", default_value="PendingManualApproval"
)
# -
# ### Define Caching
#
# With caching enabled, re-running the same pipeline propagates the cached result of each step to the next step instead of recomputing it. Caching only considers successful executions; if there are several, the result of the most recent successful execution is used.
# +
# from sagemaker.workflow.steps import CacheConfig
# cache_config = CacheConfig(enable_caching=True, expire_after="7d")
# -
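# If you do enable caching, the `CacheConfig` is attached to each step you want cached. A minimal sketch follows, kept commented out like the cell above since this pipeline leaves caching disabled; it assumes the `cache_config` from above and the Data Wrangler step defined later in this notebook.
# +
# claims_flow_step = ProcessingStep(
#     name="ClaimsDataWranglerProcessingStep",
#     processor=flow_processor,
#     inputs=flow_step_inputs,
#     outputs=flow_step_outputs,
#     cache_config=cache_config,  # reuse the most recent successful result of this step
# )
# -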
# <a id='claims-data-wrangler'></a>
# ### Step 1: Claims Data Wrangler Preprocessing Step
# [pipeline](#pipelines)
# #### Upload flow to S3
#
# The flow file becomes the input to the first step, so it must be in S3.
s3_client.upload_file(
Filename="claims.flow", Bucket=bucket, Key=f"{prefix}/dataprep-notebooks/claims.flow"
)
claims_flow_uri = f"s3://{bucket}/{prefix}/dataprep-notebooks/claims.flow"
print(f"Claims flow file uploaded to S3")
# #### Define the first Data Wrangler step's inputs
# +
with open("claims.flow", "r") as f:
claims_flow = json.load(f)
flow_step_inputs = []
# flow file contains the code for each transformation
flow_file_input = sagemaker.processing.ProcessingInput(
source=claims_flow_uri, destination=f"{processing_dir}/flow", input_name="flow"
)
flow_step_inputs.append(flow_file_input)
# parse the flow file for S3 inputs to the Data Wrangler job
for node in claims_flow["nodes"]:
if "dataset_definition" in node["parameters"]:
data_def = node["parameters"]["dataset_definition"]
# Fixed: The example code throws an error outside the us-east-2 region.
data_def["s3ExecutionContext"]["s3Uri"] = f's3://{bucket}/fraud-detect-demo/data/raw/claims.csv'
name = data_def["name"]
s3_input = sagemaker.processing.ProcessingInput(
source=data_def["s3ExecutionContext"]["s3Uri"],
destination=f"{processing_dir}/{name}",
input_name=name,
)
flow_step_inputs.append(s3_input)
# -
# #### Define outputs for the first Data Wrangler step
# +
claims_output_name = (
f"{claims_flow['nodes'][-1]['node_id']}.{claims_flow['nodes'][-1]['outputs'][0]['name']}"
)
flow_step_outputs = []
flow_output = sagemaker.processing.ProcessingOutput(
output_name=claims_output_name,
feature_store_output=sagemaker.processing.FeatureStoreOutput(feature_group_name=claims_fg_name),
app_managed=True,
)
flow_step_outputs.append(flow_output)
# -
# #### Define processor and processing step
# +
# You can find the proper image uri by exporting your Data Wrangler flow to a pipeline notebook
# =================================
#image_uri = "415577184552.dkr.ecr.us-east-2.amazonaws.com/sagemaker-data-wrangler-container:1.0.2"
from sagemaker import image_uris
image_uri = image_uris.retrieve(framework='data-wrangler',region=region)
flow_processor = sagemaker.processing.Processor(
role=sagemaker_role,
image_uri=image_uri,
instance_count=flow_instance_count,
instance_type=flow_instance_type,
max_runtime_in_seconds=86400,
)
### ProcessingStep (Data Wrangler for Claim Data)
claims_flow_step = ProcessingStep(
name="ClaimsDataWranglerProcessingStep",
processor=flow_processor,
inputs=flow_step_inputs,
outputs=flow_step_outputs,
)
# -
# <a id='data-wrangler'></a>
# ### Step 2: Customers Data Wrangler preprocessing step
#
# [pipeline](#pipelines)
s3_client.upload_file(
Filename="customers.flow", Bucket=bucket, Key=f"{prefix}/dataprep-notebooks/customers.flow"
)
customers_flow_uri = f"s3://{bucket}/{prefix}/dataprep-notebooks/customers.flow"
print(f"Customers flow file uploaded to S3")
# +
with open("customers.flow", "r") as f:
customers_flow = json.load(f)
flow_step_inputs = []
# flow file contains the code for each transformation
flow_file_input = sagemaker.processing.ProcessingInput(
    source=customers_flow_uri, destination=f"{processing_dir}/flow", input_name="flow"
)
flow_step_inputs.append(flow_file_input)
# parse the flow file for S3 inputs to the Data Wrangler job
for node in customers_flow["nodes"]:
if "dataset_definition" in node["parameters"]:
data_def = node["parameters"]["dataset_definition"]
# Fixed: The example code throws an error outside the us-east-2 region.
data_def["s3ExecutionContext"]["s3Uri"] = f's3://{bucket}/fraud-detect-demo/data/raw/customers.csv'
name = data_def["name"]
s3_input = sagemaker.processing.ProcessingInput(
source=data_def["s3ExecutionContext"]["s3Uri"],
destination=f"{processing_dir}/{name}",
input_name=name,
)
flow_step_inputs.append(s3_input)
# +
customers_output_name = (
f"{customers_flow['nodes'][-1]['node_id']}.{customers_flow['nodes'][-1]['outputs'][0]['name']}"
)
flow_step_outputs = []
flow_output = sagemaker.processing.ProcessingOutput(
output_name=customers_output_name,
feature_store_output=sagemaker.processing.FeatureStoreOutput(
feature_group_name=customers_fg_name
),
app_managed=True,
)
flow_step_outputs.append(flow_output)
### ProcessingStep (Data Wrangler for Customer Data)
customers_flow_step = ProcessingStep(
name="CustomersDataWranglerProcessingStep",
processor=flow_processor,
inputs=flow_step_inputs,
outputs=flow_step_outputs,
)
# -
# <a id='dataset-train-test'></a>
# ### Step 3: Create Dataset and Train/Test Split
#
# [pipeline](#pipelines)
# +
s3_client.upload_file(
Filename="create_dataset.py", Bucket=bucket, Key=f"{prefix}/code/create_dataset.py"
)
create_dataset_processor = SKLearnProcessor(
framework_version="0.23-1",
role=sagemaker_role,
instance_type="ml.m5.xlarge",
instance_count=1,
base_job_name="fraud-detection-demo-create-dataset",
sagemaker_session=sagemaker_session,
)
### ProcessingStep
create_dataset_step = ProcessingStep(
name="CreateDataset",
processor=create_dataset_processor,
outputs=[
sagemaker.processing.ProcessingOutput(
output_name="train_data", source="/opt/ml/processing/output/train"
),
sagemaker.processing.ProcessingOutput(
output_name="test_data", source="/opt/ml/processing/output/test"
),
],
job_arguments=[
"--claims-feature-group-name",
claims_fg_name,
"--customers-feature-group-name",
customers_fg_name,
"--bucket-name",
bucket,
"--bucket-prefix",
prefix,
"--athena-database-name",
database_name,
"--claims-table-name",
claims_table,
"--customers-table-name",
customers_table,
"--region",
region
],
code=create_dataset_script_uri,
)
# -
# <a id='pipe-train-xgb'></a>
# ### Step 4: Train XGBoost Model
# This step uses the ParameterString `train_instance_param` defined at the beginning of the pipeline.
#
# [pipeline](#pipelines)
# +
hyperparameters = {
"max_depth": "3",
"eta": "0.2",
"objective": "binary:logistic",
"num_round": "100",
}
xgb_estimator = XGBoost(
entry_point="xgboost_starter_script.py",
output_path=training_job_output_path,
code_location=training_job_output_path,
hyperparameters=hyperparameters,
role=sagemaker_role,
instance_count=train_instance_count,
instance_type=train_instance_param,
framework_version="1.0-1",
)
### TrainingStep
train_step = TrainingStep(
name="XgboostTrain",
estimator=xgb_estimator,
inputs={
"train": sagemaker.inputs.TrainingInput(
s3_data=create_dataset_step.properties.ProcessingOutputConfig.Outputs[
"train_data"
].S3Output.S3Uri
)
},
)
# -
# <a id='pipe-pre-deploy'></a>
# ### Step 5: Model Pre-Deployment Step
#
# [pipeline](#pipelines)
# +
model = sagemaker.model.Model(
name="fraud-detection-demo-pipeline-xgboost",
image_uri=train_step.properties.AlgorithmSpecification.TrainingImage,
model_data=train_step.properties.ModelArtifacts.S3ModelArtifacts,
sagemaker_session=sagemaker_session,
role=sagemaker_role,
)
inputs = sagemaker.inputs.CreateModelInput(instance_type="ml.m4.xlarge")
### CreateModelStep
create_model_step = CreateModelStep(name="ModelPreDeployment", model=model, inputs=inputs)
# -
# <a id='pipe-detect-bias'></a>
#
# ### Step 6: Run Bias Metrics with Clarify
# [pipeline](#pipelines)
# #### Clarify configuration
# +
bias_data_config = sagemaker.clarify.DataConfig(
s3_data_input_path=create_dataset_step.properties.ProcessingOutputConfig.Outputs[
"train_data"
].S3Output.S3Uri,
s3_output_path=pipeline_bias_output_path,
label="fraud",
dataset_type="text/csv",
)
bias_config = sagemaker.clarify.BiasConfig(
label_values_or_threshold=[0],
facet_name="customer_gender_female",
facet_values_or_threshold=[1],
)
analysis_config = bias_data_config.get_config()
analysis_config.update(bias_config.get_config())
analysis_config["methods"] = {"pre_training_bias": {"methods": "all"}}
clarify_config_dir = pathlib.Path("config")
clarify_config_dir.mkdir(exist_ok=True)
with open(clarify_config_dir / "analysis_config.json", "w") as f:
json.dump(analysis_config, f)
s3_client.upload_file(
Filename="config/analysis_config.json",
Bucket=bucket,
Key=f"{prefix}/clarify-config/analysis_config.json",
)
# -
# #### Clarify processing step
# +
clarify_processor = sagemaker.processing.Processor(
base_job_name="fraud-detection-demo-clarify-processor",
image_uri=sagemaker.clarify.image_uris.retrieve(framework="clarify", region=region),
role=sagemaker.get_execution_role(),
instance_count=1,
instance_type="ml.c5.xlarge",
)
### ProcessingStep (Clarify)
clarify_step = ProcessingStep(
name="ClarifyProcessor",
processor=clarify_processor,
inputs=[
sagemaker.processing.ProcessingInput(
input_name="analysis_config",
source=f"s3://{bucket}/{prefix}/clarify-config/analysis_config.json",
destination="/opt/ml/processing/input/config",
),
sagemaker.processing.ProcessingInput(
input_name="dataset",
source=create_dataset_step.properties.ProcessingOutputConfig.Outputs[
"train_data"
].S3Output.S3Uri,
destination="/opt/ml/processing/input/data",
),
],
outputs=[
sagemaker.processing.ProcessingOutput(
source="/opt/ml/processing/output/analysis.json",
destination=pipeline_bias_output_path,
output_name="analysis_result",
)
],
)
# -
# <a id='pipe-Register-Model'></a>
# ### Step 7: Register Model
#
# This step uses the ParameterString `model_approval_status` defined at the beginning of the pipeline code.
#
# [pipeline](#pipelines)
# +
model_metrics = demo_helpers.ModelMetrics(
bias=sagemaker.model_metrics.MetricsSource(
s3_uri=clarify_step.properties.ProcessingOutputConfig.Outputs[
"analysis_result"
].S3Output.S3Uri,
content_type="application/json",
)
)
### RegisterModel
register_step = RegisterModel(
name="XgboostRegisterModel",
estimator=xgb_estimator,
model_data=train_step.properties.ModelArtifacts.S3ModelArtifacts,
content_types=["text/csv"],
response_types=["text/csv"],
inference_instances=["ml.t2.medium", "ml.m5.xlarge"],
transform_instances=["ml.m5.xlarge"],
model_package_group_name=mpg_name,
approval_status=model_approval_status,
model_metrics=model_metrics,
)
# -
# <a id='pipe-deploy-model'></a>
# ### Step 8: Deploy Model
#
#
# [pipeline](#pipelines)
# +
s3_client.upload_file(
Filename="deploy_model.py", Bucket=bucket, Key=f"{prefix}/code/deploy_model.py"
)
deploy_model_processor = SKLearnProcessor(
framework_version="0.23-1",
role=sagemaker_role,
instance_type="ml.t3.medium",
instance_count=1,
base_job_name="fraud-detection-demo-deploy-model",
sagemaker_session=sagemaker_session,
)
### ProcessingStep (Deployment)
deploy_step = ProcessingStep(
name="DeployModel",
processor=deploy_model_processor,
job_arguments=[
"--model-name",
create_model_step.properties.ModelName,
"--region",
region,
"--endpoint-instance-type",
deploy_model_instance_type,
"--endpoint-name",
"xgboost-model-pipeline-0120",
],
code=deploy_model_script_uri,
)
# -
# <a id='define-pipeline'></a>
#
# ### Combine the Pipeline Steps and Run
# [pipeline](#pipelines)
#
# Although it is easier to reason about when they are, the parameters and steps do not have to be listed in order; the pipeline DAG parses them out correctly.
# +
pipeline_name = f"FraudDetectDemo"
# %store pipeline_name
pipeline = Pipeline(
name=pipeline_name,
parameters=[train_instance_param, model_approval_status],
steps=[
claims_flow_step,
customers_flow_step,
create_dataset_step,
train_step,
create_model_step,
clarify_step,
register_step,
deploy_step,
],
)
# -
# ### Submit the pipeline definition to the SageMaker Pipeline service
#
# The `upsert()` method updates the pipeline definition if the pipeline already exists, or creates a new pipeline, by calling the UpdatePipeline and CreatePipeline APIs respectively.
#
# - https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_UpdatePipeline.html
# - https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_CreatePipeline.html
pipeline.upsert(role_arn=sagemaker_role)
# ### View the entire pipeline definition
#
# Viewing the pipeline definition, with all of its interpolated string variables, can help when debugging pipeline bugs. The output of the code below is long, so it has been commented out.
# +
#json.loads(pipeline.describe()['PipelineDefinition'])
# -
# ### Run the pipeline
#
# The `start()` method calls the StartPipelineExecution API to trigger a pipeline execution.
# - https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_StartPipelineExecution.html
#
# It takes roughly 20-25 minutes to complete. You can follow the progress of the pipeline job in the SageMaker Studio Components panel.
# ![image.png](attachment:image.png)
# Special pipeline parameters can be defined or changed here
parameters = {'TrainingInstance': 'ml.m5.xlarge'}
start_response = pipeline.start(parameters=parameters)
# ### Operating the pipeline: waiting for the pipeline and checking its execution status
# Let's inspect the execution status of the workflow.
start_response.describe()
# Wait until the execution completes.
start_response.wait()
# <pre>
# </pre>
# ### After completion it should look like the following.
# ![image.png](attachment:image.png)
# ![image.png](attachment:image.png)
# List the executed steps. For each step this shows the start and end times, status, and metadata (ARN, processing/training job).
display(start_response.list_steps())
# <a id='cleanup'></a>
# ## Clean up
#
# [overview](#overview-5)
# ___
#
# After running the demo, you should remove the resources that were created. You can also delete every object in the project's S3 directory by passing the keyword argument `delete_s3_objects=True`.
from demo_helpers import delete_project_resources
# +
# delete_project_resources(
# sagemaker_boto_client=sagemaker_boto_client,
# endpoint_name=endpoint_name,
# pipeline_name=pipeline_name,
# mpg_name=mpg_name,
# prefix=prefix,
# delete_s3_objects=False,
# bucket_name=bucket)
| 5-pipeline-e2e.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: learn-env
# language: python
# name: learn-env
# ---
import pyspark
import numpy as np
from pyspark.ml.recommendation import ALS, ALSModel
from pyspark.sql import functions, types
from pyspark.ml.evaluation import RegressionEvaluator
spark = (pyspark.sql.SparkSession.builder
.master("local[*]")
.getOrCreate())
# !head data/ratings.json
path_users = 'data/users.dat'
users = (spark.read.load("data/users.dat",
format="csv", sep=":", inferSchema="true")
.drop('_c1', '_c3', '_c5', '_c7'))
users = (users.withColumnRenamed(users.schema.names[0], 'userID')
.withColumnRenamed(users.schema.names[1], 'gender')
.withColumnRenamed(users.schema.names[2], 'age')
.withColumnRenamed(users.schema.names[3], 'occupation')
.withColumnRenamed(users.schema.names[4], 'zip'))
users.schema.names
print((users.count(), len(users.columns)))
users.show(5)
# read in the dataset into pyspark DataFrame
path_ratings = 'data/ratings.json'
ratings = spark.read.json(path_ratings)
ratings.show(5)
print((ratings.count(), len(ratings.columns)))
ratings.schema.names
ratings.printSchema()
ratings.show(5)
ratings.persist()
# #### inspect requests dataset
requests = spark.read.json('data/requests.json')
requests.persist()
requests.printSchema()
requests.show(5)
print((requests.count(), len(requests.columns)))
# ### transforming timestamp column of ratings
# convert format of datetime column 'timestamp' from epoch to standard
ratings = (ratings.withColumn('timestamp',
functions.date_format(ratings.timestamp.cast(dataType=types.TimestampType()),
"yyyy-MM-dd HH:mm:ss")))
ratings.persist()
ratings.show(5)
ratings = ratings.sort(ratings.timestamp.asc())
ratings.persist()
ratings.show(5)
print((ratings.count(), len(ratings.columns)))
# 80/20 time-based split: 719,949 ratings -> 575,959 train / 143,990 test
719949 * .8
719949 * .2
# Sort by timestamp and take the earliest 80% of ratings as the training set
ratings_train = ratings.sort(ratings.timestamp.asc()).limit(575959)
ratings_train.persist()
ratings_train.show(5)
print((ratings_train.count(), len(ratings_train.columns)))
ratings_train.sort(ratings_train.timestamp.asc()).show(10)
ratings_test = ratings.subtract(ratings_train)
ratings_test.persist()
print((ratings_test.count(), len(ratings_test.columns)))
# ### model
# build recommendation model using ALS on the training data
als = ALS(
rank=10,
maxIter=10,
userCol='user_id',
itemCol='movie_id',
ratingCol='rating',
)
# +
# fit the ALS model on training set
als_model = als.fit(ratings_train)
# -
# ### predict on ratings_test with fitted model
# generate predictions with your model for the test set by using the transform method on your ALS model
preds_test = als_model.transform(ratings_test)
preds_test.persist()
# +
# evaluate your model and print out the RMSE from your test set
# -
preds_test.sort(preds_test.timestamp.asc()).show(10)
preds_test.where(preds_test['prediction'].isNotNull()).show()
# +
# Evaluate the model by computing the RMSE on the test data
# preds_test = als_model.transform(ratings_test)
# -
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating",
predictionCol="prediction")
rmse = evaluator.evaluate(preds_test)
rmse
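# Note: `als_model.transform` returns NaN predictions for users or movies that only appear in the
# test split, and `RegressionEvaluator` then returns NaN for the RMSE. Two common workarounds are
# sketched below (the `coldStartStrategy` option assumes Spark 2.2+); neither was part of the run above.
# +
from pyspark.sql import functions as F

# Option 1: drop NaN predictions before computing the RMSE
preds_eval = preds_test.where(~F.isnan("prediction"))
print(evaluator.evaluate(preds_eval))

# Option 2: have ALS drop cold-start rows at transform time
als_drop = ALS(
    rank=10,
    maxIter=10,
    userCol='user_id',
    itemCol='movie_id',
    ratingCol='rating',
    coldStartStrategy='drop',
)
# -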
# +
# inspect user_factors and item_factors
# -
user_factors = als_model.userFactors
user_factors.sort(user_factors.id.asc()).show(10)
user_factors.show(10)
print((user_factors.count(), len(user_factors.columns)))
item_factors = als_model.itemFactors
item_factors.show(10)
print((item_factors.count(), len(item_factors.columns)))
user_1570_row = user_factors[user_factors['id'] == 1570].first()
# note - Alex and I got significantly different values for the below
user_1570_row
user_1570_factors = np.array(user_1570_row['features'])
user_1570_factors
toy_story_row = item_factors[item_factors['id'] == 1].first()
toy_story_factors = np.array(toy_story_row['features'])
toy_story_row
toy_story_factors
user_1570_factors
toy_story_factors
user_1570_factors @ toy_story_factors
# +
# predictions.sort(predictions.timestamp.asc()).show(10)
# +
# ratings.where(col('rating').isNull())
# -
user_1570_preds = preds_test[preds_test['user_id'] == 1570]
user_1570_preds.sort('movie_id').show()
# produce the top 10 recommended movies for each user
recs = als_model.recommendForAllUsers(numItems=10)
recs.persist()
# returns list of lists
recs.sort(recs.user_id.asc()).show()
# +
# normalized or standard_scalar, row-wise, normalize per movie
# +
# recs[recs['user_id']==10].first()['recommendations']
# +
# # !grep 3086 < data/movies.csv
# -
preds_requests = als_model.transform(requests)
preds_requests.persist()
preds_requests.where(preds_requests['prediction'].isNotNull()).show()
preds_requests.sort(preds_requests.timestamp.asc()).show(10)
print((preds_requests.count(), len(preds_requests.columns)))
| nb_lee_v1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from urllib.request import urlopen
html = urlopen("https://www.handbook.unsw.edu.au/ArchitectureAndBuilding/browse?interest_value=68b44253db96df002e4c126b3a961980")
wc = html.read()
# decode the raw bytes before writing them out as text
with open('page_content1.html', 'w', encoding='latin-1') as fid:
    fid.write(wc.decode('latin-1'))
import requests

def url_save(url, filename='local_copy.html'):
    # download the page at `url` and save its HTML content locally
    response = requests.get(url)
    with open(filename, 'w') as f:  # content will be saved in a file named local_copy.html
        f.write(response.text)

url_save('https://www.handbook.unsw.edu.au/ArchitectureAndBuilding/browse?interest_value=68b44253db96df002e4c126b3a961980')
import bs4

# 'All courses _ Swinburne University of Technology.html' is a locally saved page, not a URL,
# so read it from disk instead of trying to fetch it with requests.get()
data = open('All courses _ Swinburne University of Technology.html').read()
soup = bs4.BeautifulSoup(data, 'lxml')
s = soup.select('td')
s
# +
from pyquery import PyQuery
html = open("All courses _ Swinburne University of Technology.html", 'r').read() # local html
query = PyQuery(html)
query("td").eq(1).text()
# -
| Australian_Universities/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# +
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
from tensorflow.keras import models
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from tensorflow.keras.optimizers import RMSprop, Adam
from tensorflow.keras.utils import to_categorical
# +
def prepare_data(data):
""" Prepare data for modeling
input: data frame with labels und pixel data
output: image and label array """
image_array = np.zeros(shape=(len(data), 48, 48, 1))
image_label = np.array(list(map(int, data['emotion'])))
for i, row in enumerate(data.index):
image = np.fromstring(data.loc[row, 'pixels'], dtype=int, sep=' ')
        image = np.reshape(image, (48, 48, 1))  # grayscale images have a single channel
image_array[i] = image
return image_array, image_label
def plot_one_emotion_grayhist(data, img_arrays, img_labels, label=0):
fig, axs = plt.subplots(1, 5, figsize=(25, 12))
fig.subplots_adjust(hspace=.2, wspace=.2)
axs = axs.ravel()
for i in range(5):
idx = data[data['emotion'] == label].index[i]
axs[i].hist(img_arrays[idx][:, :, 0], 256, [0, 256])
axs[i].set_title(emotions[img_labels[idx]])
axs[i].set_xticklabels([])
axs[i].set_yticklabels([])
def plot_one_emotion(data, img_arrays, img_labels, label=0):
fig, axs = plt.subplots(1, 7, figsize=(25, 12))
fig.subplots_adjust(hspace=.2, wspace=.2)
axs = axs.ravel()
for i in range(7):
idx = data[data['emotion'] == label].index[i]
axs[i].imshow(img_arrays[idx][:, :, 0], cmap='gray')
axs[i].set_title(emotions[img_labels[idx]])
axs[i].set_xticklabels([])
axs[i].set_yticklabels([])
def plot_all_emotions(data, img_arrays, img_labels):
fig, axs = plt.subplots(1, 7, figsize=(30, 12))
fig.subplots_adjust(hspace=.2, wspace=.2)
axs = axs.ravel()
for i in range(7):
        idx = data[data['emotion'] == i].index[0]  # index of the first image with this emotion
axs[i].imshow(img_arrays[idx][:, :, 0], cmap='gray')
axs[i].set_title(emotions[img_labels[idx]])
axs[i].set_xticklabels([])
axs[i].set_yticklabels([])
def plot_image_and_emotion(test_image_array, test_image_label, pred_test_labels, image_number):
""" Function to plot the image and compare the prediction results with the label """
fig, axs = plt.subplots(1, 2, figsize=(12, 6), sharey=False)
bar_label = emotions.values()
axs[0].imshow(test_image_array[image_number], 'gray')
axs[0].set_title(emotions[test_image_label[image_number]])
axs[1].bar(bar_label, pred_test_labels[image_number],
color='orange', alpha=0.7)
axs[1].grid()
plt.show()
def plot_compare_distributions(img_labels_1, img_labels_2, title1='', title2=''):
df_array1 = pd.DataFrame()
df_array2 = pd.DataFrame()
df_array1['emotion'] = img_labels_1
df_array2['emotion'] = img_labels_2
fig, axs = plt.subplots(1, 2, figsize=(12, 6), sharey=False)
x = emotions.values()
y = df_array1['emotion'].value_counts()
keys_missed = list(set(emotions.keys()).difference(set(y.keys())))
for key_missed in keys_missed:
y[key_missed] = 0
axs[0].bar(x, y.sort_index(), color='orange')
axs[0].set_title(title1)
axs[0].grid()
y = df_array2['emotion'].value_counts()
keys_missed = list(set(emotions.keys()).difference(set(y.keys())))
for key_missed in keys_missed:
y[key_missed] = 0
axs[1].bar(x, y.sort_index())
axs[1].set_title(title2)
axs[1].grid()
plt.show()
emotions = {0: 'Angry', 1: 'Disgust', 2: 'Fear',
3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
# -
df_raw = pd.read_csv("D:/mycodes/AIFER/data/fer2013.csv")
df_raw.head()
df_raw['Usage'].value_counts() # 8:1:1
# +
df_train = df_raw[df_raw['Usage'] == 'Training']
df_val = df_raw[df_raw['Usage'] == 'PublicTest']
df_test = df_raw[df_raw['Usage'] == 'PrivateTest']
X_train, y_train = prepare_data(df_train)
X_val, y_val = prepare_data(df_val)
X_test, y_test = prepare_data(df_test)
y_train_oh = to_categorical(y_train)
y_val_oh = to_categorical(y_val)
y_test_oh = to_categorical(y_test)
plot_all_emotions(df_train, X_train, y_train)
# -
for label in emotions.keys():
plot_one_emotion(df_train, X_train, y_train, label=label)
for label in emotions.keys():
plot_one_emotion_grayhist(df_train, X_train, y_train, label=label)
plot_compare_distributions(
y_train, y_val, title1='train labels', title2='val labels')
# +
n_sample, nrow, ncol, nchannel = X_train.shape
X = X_train.reshape((n_sample, ncol * nrow * nchannel))
pca = PCA(n_components=2, whiten=True)
pca.fit(X)
print(pca.explained_variance_ratio_)
X_pca = pca.transform(X)
# -
plt.xlabel('pca_dim1')
plt.ylabel('pca_dim2')
plt.title('Images look like when they are in 2-dim')
plt.scatter(X_pca[:, 0], X_pca[:, 1], color='green', marker=".")
distortions = []
K = range(1, 10)
for k in K:
kmeans = KMeans(n_clusters=k).fit(X_pca)
kmeans.fit(X_pca)
distortions.append(sum(np.min(
cdist(X_pca, kmeans.cluster_centers_, 'euclidean'), axis=1)) / X_pca.shape[0])
plt.plot(K, distortions, 'bx-')
plt.xlabel('k')
plt.ylabel('Distortion')
plt.title('The Elbow Method showing the optimal k')
for k in range(1, 9):
plt.text(k+0.65, 0.3, f"{distortions[k]-distortions[k-1]:.2f}",
bbox=dict(facecolor='green', alpha=0.5))
plt.show()
| notebooks/Day03_EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Bayesian Statistical Inference
#
# <NAME>, 2016 (with input from Ivezic $\S5$, Bevington, <NAME>'s [Bayesian Stats](http://seminar.ouml.org/lectures/bayesian-statistics/) and [MCMC](http://seminar.ouml.org/lectures/monte-carlo-markov-chain-mcmc/) lectures, and [<NAME>](http://twiecki.github.io/blog/2015/11/10/mcmc-sampling/).)
#
# Up to now we have been using Classical Inference: finding model parameters that maximize the
# **likelihood** $p(D|M)$.
#
# In Bayesian inference, the argument is that probability statements can be made not just for data, but also models and model parameters. As a result, we instead evaluate the **posterior probability** taking into account **prior** information.
#
# Recall from the BasicStats lecture that Bayes' Rule is:
# $$p(M|D) = \frac{p(D|M)p(M)}{p(D)},$$
# where $D$ is for data and $M$ is for model.
#
# We wrote this in words as:
# $${\rm Posterior Probability} = \frac{{\rm Likelihood}\times{\rm Prior}}{{\rm Evidence}}.$$
#
# If we explicitly recognize prior information, $I$, and the model parameters, $\theta$, then we can write:
# $$p(M,\theta|D,I) = \frac{p(D|M,\theta,I)p(M,\theta|I)}{p(D|I)},$$
# where we will omit the explict dependence on $\theta$ by writing $M$ instead of $M,\theta$ where appropriate. However, as the prior can be expanded to
# $$p(M,\theta|I) = p(\theta|M,I)p(M|I),$$
# it will still appear in the term $p(\theta|M,I)$.
#
# Note that it is often that case that $p(D|I)$ is not evaluated explictly since the likelihood can be normalized such that it is unity or we will instead take the ratio of two posterior probabilities such that this term cancels out.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Analysis of a Heteroscedastic Gaussian distribution with Bayesian Priors
#
# Consider the case of measuring a rod as we discussed previously. We want to know the posterior pdf for the length of the rod, $p(M,\theta|D,I) = p(\mu|\{x_i\},\{\sigma_i\},I)$.
#
# For the likelihood we have
# $$L = p(\{x_i\}|\mu,I) = \prod_{i=1}^N \frac{1}{\sigma_i\sqrt{2\pi}} \exp\left(\frac{-(x_i-\mu)^2}{2\sigma_i^2}\right).$$
#
# In the Bayesian case, we also need a prior. We'll adopt a uniform distribution given by
# $$p(\mu|I) = C, \; {\rm for} \; \mu_{\rm min} < \mu < \mu_{\rm max},$$
# where $C = \frac{1}{\mu_{\rm max} - \mu_{\rm min}}$ between the min and max and is $0$ otherwise.
#
# The log of the posterior pdf is then
# $$\ln L = {\rm constant} - \sum_{i=1}^N \frac{(x_i - \mu)^2}{2\sigma_i^2}.$$
#
# This is exactly the same as we saw before, except that the value of the constant is different. Since the constant doesn't come into play, we get the same result as before:
#
# $$\mu^0 = \frac{\sum_i^N (x_i/\sigma_i^2)}{\sum_i^N (1/\sigma_i^2)},$$
# with uncertainty
# $$\sigma_{\mu} = \left( \sum_{i=1}^N \frac{1}{\sigma_i^2}\right)^{-1/2}.$$
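#
# As a quick numerical sanity check of these two expressions (with made-up heteroscedastic measurements, purely illustrative):
# +
import numpy as np

x_i = np.array([9.8, 10.2, 10.5, 9.9])      # toy measurements of the rod
sigma_i = np.array([0.5, 0.3, 0.8, 0.4])    # their (heteroscedastic) uncertainties

w = 1.0 / sigma_i**2                        # inverse-variance weights
mu_0 = np.sum(w * x_i) / np.sum(w)          # weighted mean from the formula above
sigma_mu = np.sum(w)**-0.5                  # its uncertainty
print("mu_0 = %.3f, sigma_mu = %.3f" % (mu_0, sigma_mu))
# -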
# + [markdown] slideshow={"slide_type": "slide"}
# We get the same result because we used a flat prior. If the case were homoscedastic instead of heteroscedastic, we obviously would get the result from our first example.
#
# Now let's consider the case where $\sigma$ is *not* known, but rather needs to be determined from the data. In that case, the posterior pdf that we seek is not $p(\mu|\{x_i\},\{\sigma_i\},I)$, but rather $p(\mu,\sigma|\{x_i\},I)$.
#
# As before we have
# $$L = p(\{x_i\}|\mu,\sigma,I) = \prod_{i=1}^N \frac{1}{\sigma\sqrt{2\pi}} \exp\left(\frac{-(x_i-\mu)^2}{2\sigma^2}\right),$$
# except that now $\sigma$ is uknown.
#
# Our Bayesian prior is now 2D instead of 1D and we'll adopt
# $$p(\mu,\sigma|I) \propto \frac{1}{\sigma},\; {\rm for} \; \mu_{\rm min} < \mu < \mu_{\rm max} \; {\rm and} \; \sigma_{\rm min} < \sigma < \sigma_{\rm max}.$$
#
# With proper normalization, we have
# $$p(\{x_i\}|\mu,\sigma,I)p(\mu,\sigma|I) = C\frac{1}{\sigma^{(N+1)}}\prod_{i=1}^N \exp\left( \frac{-(x_i-\mu)^2}{2\sigma^2} \right),$$
# where
# $$C = (2\pi)^{-N/2}(\mu_{\rm max}-\mu_{\rm min})^{-1} \left[\ln \left( \frac{\sigma_{\rm max}}{\sigma_{\rm min}}\right) \right]^{-1}.$$
# + [markdown] slideshow={"slide_type": "slide"}
# The log of the posterior pdf is
#
# $$\ln[p(\mu,\sigma|\{x_i\},I)] = {\rm constant} - (N+1)\ln\sigma - \sum_{i=1}^N \frac{(x_i - \mu)^2}{2\sigma^2}.$$
#
# Right now that has $x_i$ in it, which isn't that helpful, but since we are assuming a Gaussian distribution, we can take advantage of the fact that the mean, $\overline{x}$, and the variance, $V (=s^2)$, completely characterize the distribution. So we can write this expression in terms of those variables instead of $x_i$. Skipping over the math details (see Ivezic $\S$5.6.1), we find
#
# $$\ln[p(\mu,\sigma|\{x_i\},I)] = {\rm constant} - (N+1)\ln\sigma - \frac{N}{2\sigma^2}\left( (\overline{x}-\mu)^2 + V \right).$$
#
# Note that this expression only contains the 2 parameters that we are trying to determine: $(\mu,\sigma)$ and 3 values that we can determine directly from the data: $(N,\overline{x},V)$.
#
# Load and execute the next cell to visualize the posterior pdf for the case of $(N,\overline{x},V)=(10,1,4)$. Remember to change `usetex=True` to `usetex=False` if you have trouble with the plotting. Try changing the values of $(N,\overline{x},V)$.
# +
# # %load code/fig_likelihood_gaussian.py
"""
Log-likelihood for Gaussian Distribution
----------------------------------------
Figure5.4
An illustration of the logarithm of the posterior probability density
function for :math:`\mu` and :math:`\sigma`, :math:`L_p(\mu,\sigma)`
(see eq. 5.58) for data drawn from a Gaussian distribution and N = 10, x = 1,
and V = 4. The maximum of :math:`L_p` is renormalized to 0, and color coded as
shown in the legend. The maximum value of :math:`L_p` is at :math:`\mu_0 = 1.0`
and :math:`\sigma_0 = 1.8`. The contours enclose the regions that contain
0.683, 0.955, and 0.997 of the cumulative (integrated) posterior probability.
"""
# Author: <NAME>
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.plotting.mcmc import convert_to_stdev
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def gauss_logL(xbar, V, n, sigma, mu):
"""Equation 5.57: gaussian likelihood"""
return (-(n + 1) * np.log(sigma)
- 0.5 * n * ((xbar - mu) ** 2 + V) / sigma ** 2)
#------------------------------------------------------------
# Define the grid and compute logL
sigma = np.linspace(1, 5, 70)
mu = np.linspace(-3, 5, 70)
xbar = 1
V = 4
n = 10
logL = gauss_logL(xbar, V, n, sigma[:, np.newaxis], mu)
logL -= logL.max()
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
plt.imshow(logL, origin='lower',
extent=(mu[0], mu[-1], sigma[0], sigma[-1]),
cmap=plt.cm.binary,
aspect='auto')
plt.colorbar().set_label(r'$\log(L)$')
plt.clim(-5, 0)
plt.contour(mu, sigma, convert_to_stdev(logL),
levels=(0.683, 0.955, 0.997),
colors='k')
plt.text(0.5, 0.93, r'$L(\mu,\sigma)\ \mathrm{for}\ \bar{x}=1,\ V=4,\ n=10$',
bbox=dict(ec='k', fc='w', alpha=0.9),
ha='center', va='center', transform=plt.gca().transAxes)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# The shaded region is the posterior probability. The contours are the confidence intervals. We can compute those by determining the marginal distribution at each $(\mu,\sigma)$. The top panels of the figures below show those marginal distributions. The solid line is what we just computed. The dotted line is what we would have gotten for a uniform prior--not that much difference. The dashed line is the MLE result, which is quite different. The bottom panels show the cumulative distribution.
#
# ![Ivezic, Figure 5.5](http://www.astroml.org/_images/fig_posterior_gaussian_1.png)
#
#
# Note that the marginal pdfs follow a Student's $t$ Distribution, which becomes Gaussian for large $N$.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Recap
#
# To review: the Bayesian Statistical Inference process is
# * formulate the likelihood, $p(D|M,I)$
# * chose a prior, $p(\theta|M,I)$, which incorporates other information beyond the data in $D$
# * determine the posterior pdf, $p(M|D,I)$
# * search for the model paramters that maximize $p(M|D,I)$
# * quantify the uncertainty of the model parameter estimates
# * test the hypothesis being addressed
#
# The last part we haven't talked about yet.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Another Example
#
# What if we wanted to model the mixture of a Gauassian distribution with a uniform distribution. When might that be useful? Well, for example:
#
# ![Atlas Higgs Boson Example](http://www.atlasexperiment.org/photos/atlas_photos/selected-photos/plots/fig_02.png)
#
# Obviously this isn't exactly a Gaussian and a uniform distribution, but a line feature superimposed upon a background is the sort of thing that a physicist might see and is pretty close to this case for a local region around the feature of interest. This is the example discussed in Ivezic $\S$5.6.5.
#
# For this example, we will assume that the location parameter, $\mu$, is known (say from theory) and that the errors in $x_i$ are negligible compared to $\sigma$.
# + [markdown] slideshow={"slide_type": "slide"}
# The likelihood of obtaining a measurement, $x_i$, in this example can be written as
# $$L = p(x_i|A,\mu,\sigma,I) = \frac{A}{\sigma\sqrt{2\pi}} \exp\left(\frac{-(x_i-\mu)^2}{2\sigma^2}\right) + \frac{1-A}{W}.$$
#
# Here the background probability is evaluated over $0 < x < W$ and 0 otherwise, that is the feature of interest lies between $0$ and $W$. $A$ and $1-A$ are the relative strengths of the two components, which are obviously anti-correlated. Note that there will be covariance between $A$ and $\sigma$.
# + [markdown] slideshow={"slide_type": "slide"}
# If we adopt a uniform prior in both $A$ and $\sigma$:
# $$p(A,\sigma|I) = C, \; {\rm for} \; 0\le A<A_{\rm max} \; {\rm and} \; 0 \le \sigma \le \sigma_{\rm max},$$
# then the posterior pdf is given by
# $$\ln [p(A,\sigma|\{x_i\},\mu,W)] = \sum_{i=1}^N \ln \left[\frac{A}{\sigma \sqrt{2\pi}} \exp\left( \frac{-(x_i-\mu)^2}{2\sigma^2} \right) + \frac{1-A}{W} \right].$$
#
# The figure below (Ivezic, 5.13) shows an example for $N=200, A=0.5, \sigma=1, \mu=5, W=10$. Specifically, the bottom panel is a result drawn from this distribution and the top panel is the likelihood distribution derived from the data in the bottom panel.
# ![Ivezic, Figure 5.13](http://www.astroml.org/_images/fig_likelihood_gausslin_1.png)
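#
# A minimal sketch of how that log-posterior surface can be evaluated numerically (not the astroML figure code; the sample is drawn here purely for illustration):
# +
import numpy as np

# draw a sample like the one in Figure 5.13: N=200, A=0.5, mu=5, sigma=1, W=10
np.random.seed(42)
N, A_true, mu, sigma_true, W = 200, 0.5, 5.0, 1.0, 10.0
n_gauss = np.random.binomial(N, A_true)
x = np.concatenate([np.random.normal(mu, sigma_true, n_gauss),
                    np.random.uniform(0, W, N - n_gauss)])

# evaluate ln p(A, sigma | {x_i}, mu, W) on a grid (the uniform prior is a constant)
A_grid = np.linspace(0.01, 0.99, 100)
sigma_grid = np.linspace(0.3, 3.0, 100)
AA, SS = np.meshgrid(A_grid, sigma_grid)
logL = np.zeros_like(AA)
for i in range(AA.shape[0]):
    for j in range(AA.shape[1]):
        A, s = AA[i, j], SS[i, j]
        logL[i, j] = np.sum(np.log(A / (s * np.sqrt(2 * np.pi))
                                   * np.exp(-(x - mu)**2 / (2 * s**2))
                                   + (1 - A) / W))

best = np.argmax(logL)
print("maximum at A = %.2f, sigma = %.2f" % (AA.flat[best], SS.flat[best]))
# -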
# + [markdown] slideshow={"slide_type": "slide"}
# A more realistic example might be one where all three parameters are unknown: the location, the width, and the background level. But that will have to wait until $\S$5.8.6.
#
# In the meantime, note that we have not binned the data, $\{x_i\}$. We only binned Figure 5.13 for the sake of visualization. However, sometimes the data are inherently binned (e.g., the detector is pixelated). In that case, the data would be in the form of $(x_i,y_i)$, where $y_i$ is the number of counts at each location. We'll skip over this example, but you can read about it in Ivezic $\S$5.6.6. A refresher on the Poisson distribution (Ivezic $\S$3.3.4) might be appropriate first.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Model Comparison
#
# Up to now we have concerned ourselves with determining the optimal parameters of a given model fit. But what if *another* model would be a better fit (regardless of how you choose the parameters of the first model).
#
# That leads us to a discussion of model comparison. This is discussed in more detail in Ivezic $\S$5.4 and $\S$5.7.1-3.
#
# To determine which model is better we compute the ratio of the posterior probabilities or the **odds ratio** for two models as
# $$O_{21} \equiv \frac{p(M_2|D,I)}{p(M_1|D,I)}.$$
#
# Since
# $$p(M|D,I) = \frac{p(D|M,I)p(M|I)}{p(D|I)},$$
# the odds ratio can ignore $p(D|I)$ since it will be the same for both models.
#
# (We'll see later why that is even more important than you might think as the denominator is the integral of the numerator, but what if you don't have an analytical function that you can integrate?!)
# + [markdown] slideshow={"slide_type": "skip"}
# ### Bayesian Hypothesis Testing
#
# In *hypothesis testing* we are essentially comparing a model, $M_1$, to its complement. That is $p(M_1) + p(M_2) = 1$. If we take $M_1$ to be the "null" (default) hypothesis (which is generally that, for example, a correlation does *not* exist), then we are asking whether or not the data reject the null hypothesis.
#
# In classical hypothesis testing we can ask whether or not a single model provides a good description of the data. In Bayesian hypothesis testing, we need to have an alternative model to compare to.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Markov-Chain Monte Carlo Methods
# + [markdown] slideshow={"slide_type": "slide"}
# Figure 5.10 from Ivezic shows the likelihood for a particular example:
# ![Ivezic, Figure 5.10](http://www.astroml.org/_images/fig_likelihood_cauchy_1.png)
#
# What was required to produce this figure? We needed to know the analytic form of the posterior distribution. But imagine that you don’t have a nice analytical function for the likelihood. You could still make a plot like the one above, by making a simulated model for the likelihood at every point, comparing the model with the data to generate a likelihood, populating the grid with those numerical likelihood estimates, then finding the best fitting parameters by locating the maximum in likelihood space.
# + [markdown] slideshow={"slide_type": "slide"}
# Now imagine that you have a problem with many parameters. If you have even 5 parameters and you want to sample 100 points of parameter space for each, that is $10^{10}$ points. It might take you a while (even your computer). So you might not be able to sample the full space given time (and memory) constraints.
#
# You *could* simply randomly sample the grid at every point, and try to find the minimum based on that. But that can also be quite time consuming, and you will spend a lot of time in regions of parameter space that yields small likelihood.
#
# However, a better way is to adopt a **Markov-Chain Monte Carlo (MCMC)**. MCMC gives us a way to make this problem computationally tractable by sampling the full multi-dimensional parameter space, in a way that builds up the most density in the regions of parameter space which are closest to the maximum. Then, you can post-process the “chain” to infer the distribution and error regions.
# + [markdown] slideshow={"slide_type": "slide"}
# Ivezic, Figure 5.22 shows the same problem as above, done with a Markov Chain Monte Carlo. The dashed lines are the known (analytic) solution. The solid lines are from the MCMC estimate with 10,000 sample points.
# ![Ivezic, Figure 5.10](http://www.astroml.org/_images/fig_cauchy_mcmc_1.png)
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## How does MCMC work?
#
# I've really struggled to come up with a simple way of illustrating MCMC so that you (and I for that matter) can understand it. Unfortunately, even the supposedly dumbed-down explanations are really technical. But let's see what I can do!
#
# Let's start by simply trying to understand what a Monte Carlo is and what a Markov Chain is.
# + [markdown] slideshow={"slide_type": "slide"}
# ### What is a Monte Carlo?
#
# In case you are not familiar with Monte Carlo methods, it might help to know that the term is derived from the Monte Carlo Casino as gambling and random sampling go together.
#
# We'll consider a simple example: you have forgotten the formula for the area of a circle, but you know the formula for the area of a square and how to draw a circle.
#
# We can use the information that we *do* know to numerically compute the area of a circle.
#
# We start by drawing a square and circumscribing a circle in it. Then we put down random points within the square and note which ones land in the circle. The ratio of random points in the circle to the number of random points drawn is related to the area of our circle. No need to know $\pi$. Using more random points yields more precise estimates of the area.
#
# Try it.
# + slideshow={"slide_type": "slide"}
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 10))
#Draw a square that spans ([-1,1],[-1,1])
x = np.array([-1, 1, 1, -1, -1])  # corners of the square (one possible completion)
y = np.array([-1, -1, 1, 1, -1])  # closing the loop back to the first corner
plt.xlim(-1.5,1.5)
plt.ylim(-1.5,1.5)
plt.plot(x,y)
# Now draw a circle with radius = 1
u = np.linspace(-1,1,100)
# Top half of circle
v = np.sqrt(1.0-u**2)
# Bottom half
v2 = -1.0*v
# Combine the top and bottom halves together
u = np.concatenate([u, u[::-1]])   # go across the top, then back along the bottom
v = np.concatenate([v, v2[::-1]])
plt.plot(u,v)
# Uniformly sample between -1 and 1 in 2 dimensions. Do this for 1000 draws
z = np.random.uniform(-1, 1, size=(1000, 2))
# Now figure out how many of those draws are in the circle (all are in the square by definition)
n = 0
for a,b in z:
    if a**2 + b**2 <= 1:  # the point falls inside the unit circle
plt.scatter(a,b,c='g')
n=n+1
else:
plt.scatter(a,b,c='r')
# Use that information to compute the area of the circle (without using the formula)
print("Estimated area of the circle = %.3f" % (4.0 * n / len(z)))  # square area x fraction inside
# + [markdown] slideshow={"slide_type": "slide"}
# For homework plot the distribution of results for lots of such experiments. Do you get the expected $\sigma$?
#
# + [markdown] slideshow={"slide_type": "slide"}
# In general, Monte Carlo methods are about using random sampling to obtain a numerical result (e.g., the value of an integral), where there is no analytic result.
#
# In the case of the circle above, we have computed the intergral:
# $$\int\int_{x^2+y^2\le 1} dx dy.$$
# + [markdown] slideshow={"slide_type": "slide"}
# ### What is a Markov Chain?
#
# A Markov Chain is defined as a sequence of random variables where a parameter depends *only* on the preceding value. Such processes are "memoryless".
#
# Mathematically, we have
# $$p(\theta_{i+1}|\{\theta_i\}) = p(\theta_{i+1}|\theta_i).$$
#
# Now, if you are like me, you might look at that and say "Well, day 3 is based on day 2, which is based on day 1, so day 3 is based on day 1...".
#
# So let's look at an example to see what we mean and how this might be a memoryless process.
#
# + [markdown] slideshow={"slide_type": "slide"}
# Let's say that you are an astronomer and you want to know how likely it is going to be clear tomorrow night given the weather tonight (clear or cloudy). From past history, you know that:
#
# $$p({\rm clear \; tomorrow} \, |\, {\rm cloudy \; today}) = 0.5,$$
# which means that
# $$p({\rm cloudy \; tomorrow} \, |\, {\rm cloudy \; today}) = 0.5.$$
#
# We also have
# $$p({\rm cloudy \; tomorrow} \, |\, {\rm clear \; today}) = 0.1,$$
# which means that
# $$p({\rm clear \; tomorrow} \, |\, {\rm clear \; today}) = 0.9.$$
#
# (That is, you don't live in Philadelphia.)
#
# We can start with the sky conditions today and make predictions going forward. This will look like a big decision tree. After enough days, we'll reach equilibrium probabilities that have to do with the mean weather statistics (ignoring seasons) and we'll arrive at
#
# $$p({\rm clear}) = 0.83,$$
# and
# $$p({\rm cloudy}) = 0.17.$$
#
# You get the same answer for day $N$ as day $N+1$ and it doesn't matter whether it was clear or cloudy on the day that you started.
#
# The steps that we have taken in this process are a **Markov Chain**.
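#
# A small numerical check of that claim (a sketch; the transition probabilities are the ones quoted above):
# +
import numpy as np

# transition matrix: rows = today (clear, cloudy), columns = tomorrow (clear, cloudy)
P = np.array([[0.9, 0.1],
              [0.5, 0.5]])

state = np.array([0.0, 1.0])   # start from a cloudy day; the starting point doesn't matter
for day in range(50):          # step the chain forward until it settles down
    state = np.dot(state, P)
print(state)                   # -> approximately [0.83, 0.17]
# -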
# + [markdown] slideshow={"slide_type": "slide"}
# In MCMC the chain must become **stationary**, which basically means that it looks the same no matter where you sample it.
#
# Obviously that isn't going to be the case in the early steps of the chain. In our example above, after some time the process was stationary, but not in the first few days.
#
# So, there is a **burn-in** phase that needs to be discarded. Determining how many iterations the burn-in should last when you don't know the distribution can be a bit tricky.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Markov Chain Monte Carlo Summary
#
# 1. Starting at a random position, evaluate the likelihood.
# 2. Choose a new position, according to some transition probabilities, and evaluate the likelihood there.
# 3. Examine the odds ratio formed by the new-position likelihood and the old-position likelihood. If the odds ratio is greater than 1, move to the new position. If it is less than one, draw a random number between zero and 1: if the odds ratio is greater than that random number, move to the new position anyway; if not, reject the new position and stay where you are.
# 4. Repeat 1-3 many times. After a period of time (the burn-in) the simulation should reach an equilibrium. Keep the results of the chain (after burn-in), and postprocess those results to infer the likelihood surface.
#
# + [markdown] slideshow={"slide_type": "slide"}
# Most of the difficulty in the MCMC process comes from either determining the burn-in or deciding how to step from one position to another. In our circle example we have drawn points in a completely random manner. However, that may not be the most efficient manner to span the space.
#
# The most commonly used algorithm for stepping from one position to another is the [Metropolis-Hastings](https://en.wikipedia.org/wiki/Metropolis%E2%80%93Hastings_algorithm) algorithm.
#
# In astronomy, the ${\tt emcee}$ algorithm has become more popular in recent years. We won't discuss either in detail, but both the [code](http://dan.iel.fm/emcee/current/) and a [paper](http://adsabs.harvard.edu/abs/2013PASP..125..306F) describing ${\tt emcee}$ are available.
#
# Recall that our parameter space it multidimensional. So, when you are stepping from one point to another, you are really doing it in N-D parameter space! You might wonder if you could just step one parameter at a time. Sure! That's what [Gibbs sampling](https://en.wikipedia.org/wiki/Gibbs_sampling) does.
#
# + [markdown] slideshow={"slide_type": "slide"}
# The end result of this process will be a chain of likelihoods that we can use to compute the likelihood contours.
#
# If you are using MCMC, then you probably have multiple parameters (otherwise, you'd be doing something easier). So, it helps to display the parameters two at a time, marginalizing over the other parameters. An example is given in Ivezic, Figure 5.24, which compares the model results for a single Gaussian fit to a double Gaussian fit:
#
# ![Ivezic, Figure 5.24](http://www.astroml.org/_images/fig_model_comparison_mcmc_1.png)
# + [markdown] slideshow={"slide_type": "slide"}
# We'll end by going through the example given at
# [http://twiecki.github.io/blog/2015/11/10/mcmc-sampling/](http://twiecki.github.io/blog/2015/11/10/mcmc-sampling/).
#
# First set up some stuff by executing the next cell
# + slideshow={"slide_type": "slide"}
# %matplotlib inline
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import norm
np.random.seed(123)
# + [markdown] slideshow={"slide_type": "slide"}
# Now let's generate some data points and plot them. We'll try a normal distribution, centered at 0 with 100 data points. Our goal is to estimate $\mu$.
# + slideshow={"slide_type": "slide"}
data = np.random.randn(100)
plt.figure(figsize=(8,8))
plt.hist(data)
plt.xlabel('x')
plt.ylabel('N')
# + [markdown] slideshow={"slide_type": "slide"}
# Now we have to pick a model to try. For the sake of simplicity for this example, we'll assume a normal distribution: $\mathscr{N}(\mu,\sigma=1)$ (i.e., with $\sigma=1$). We'll also assume a normal distribution for the prior on $\mu$: $\mathscr{N}(0,1)$.
#
# We can use that to write a function for our posterior distribution as follows:
# + slideshow={"slide_type": "slide"}
def calc_posterior_analytical(data, x, mu_0, sigma_0):
sigma = 1.
n = len(data)
mu_post = (mu_0 / sigma_0**2 + data.sum() / sigma**2) / (1. / sigma_0**2 + n / sigma**2)
sigma_post = (1. / sigma_0**2 + n / sigma**2)**-1
return norm(mu_post, np.sqrt(sigma_post)).pdf(x)
plt.figure(figsize=(8,8))
x = np.linspace(-1, 1, 500)
posterior_analytical = calc_posterior_analytical(data, x, 0., 1.)
plt.plot(x, posterior_analytical)
plt.xlabel('mu')
plt.ylabel('post prob')
# + [markdown] slideshow={"slide_type": "slide"}
# Now we need to sample the distribution space. Let's start by trying $\mu_0 = 0$ and evaluate.
#
# Then we'll jump to a new position using one of the algorithms mentioned above. In this case we'll use the Metropolis algorithm, which draws the new points from a normal distribution centered on the current guess for $\mu$.
#
# Next we evaluate whether that jump was "good" or not -- by seeing if the value of likelihood\*prior increases. Now, we want to get the right answer, but we also want to make sure that we sample the full parameter space (so that we don't get stuck in a local minimum). So, even if the new location is not better than the last one, we'll have some probability of moving there anyway.
#
# The reason that taking the ratio of likelihood\*prior works is that the denominator drops out. That's good because the denominator is the integral of the numerator and that's what we are trying to figure out! In short, we don't have to know the posterior probability to know that the posterior probability at one step is better than another.
# + slideshow={"slide_type": "slide"}
# Execute this cell
# See https://github.com/twiecki/WhileMyMCMCGentlySamples/blob/master/content/downloads/notebooks/MCMC-sampling-for-dummies.ipynb
def sampler(data, samples=4, mu_init=.5, proposal_width=.5, plot=False, mu_prior_mu=0, mu_prior_sd=1.):
mu_current = mu_init
posterior = [mu_current]
for i in range(samples):
# suggest new position
mu_proposal = norm(mu_current, proposal_width).rvs()
# Compute likelihood by multiplying probabilities of each data point
likelihood_current = norm(mu_current, 1).pdf(data).prod()
likelihood_proposal = norm(mu_proposal, 1).pdf(data).prod()
# Compute prior probability of current and proposed mu
prior_current = norm(mu_prior_mu, mu_prior_sd).pdf(mu_current)
prior_proposal = norm(mu_prior_mu, mu_prior_sd).pdf(mu_proposal)
p_current = likelihood_current * prior_current
p_proposal = likelihood_proposal * prior_proposal
# Accept proposal?
p_accept = p_proposal / p_current
        # The normalizing evidence cancels in this ratio, so we never need to compute it
accept = np.random.rand() < p_accept
if plot:
plot_proposal(mu_current, mu_proposal, mu_prior_mu, mu_prior_sd, data, accept, posterior, i)
if accept:
# Update position
mu_current = mu_proposal
posterior.append(mu_current)
return posterior
# Function to display
def plot_proposal(mu_current, mu_proposal, mu_prior_mu, mu_prior_sd, data, accepted, trace, i):
from copy import copy
trace = copy(trace)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(ncols=4, figsize=(16, 4))
fig.suptitle('Iteration %i' % (i + 1))
x = np.linspace(-3, 3, 5000)
color = 'g' if accepted else 'r'
# Plot prior
prior_current = norm(mu_prior_mu, mu_prior_sd).pdf(mu_current)
prior_proposal = norm(mu_prior_mu, mu_prior_sd).pdf(mu_proposal)
prior = norm(mu_prior_mu, mu_prior_sd).pdf(x)
ax1.plot(x, prior)
ax1.plot([mu_current] * 2, [0, prior_current], marker='o', color='b')
ax1.plot([mu_proposal] * 2, [0, prior_proposal], marker='o', color=color)
ax1.annotate("", xy=(mu_proposal, 0.2), xytext=(mu_current, 0.2),
arrowprops=dict(arrowstyle="->", lw=2.))
ax1.set(ylabel='Probability Density', title='current: prior(mu=%.2f) = %.2f\nproposal: prior(mu=%.2f) = %.2f' % (mu_current, prior_current, mu_proposal, prior_proposal))
# Likelihood
likelihood_current = norm(mu_current, 1).pdf(data).prod()
likelihood_proposal = norm(mu_proposal, 1).pdf(data).prod()
y = norm(loc=mu_proposal, scale=1).pdf(x)
#sns.distplot(data, kde=False, norm_hist=True, ax=ax2)
    ax2.hist(data, alpha=0.5, density=True)
ax2.plot(x, y, color=color)
ax2.axvline(mu_current, color='b', linestyle='--', label='mu_current')
ax2.axvline(mu_proposal, color=color, linestyle='--', label='mu_proposal')
#ax2.title('Proposal {}'.format('accepted' if accepted else 'rejected'))
ax2.annotate("", xy=(mu_proposal, 0.2), xytext=(mu_current, 0.2),
arrowprops=dict(arrowstyle="->", lw=2.))
ax2.set(title='likelihood(mu=%.2f) = %.2f\nlikelihood(mu=%.2f) = %.2f' % (mu_current, 1e14*likelihood_current, mu_proposal, 1e14*likelihood_proposal))
# Posterior
posterior_analytical = calc_posterior_analytical(data, x, mu_prior_mu, mu_prior_sd)
ax3.plot(x, posterior_analytical)
posterior_current = calc_posterior_analytical(data, mu_current, mu_prior_mu, mu_prior_sd)
posterior_proposal = calc_posterior_analytical(data, mu_proposal, mu_prior_mu, mu_prior_sd)
ax3.plot([mu_current] * 2, [0, posterior_current], marker='o', color='b')
ax3.plot([mu_proposal] * 2, [0, posterior_proposal], marker='o', color=color)
ax3.annotate("", xy=(mu_proposal, 0.2), xytext=(mu_current, 0.2),
arrowprops=dict(arrowstyle="->", lw=2.))
#x3.set(title=r'prior x likelihood $\propto$ posterior')
ax3.set(title='posterior(mu=%.2f) = %.5f\nposterior(mu=%.2f) = %.5f' % (mu_current, posterior_current, mu_proposal, posterior_proposal))
if accepted:
trace.append(mu_proposal)
else:
trace.append(mu_current)
ax4.plot(trace)
ax4.set(xlabel='iteration', ylabel='mu', title='trace')
plt.tight_layout()
#plt.legend()
# + [markdown] slideshow={"slide_type": "slide"}
# To visualize the sampling, we'll create plots for some quantities that are computed. Each row below is a single iteration through our Metropolis sampler.
#
# The first column is our prior distribution -- what our belief about $\mu$ is before seeing the data. You can see how the distribution is static and we only plug in our $\mu$ proposals. The vertical lines represent our current $\mu$ in blue and our proposed $\mu$ in either red or green (rejected or accepted, respectively).
#
# The 2nd column is our likelihood, which is what we are using to evaluate how well our model explains the data. You can see that the likelihood function changes in response to the proposed $\mu$. The blue histogram is our data. The solid line in green or red is the likelihood with the currently proposed mu. Intuitively, the more overlap there is between likelihood and data, the better the model explains the data and the higher the resulting probability will be. The dashed line of the same color is the proposed mu and the dashed blue line is the current mu.
#
# The 3rd column is our posterior distribution. Here we are displaying the normalized posterior.
#
# The 4th column is our trace (i.e. the posterior samples of $\mu$ we're generating) where we store each sample irrespective of whether it was accepted or rejected (in which case the line just stays constant).
#
# Note that we always move to relatively more likely $\mu$ values (in terms of their posterior density), but only sometimes to relatively less likely $\mu$ values; look for an iteration where a proposal with lower posterior density is nevertheless accepted (the iteration number can be found at the top center of each row).
#
# + slideshow={"slide_type": "slide"}
np.random.seed(123)
sampler(data, samples=8, mu_init=-1., plot=True);
# + [markdown] slideshow={"slide_type": "slide"}
# What happens when we do this lots of times?
# + slideshow={"slide_type": "slide"}
posterior = sampler(data, samples=15000, mu_init=1.)
fig, ax = plt.subplots()
ax.plot(posterior)
_ = ax.set(xlabel='sample', ylabel='mu');
# + [markdown] slideshow={"slide_type": "slide"}
# A histogram of these samples gives our estimated posterior probability distribution (below we drop the first 500 samples as burn-in).
# + slideshow={"slide_type": "slide"}
ax = plt.subplot()
ax.hist(posterior[500:], bins=30, alpha=0.5, density=True, label='estimated posterior')
x = np.linspace(-.5, .5, 500)
post = calc_posterior_analytical(data, x, 0, 1)
ax.plot(x, post, 'g', label='analytic posterior')
_ = ax.set(xlabel='mu', ylabel='belief');
ax.legend(fontsize=10);
# + [markdown] slideshow={"slide_type": "slide"}
# Our algorithm for deciding where to move to next used a normal distribution where the mean was the current value and we had to assume a width. Find where we specified that and see what happens if you make it a lot smaller or a lot bigger.
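# + [markdown] slideshow={"slide_type": "slide"}
# For example (a minimal sketch; the two widths below are arbitrary choices), you could rerun the sampler with a much narrower and a much wider proposal and compare the traces:
# + slideshow={"slide_type": "slide"}
# Hypothetical experiment: compare traces for a narrow and a wide proposal width
fig, axes = plt.subplots(nrows=2, figsize=(8, 6), sharex=True)
for ax, width in zip(axes, [0.01, 3.0]):
    trace = sampler(data, samples=1000, mu_init=1., proposal_width=width)
    ax.plot(trace)
    ax.set(ylabel='mu', title='proposal_width = %g' % width)
axes[-1].set(xlabel='sample')
plt.tight_layout()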
# + [markdown] slideshow={"slide_type": "slide"}
# ### More Complex Models
#
# The example above was overkill in that we were only trying to estimate $\mu$. Note also that we can do this in less than 10 lines using the ${\tt pymc3}$ module (a rough sketch is shown below).
#
# The process is essentially the same when you add more parameters. Check out this [animation of a 2-D process](http://twiecki.github.io/blog/2014/01/02/visualizing-mcmc/) by the same author whose example we just followed.
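# + [markdown] slideshow={"slide_type": "slide"}
# As a rough sketch of the ${\tt pymc3}$ version mentioned above (assuming `pymc3` is installed; argument names follow the PyMC3 3.x API and may differ in later releases):
# + slideshow={"slide_type": "slide"}
import pymc3 as pm

with pm.Model():
    # prior on mu: N(0, 1); likelihood: N(mu, 1) for the observed data
    mu = pm.Normal('mu', mu=0., sd=1.)
    obs = pm.Normal('obs', mu=mu, sd=1., observed=data)
    trace = pm.sample(2000, tune=1000)

pm.traceplot(trace)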
| Inference2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SciPy Optimize Module
#
#
# ## Module Contents
#
# * Optimization
# * Local
# - [1D Optimization](./Optimization_1D.ipynb)
# - [ND Optimization](./Optimization_ND.ipynb)
# - [Linear Programming](./Linear_Prog.ipynb)
# * Global Optimization
# - [Brute](./Optimization_Global_brute.ipynb)
# - [shgo](./Optimization_Global_shgo.ipynb)
# - [Differential Evolution](./Optimization_Global_differential_evolution.ipynb)
# - [Basin Hopping](./Optimization_Global_basinhopping.ipynb)
# - [Dual Annealing](./Optimization_Global_dual_annealing.ipynb)
# * Root Finding
# * [1D Roots](./Roots_1D.ipynb)
# * [ND Roots](./Roots_ND.ipynb)
# * [Curve Fitting and Least Squares](./Curve_Fit.ipynb)
# ## Optimization
#
# <img src="./images/global_local.svg" style="float:right; margin: 2px">
#
# Optimization algorithms find the smallest (or greatest) value of a function over a given range. <b>Local</b> methods find a point that is just the lowest point for some <i>neighborhood</i>, but multiple local minima can exist over the domain of the problem. <b>Global</b> methods try to find the lowest value over an entire region.
#
#
# The available local methods include `minimize_scalar`, `linprog`, and `minimize`. `minimize_scalar` only minimizes functions of one variable. `linprog` for <b>Linear Programming</b> deals with only linear functions with linear constraints. `minimize` is the most general routine. It can deal with either of those subcases in addition to any arbitrary function of many variables.
#
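# As a quick illustration (a minimal sketch; the quadratic is an arbitrary choice), `minimize_scalar` only needs a function of one variable:
# +
from scipy import optimize

res_1d = optimize.minimize_scalar(lambda x: (x - 2)**2)
res_1d.x  # should be close to 2
# -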
# Optimize provides 5 different global optimization routines. Brute directly computes points on a grid. This routine is the easiest to get working, saving programmer time at the cost of computer time. Conversely, shgo (Simplicial Homology Global Optimization) is a conceptually difficult but powerful routine. Differential Evolution, Basin Hopping, and Dual Annealing are all [Monte Carlo](https://en.wikipedia.org/wiki/Monte_Carlo_method) based, relying on random numbers. Differential Evolution is an [Evolutionary Algorithm](https://en.wikipedia.org/wiki/Evolutionary_algorithm), creating a "population" of random points and iterating it based on which ones give the lowest values. Basin Hopping and Dual Annealing instead work locally through a [Markov Chain](https://en.wikipedia.org/wiki/Markov_chain) random walk. They can computationally cope with a large number of dimensions, but they can get locally stuck and miss the global minimum. Basin Hopping provides a more straightforward base and options for a great deal of customization, and dual annealing provides a more complicated method to avoid getting locally trapped.
#
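# For example (a minimal sketch; the test function, bounds, and seed are arbitrary choices), a global routine such as `differential_evolution` only needs bounds on each variable:
# +
import numpy as np
from scipy import optimize

# A bumpy function with many local minima; the global minimum is at the origin
f_bumpy = lambda x: np.sum(x**2 - 10*np.cos(2*np.pi*x)) + 10*len(x)
result_de = optimize.differential_evolution(f_bumpy, bounds=[(-5, 5), (-5, 5)], seed=0)
result_de.x, result_de.fun
# -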
# ## Root Finding
#
# [`root`](./Roots_ND.ipynb) and [`root_scalar`](./Roots_1D.ipynb) solve the problem
# $$
# f(x) = 0.
# $$
# `root_scalar` is limited to when $x$ is a scalar, while for the more general `root` $x$ can be a multidimensional vector.
#
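# A short example (a minimal sketch; the cubic and the bracket are arbitrary choices):
# +
from scipy import optimize

sol = optimize.root_scalar(lambda x: x**3 - 1, bracket=[0, 2], method='bisect')
sol.root  # should be close to 1
# -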
# ## Curve Fitting
#
# This module provides tools to optimize the fit between a parametrized function and data. `curve_fit` provides a simple interface where you don't have to worry about the guts of fitting a function. It minimizes the sum of the squared residuals over the parameters of the function, or:
# $$
# \text{min}_{\text{p}} \quad \sum_i \big( f(x_i , \text{p} ) - y_i \big)^2.
# $$
# If you want, you can instead pass this function to the provided non-linear least squares optimizer `least_squares`. If the model function is linear, `nnls` and `lsq_linear` exist as well.
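# A brief example (a minimal sketch; the exponential model, the "true" parameters, and the noise level are made up for illustration):
# +
import numpy as np
from scipy import optimize

model = lambda x, a, b: a*np.exp(-b*x)
xdata = np.linspace(0, 4, 50)
ydata = model(xdata, 2.5, 1.3) + 0.05*np.random.RandomState(0).randn(50)

popt, pcov = optimize.curve_fit(model, xdata, ydata, p0=(1., 1.))
popt  # should be close to (2.5, 1.3)
# -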
# ## Common Traits in the Submodule
# ### Output: `OptimizeResult`
# <hr />
#
# Many functions return an object that can contain more information than simply "this is the minimum". The information varies between functions, the method used by the function, and the flags given to the function, but the way of accessing the data remains the same.
#
# Let's create one of these data types via minimization to look at it:
# +
from scipy import optimize

f = lambda x : x**2
result = optimize.minimize(f, [2], method="BFGS")
# -
# You can determine what fields are available via
result.keys()
# And you can access individual values via:
result.x
# Inspecting the object with `?` or `??` can tell you more about what the individual components actually are.
#
# In Jupyter Lab, Contextual Help, `Ctrl+I` can also provide this information.
# ? result
# ### `args`
# <hr />
#
# Many routines allow function parameters in a <b>tuple</b> to be passed to the routine via the `args` flag:
# +
f_parameter = lambda x,a : (x-a)**2
optimize.minimize(f_parameter,[0],args=(1,))
# -
# ### Methods
# <hr />
#
# The functions in `scipy.optimize` are uniform wrappers that can call multiple different methods (algorithms) behind the scenes. For example, `minimize_scalar` can use the Brent, Golden, or Bounded methods. Methods can have different strengths, weaknesses, and pitfalls. SciPy will automatically choose certain algorithms given the inputted information, but if you know more about the problem, a different algorithm might be better.
#
# An example of choosing the routine:
# +
f = lambda x : x**2
optimize.minimize(f,[2],method="CG")
# -
# ### Method Options
# <hr />
#
# `minimize` itself has 14 different methods, and it's not the only routine that calls multiple methods. While much of the information and functionality is unified across a routine, each method does have its individual settings. The settings can be found through the `show_options` function:
optimize.show_options(solver="minimize",method="CG")
# The settings are passed in a dictionary to the solver:
# +
options_dictionary = {
"maxiter": 5,
"eps": 1e-6
}
optimize.minimize(f,[2],options=options_dictionary)
# -
# ### Tolerance and Iterations
# <hr />
#
# How much computer time do you want to spend on this problem? How accurate do you need your answer? Is your function really expensive to calculate?
#
# When two successive values are within the tolerance range of each other, or the routine has reached the maximum number of iterations, the routine will exit. Some functions differentiate between <b>relative tolerance</b> and <b>absolute tolerance</b>. Relative tolerance scales with the absolute size of the values: for example, if two steps are five apart, but each is about a trillion, the function can exit. Tolerance in the domain (`x`) direction also differs from tolerance in the range (`f`) direction. For minimization, the `gtol` tolerance can also apply to zeroing the gradient.
#
# Some methods also allow specifying both the maximum number of iterations and the maximum number of function evaluations; some methods evaluate the function multiple times during each iteration.
#
# Whether these quantities exist, and the procedure for setting these quantities varies between functions and methods within functions. Check individual documentation for details, but here is one example:
optimize.minimize(f,[2],tol=1e-10,options={"maxiter":10})
| Optimize_Module.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Predicting Airline Data using a Generalized Linear Model (GLM) in Keras
#
# In particular, we will predict the probability that a flight is late based on its departure date/time, the expected flight time and distance, and the origin and destination airports.
#
# Most parts of this notebook are identical to what was done in "Airline Delay with a GLM in python3.ipynb".
# The main difference is that we will use the [Keras](https://keras.io/) high-level library with a TensorFlow backend (a Theano backend is also available) to perform the machine learning operations instead of scikit-learn.
#
# The core library for the dataframe part is [pandas](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html).<br>
# The core library for the machine learning part is [Keras](https://keras.io/). This library is mostly used for deep-learning/neural-network machine learning, but it can also be used to implement most Generalized Linear Models. It is also quite easy to add new types of model layers into the Keras API if new functionality is needed.
#
# The other main advantage of Keras is that it is a high-level API on top of either TensorFlow or Theano. Writing new, complex models is much simpler in Keras than in TensorFlow/Theano, while keeping the benefits of these low-level libraries in terms of computing performance on CPU/GPU.
#
# ### Considerations
#
# The objective of this notebook is to define a simple model offering a point of comparison in terms of computing performance across data science languages and libraries. In other words, this notebook is not for you if you are looking for the most accurate model for airline delay predictions.
# + [markdown] deletable=true editable=true
# ## Install and Load useful libraries
# + deletable=true editable=true
# %matplotlib inline
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
# + [markdown] deletable=true editable=true
# ## Load the data (identical to python3 scikit-learn)
#
# - The dataset is taken from [http://stat-computing.org](http://stat-computing.org/dataexpo/2009/the-data.html). We take the data corresponding to year 2008.
# - We restrict the dataset to the first million rows
# - We print all column names and the first 5 rows of the dataset
# + deletable=true editable=true
df = pd.read_csv("2008.csv")
df.shape[0]
# + deletable=true editable=true
df = df[0:1000000]
# + deletable=true editable=true
df.columns
# + deletable=true editable=true
df[0:5]
# + [markdown] deletable=true editable=true
# ## Data preparation for training (identical to python3 scikit-learn)
#
# - We turn origin/destination categorical data to a "one-hot" encoding representation
# - We create a new "binary" column indicating if the flight was delayed or not.
# - We show the first 5 rows of the modified dataset
# - We split the dataset in two parts: a training dataset and a testing dataset containing 80% and 20% of the rows, respectively.
# + deletable=true editable=true
df = pd.concat([df, pd.get_dummies(df["Origin"], prefix="Origin")], axis=1);
df = pd.concat([df, pd.get_dummies(df["Dest" ], prefix="Dest" )], axis=1);
df = df.dropna(subset=["ArrDelay"])
df["IsArrDelayed" ] = (df["ArrDelay"]>0).astype(int)
df[0:5]
# + deletable=true editable=true
train = df.sample(frac=0.8)
test = df.drop(train.index)
# + [markdown] deletable=true editable=true
# ## Model building
#
# - We define the generalized linear model using a binomial function --> Logistic regression.
# - The model has linear logits = (X*W)+B = (Features * Coefficients) + Bias
# - The Loss function is a logistic function (binary_cross_entropy)
# - A L2 regularization is added to mimic what is done in scikit learn
# - Specific callbacks are defined (one for logging and one for early stopping the training)
# - We train the model and measure the training time --> ~55sec on an intel i7-6700K (4.0 GHz) with a GTX970 4GB GPU for 800K rows
# - The model is trained using a minibatch strategy (which can be tuned for further performance increases)
# - We show the model coefficients
# - We show the 10 most important variables
# + deletable=true editable=true
#get the list of one hot encoding columns
OriginFeatCols = [col for col in df.columns if ("Origin_" in col)]
DestFeatCols = [col for col in df.columns if ("Dest_" in col)]
features = train[["Year","Month", "DayofMonth" ,"DayOfWeek", "DepTime", "AirTime", "Distance"] + OriginFeatCols + DestFeatCols ]
labels = train["IsArrDelayed"]
featuresMatrix = features.as_matrix()
labelsMatrix = labels .as_matrix().reshape(-1,1)
# + deletable=true editable=true
featureSize = features.shape[1]
labelSize = 1
training_epochs = 25
batch_size = 2500
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.regularizers import l2, activity_l2
from sklearn.metrics import roc_auc_score
from keras.callbacks import Callback
from keras.callbacks import EarlyStopping
#DEFINE A CUSTOM CALLBACK
class IntervalEvaluation(Callback):
def __init__(self): super(Callback, self).__init__()
def on_epoch_end(self, epoch, logs={}): print("interval evaluation - epoch: %03d - loss:%8.6f" % (epoch, logs['loss']))
#DEFINE AN EARLY STOPPING FOR THE MODEL
earlyStopping = EarlyStopping(monitor='loss', patience=1, verbose=0, mode='auto')
#DEFINE THE MODEL
model = Sequential()
model.add(Dense(labelSize, input_dim=featureSize, activation='sigmoid', W_regularizer=l2(1e-5)))
model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['accuracy'])
#FIT THE MODEL
model.fit(featuresMatrix, labelsMatrix, batch_size=batch_size, nb_epoch=training_epochs,verbose=0,callbacks=[IntervalEvaluation(),earlyStopping]);
# + deletable=true editable=true
coef = pd.DataFrame(data=model.layers[0].get_weights()[0], index=features.columns, columns=["Coef"])
coef = coef.reindex( coef["Coef"].abs().sort_values(axis=0,ascending=False).index ) #order by absolute coefficient magnitude
coef[ coef["Coef"].abs()>0 ] #keep only non-null coefficients
coef[ 0:10 ] #keep only the 10 most important coefficients
# + [markdown] deletable=true editable=true
# ## Model testing (identical to python3 scikit-learn)
#
# - We add a model prediction column to the testing dataset
# - We show the first 10 rows of the test dataset (with the new column)
# - We show the model ROC curve
# - We measure the model Area Under Curve (AUC) to be 0.689 on the testing dataset.
#
# This tells us that our model is not super accurate (we generally consider a model reasonable at predicting when it has an AUC above 0.8). But that is fine here, since we are not trying to build the best possible model, just to compare data science code/performance across languages/libraries.
# If you nonetheless want to improve this result, you could try adding more feature columns to the model.
# + deletable=true editable=true
testFeature = test[["Year","Month", "DayofMonth" ,"DayOfWeek", "DepTime", "AirTime", "Distance"] + OriginFeatCols + DestFeatCols ]
pred = model.predict( testFeature.as_matrix() )
test["IsArrDelayedPred"] = pred
test[0:10]
# + deletable=true editable=true
fpr, tpr, _ = roc_curve(test["IsArrDelayed"], test["IsArrDelayedPred"])
AUC = auc(fpr, tpr)
plt.figure()
plt.plot(fpr, tpr, color='darkorange', lw=4, label='ROC curve (area = %0.3f)' % AUC)
plt.legend(loc=4)
# + deletable=true editable=true
AUC
# + [markdown] deletable=true editable=true
# ## Key takeaways
#
# - We built a GLM model predicting airline delay probability in tensorflow
# - We train it on 800K rows in ~55sec on an intel i7-6700K (4.0 GHz) with a GTX970 GPU
# - We measure an AUC of 0.689, which is almost identical to python-3 scikit learn results
# - We demonstrated a typical workflow in python+keras in a Jupyter notebook
# - We can easily customize the model using the several types of layers available in Keras. That would make our model much more accurate and sophisticated with no additional pain in either complexity or computing performance.
#
# [Keras](https://keras.io/) documentation is quite complete and contains several examples from linear algebra to advance deep learning techniques.
# + deletable=true editable=true
| Airline Delay with a GLM in Keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 2 End-to-end Machine Learning project
#
# ### 2.8 download data
#
# +
import os
import tarfile
from six.moves import urllib
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/"
HOUSING_PATH = os.path.join("datasets", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
if not os.path.isdir(housing_path):
os.makedirs(housing_path)
tgz_path = os.path.join(housing_path, "housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
# -
fetch_housing_data()
# +
import pandas as pd
def load_housing_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path, "housing.csv")
return pd.read_csv(csv_path)
# -
housing = load_housing_data()
housing.head()
housing.info()
housing['ocean_proximity'].value_counts()
housing.describe()
# +
# %matplotlib inline
import matplotlib.pyplot as plt
housing.hist(bins=50, figsize=(20,15))
plt.show()
# -
# ### 2.9 create test set
#
# +
import numpy as np
# to make this notebook's output identical at every run
np.random.seed(42)
def split_train_test(data, test_ratio):
shuffled_indices = np.random.permutation(len(data))
test_set_size = int(len(data) * test_ratio)
test_indices = shuffled_indices[: test_set_size]
train_indices = shuffled_indices[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
# +
train_set, test_set = split_train_test(housing, 0.2)
print(len(train_set), 'train +', len(test_set), 'test')
# -
# ...or, use sklearn function to split train and test sets
# +
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
# -
test_set.head()
train_set.head()
housing['median_income'].hist()
housing['income_cat'] = pd.cut(housing['median_income'],
bins=[0., 1.5, 3.0, 4.5, 6., np.inf],
labels=[1, 2, 3, 4, 5])
housing.head()
housing['income_cat'].value_counts()
# +
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing['income_cat']):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
# -
housing['income_cat'].value_counts()/len(housing)
# +
def income_cat_proportions(data):
return data['income_cat'].value_counts()/len(data)
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
compare_props = pd.DataFrame({'overall' : income_cat_proportions(housing),
'stratified' : income_cat_proportions(strat_test_set),
'random' : income_cat_proportions(test_set)}).sort_index()
compare_props['rand. %error'] = 100 * compare_props['random'] / compare_props['overall'] - 100
compare_props['strat. %error'] = 100 * compare_props['stratified'] / compare_props['overall'] - 100
# -
compare_props
for set_ in (strat_train_set, strat_test_set):
    set_.drop(['income_cat'], axis=1, inplace=True)
strat_train_set.head()
strat_test_set.head()
# ## 2.10 viz data
# make a copy of the data
housing = strat_train_set.copy()
housing.plot(kind='scatter', x='longitude', y='latitude')
housing.plot(kind='scatter', x='longitude', y='latitude', alpha=0.1)
# +
housing.plot(kind='scatter',
x='longitude',
y='latitude',
alpha=0.4,
s=housing['population']/100,
label='population',
figsize=(10,7),
c='median_house_value',
cmap=plt.get_cmap('jet'),
colorbar=True,
sharex=False)
plt.legend()
# -
# ## 2.11 looking for correlation
#
corr_matrix = housing.corr()
corr_matrix['median_house_value'].sort_values(ascending=False)
corr_matrix
# +
from pandas.plotting import scatter_matrix
attributes = ['median_house_value', 'median_income', 'total_rooms', 'housing_median_age']
scatter_matrix(housing[attributes], figsize=(12, 8))
# -
housing.plot(kind='scatter', x='median_income', y='median_house_value', alpha=0.1)
# ## 2.12 attribute combinations
# +
housing['rooms_per_household'] = housing["total_rooms"] / housing["households"]
housing['bedrooms_per_room'] = housing["total_bedrooms"] / housing["total_rooms"]
housing['population_per_household'] = housing["population"] / housing["households"]
# -
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
# +
from pandas.plotting import scatter_matrix
attributes = ['median_house_value', 'median_income', 'rooms_per_household', 'bedrooms_per_room', 'population_per_household']
scatter_matrix(housing[attributes], figsize=(12, 8))
# -
housing.plot(kind='scatter', x='rooms_per_household', y='median_house_value', alpha=0.1)
# plt.axis([0, 5, 0, 520000])
plt.show()
housing.plot(kind='scatter', x='bedrooms_per_room', y='median_house_value', alpha=0.2)
housing.describe()
# ## 2.13 prep data for ML
housing = strat_train_set.drop('median_house_value', axis=1)
housing_labels = strat_train_set['median_house_value'].copy()
housing.head()
housing_labels.head()
# ## 2.14 data cleaning
# +
# housing.dropna(subset=['total_bedrooms']) # option 1
# housing.drop('total_bedrooms', axis=1) # option 2
# option 3
# median = housing['total_bedrooms'].median()
# housing['total_bedrooms'].fillna(median)
# +
try:
from sklearn.impute import SimpleImputer
except ImportError:
from sklearn.preprocessing import Imputer as SimpleImputer
imputer = SimpleImputer(strategy='median')
# -
housing_num = housing.drop('ocean_proximity', axis=1)
imputer.fit(housing_num)
imputer.statistics_
housing_num.median().values
X = imputer.transform(housing_num)
housing_tr = pd.DataFrame(X, columns=housing_num.columns)
housing_tr.head()
housing_tr.describe()
# ## 2.15 scikit-learn design
# ## 2.16 text and categorical attribs
housing_cat = housing[['ocean_proximity']]
housing_cat.head(10)
# +
from sklearn.preprocessing import OrdinalEncoder
ordinal_encoder = OrdinalEncoder()
housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)
housing_cat_encoded[:10]
# -
ordinal_encoder.categories_
# +
from sklearn.preprocessing import OneHotEncoder
cat_encoder = OneHotEncoder()
housing_cat_1hot = cat_encoder.fit_transform(housing_cat)
housing_cat_1hot
# -
housing_cat_1hot.toarray()
housing_cat_1hot
cat_encoder.categories_
# ## 2.17 custom transformers
# +
from sklearn.base import BaseEstimator, TransformerMixin
rooms_ix, bedrooms_ix, population_ix, households_ix = 3, 4, 5, 6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
def __init__(self, add_bedrooms_per_room=True):
self.add_bedrooms_per_room = add_bedrooms_per_room
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
rooms_per_household = X[:, rooms_ix] / X[:, households_ix]
population_per_household = X[:, population_ix] / X[:, households_ix]
if self.add_bedrooms_per_room:
bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room]
else:
return np.c_[X, rooms_per_household, population_per_household]
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing.values)
# -
housing_extra_attribs = pd.DataFrame(housing_extra_attribs,
columns=list(housing.columns)
+['rooms_per_household', 'population_per_household'],
index=housing.index)
housing_extra_attribs.head()
# ## 2.18 feature scaling
# ## 2.19 transformation pipeline
# +
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
num_pipeline = Pipeline([('imputer', SimpleImputer(strategy='median')),
('attribs_adder', CombinedAttributesAdder()),
('std_scaler', StandardScaler()),
])
housing_num_tr = num_pipeline.fit_transform(housing_num)
# -
housing_num_tr
# +
from sklearn.compose import ColumnTransformer
num_attribs = list(housing_num)
cat_attribs = ['ocean_proximity']
full_pipeline = ColumnTransformer([('num', num_pipeline, num_attribs),
('cat', OneHotEncoder(), cat_attribs),
])
housing_prepared = full_pipeline.fit_transform(housing)
# -
housing_prepared
housing_prepared.shape
housing_labels.shape
# ## 2.20 select and train a model
# ## 2.21 training and evaluating on the training set
# +
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
# +
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
print('predictions:', lin_reg.predict(some_data_prepared))
# -
print('labels:', list(some_labels))
# +
from sklearn.metrics import mean_squared_error
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_rmse
# +
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_labels)
# -
housing_predictions = tree_reg.predict(housing_prepared)
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
tree_rmse
# ## 2.22 better evaluation using cross-validation
# +
from sklearn.model_selection import cross_val_score
scores = cross_val_score(tree_reg, housing_prepared, housing_labels,
scoring='neg_mean_squared_error',
cv=10
)
tree_rmse_scores = np.sqrt(-scores)
# -
tree_rmse_scores
# +
def display_scores(scores):
print('scores:', scores)
print('mean:', scores.mean())
print('std:', scores.std())
display_scores(tree_rmse_scores)
# +
lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels,
scoring='neg_mean_squared_error',
cv=10
)
lin_rmse_scores = np.sqrt(-lin_scores)
display_scores(lin_rmse_scores)
# +
from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor(n_estimators=100, random_state=42)
forest_reg.fit(housing_prepared, housing_labels)
# +
housing_predictions = forest_reg.predict(housing_prepared)
forest_mse = mean_squared_error(housing_labels, housing_predictions)
forest_rmse = np.sqrt(forest_mse)
forest_rmse
# +
from sklearn.model_selection import cross_val_score
forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels,
scoring='neg_mean_squared_error',
cv=10
)
forest_scores_rmse = np.sqrt(-forest_scores)
display_scores(forest_scores_rmse)
# +
from sklearn.svm import SVR
svr_reg = SVR(kernel='linear')
svr_reg.fit(housing_prepared, housing_labels)
housing_predictions = svr_reg.predict(housing_prepared)
svr_mse = mean_squared_error(housing_labels, housing_predictions)
svr_rmse = np.sqrt(svr_mse)
svr_rmse
# -
# ## 2.23 fine-tune model
# ## 2.24 grid search
# +
from sklearn.model_selection import GridSearchCV
param_grid = [{'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},
{'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}
]
forest_reg = RandomForestRegressor()
grid_search = GridSearchCV(forest_reg, param_grid,
cv=5,
scoring='neg_mean_squared_error',
return_train_score=True
)
grid_search.fit(housing_prepared, housing_labels)
# -
grid_search.best_params_
grid_search.best_estimator_
# +
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres['mean_test_score'], cvres['params']):
print(np.sqrt(-mean_score), params)
# -
pd.DataFrame(grid_search.cv_results_)
# ## 2.25 randomized search
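# As a minimal sketch of what this section covers (the parameter distributions and `n_iter` below are arbitrary illustrative choices, not necessarily the book's values):
# +
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import randint

param_distribs = {'n_estimators': randint(low=1, high=200),
                  'max_features': randint(low=1, high=8),
                  }
rnd_search = RandomizedSearchCV(RandomForestRegressor(random_state=42),
                                param_distributions=param_distribs,
                                n_iter=10, cv=5,
                                scoring='neg_mean_squared_error',
                                random_state=42)
rnd_search.fit(housing_prepared, housing_labels)
rnd_search.best_params_
# -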
# ## 2.26 ensemble methods
# ## 2.27 analyze the best models and their errors
feature_importances = grid_search.best_estimator_.feature_importances_
feature_importances
extra_attribs = ['rooms_per_hhold', 'pop_per_hhold', 'bedrooms_per_room']
cat_encoder = full_pipeline.named_transformers_['cat']
cat_one_hot_attribs = list(cat_encoder.categories_[0])
attributes = num_attribs + extra_attribs + cat_one_hot_attribs
sorted(zip(feature_importances, attributes), reverse=True)
# ## 2.28 evaluate your systems on the test set
# +
final_model = grid_search.best_estimator_
X_test = strat_test_set.drop('median_house_value', axis=1)
y_test = strat_test_set['median_house_value'].copy()
X_test_prepared = full_pipeline.transform(X_test)
final_predictions = final_model.predict(X_test_prepared)
final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
# -
final_rmse
# +
from scipy import stats
confidence = 0.95
squared_errors = (final_predictions - y_test) ** 2
np.sqrt(stats.t.interval(confidence, len(squared_errors)-1, loc=squared_errors.mean(), scale=stats.sem(squared_errors)))
# -
| Ch2 end-to-end machine learning project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mean-Variance Optimization
#
# <img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/d/da/Newton_optimization_vs_grad_descent.svg" width="400px" height="400px" />
#
#
# **Portfolio theory** is one of the most important advances in modern finance and investing.
# - It first appeared in a [short article](https://www.math.ust.hk/~maykwok/courses/ma362/07F/markowitz_JF.pdf) called "Portfolio Selection" in the March 1952 issue of the Journal of Finance.
# - It was written by an unknown student at the University of Chicago, named <NAME>.
# - It is a short paper (only 14 pages), with little text, easy to understand, many figures, and only a handful of references.
# - It received little attention until the 1960s.
#
# Eventually, this work became one of the greatest ideas in finance, and it earned Markowitz the Nobel Prize almost 40 years later.
# - Markowitz was only incidentally interested in stock markets and investments.
# - He was more interested in understanding how people make their best decisions when facing trade-offs.
# - The principle of conservation of misery. Or, as gym instructors would say: "no pain, no gain".
# - If we want more of something, we have to give something up somewhere else.
# - Studying this phenomenon is what attracted Markowitz.
#
# So nobody gets rich by putting all their money in a savings account. The only way to expect high returns is to take on considerable risk. However, risk also means the possibility of losing as well as of winning.
#
# But how much risk is necessary? And is there a way to minimize risk while maximizing gains?
# - Markowitz basically changed the way we investors think about those questions.
# - He completely altered the practice of investment management.
# - Even the title of his article was innovative. Portfolio: a collection of assets, rather than individual assets held in isolation.
# - At the time, a "portfolio" referred to a leather briefcase.
# - In the rest of this module, we will deal with the analytical side of portfolio theory, which can be summarized in two phrases:
#     - No pain, no gain.
#     - Don't put all your eggs in one basket.
#
#
# **Objectives:**
# - What is the capital allocation line?
# - What is the Sharpe ratio?
# - How should we allocate our capital between a risky asset and a risk-free asset?
#
# *Reference:*
# - Lecture notes from the course "Portfolio Selection and Risk Management", Rice University, available on Coursera.
# ___
# ## 1. Capital allocation line
#
# ### 1.1. Motivation
#
# The portfolio construction process then has the following two steps:
# 1. Choose a portfolio of risky assets.
# 2. Decide how much of your wealth to invest in that portfolio and how much to invest in risk-free assets.
#
# We call step 2 the **asset allocation decision**.
# Important questions:
# 1. What is the optimal portfolio of risky assets?
#  - Which is the best portfolio of risky assets?
#  - It is a mean-variance efficient portfolio.
# 2. What is the optimal asset allocation?
#  - How should we split our wealth between the optimal risky portfolio and the risk-free asset?
#  - Concept of the **capital allocation line**.
#  - Concept of the **Sharpe ratio**.
# Two important assumptions:
# - Mean-variance utility functions.
# - Risk-averse investor.
# The surprising idea that will come out of this analysis is that, whatever the investor's attitude toward risk, the best portfolio of risky assets is identical for all investors.
#
# What will matter to each of us individually is simply the optimal asset allocation decision.
# ___
# ### 1.2. Capital allocation line
# Let:
# - $r_s$ be the return of the risky asset,
# - $r_f$ the risk-free return, and
# - $w$ the fraction invested in the risky asset.
#
# <font color=blue> Derive the capital allocation line on the board.</font>
# **Three Doritos later...**
# #### Capital allocation line (LAC):
# $E[r_p]$ is related to $\sigma_p$ in an affine way, that is, through the equation of a straight line:
#
# $$E[r_p]=r_f+\frac{E[r_s-r_f]}{\sigma_s}\sigma_p.$$
#
# - The slope of the LAC is the Sharpe ratio $\frac{E[r_s-r_f]}{\sigma_s}=\frac{E[r_s]-r_f}{\sigma_s}$,
# - which tells us how much return we obtain per unit of risk taken by holding the risky asset (portfolio).
# Now, the question is: where on this line do we want to be?
# ___
# ### 1.3. Solving for the optimal capital allocation
#
# Recapping from the previous class, we have the indifference curves: **we want to be on the highest possible indifference curve that is tangent to the LAC**.
#
# <font color=blue> See on the board.</font>
# Analytically, the problem is
#
# $$\max_{w} \quad E[U(r_p)]\equiv\max_{w} \quad E[r_p]-\frac{1}{2}\gamma\sigma_p^2,$$
#
# where the points $(\sigma_p,E[r_p])$ are constrained to lie on the LAC, that is $E[r_p]=r_f+\frac{E[r_s-r_f]}{\sigma_s}\sigma_p$ and $\sigma_p=w\sigma_s$. The problem above can then be written as follows:
#
# $$\max_{w} \quad r_f+wE[r_s-r_f]-\frac{1}{2}\gamma w^2\sigma_s^2.$$
#
# <font color=blue> Find the $w$ that maximizes the expression above on the board.</font>
# **Three Doritos later...**
# The solution is then:
#
# $$w^\ast=\frac{E[r_s-r_f]}{\gamma\sigma_s^2}.$$
#
# Intuitively:
# - $w^\ast\propto E[r_s-r_f]$: the higher the excess return of the risky asset, the more we want to invest in it.
# - $w^\ast\propto \frac{1}{\gamma}$: the more risk-averse you are, the less you want to invest in the risky asset.
# - $w^\ast\propto \frac{1}{\sigma_s^2}$: the riskier the asset, the less you want to invest in it.
# ___
# ## 2. Example of optimal capital allocation: U.S. stocks and T-bills
# Let's put some numbers to the data, to illustrate the derivation we just did.
#
# In this case, we will consider:
# - **Risky portfolio**: the U.S. stock market (represented by some market index such as the S&P 500).
# - **Risk-free asset**: U.S. Treasury bills (T-bills).
#
# We have the following data:
#
# $$E[r_{US}]=11.9\%,\quad \sigma_{US}=19.15\%, \quad r_f=1\%.$$
# Recall that we can write the expression of the LAC as:
#
# \begin{align}
# E[r_p]&=r_f+\left[\frac{E[r_{US}-r_f]}{\sigma_{US}}\right]\sigma_p\\
# &=0.01+\text{S.R.}\sigma_p,
# \end{align}
#
# where $\text{S.R.}=\frac{0.119-0.01}{0.1915}\approx0.569$ is the Sharpe ratio (what is this, exactly?).
#
# Let's plot the LAC with these real data:
# Import the libraries we are going to use
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Data
Ers = .119
ss = .1915
rf = .01
# Sharpe ratio for this asset
RS = (Ers - rf)/ss
# Vector of portfolio volatilities (suggested: 0% to 50%)
sp = np.linspace(0,.5)
# LAC
Erp = rf + RS*sp
# Plot
plt.figure(figsize=(6, 4))
plt.plot(sp, Erp, lw=3, label='LAC')
plt.plot(0, rf, 'ob', ms=10, label='Risk-free')
plt.plot(ss, Ers, 'or', ms=10, label='Risky portfolio/asset')
plt.legend(loc='best')
plt.xlabel('Volatility $\sigma$')
plt.ylabel('Expected return $E[r]$')
plt.grid()
# Well, where on this line would we want to be?
# - As we already saw, it depends on your preferences.
# - In particular, on your attitude toward risk, measured by your risk-aversion coefficient.
#
# Solution to the optimal capital allocation problem:
#
# $$\max_{w} \quad E[U(r_p)]$$
#
# $$w^\ast=\frac{E[r_s-r_f]}{\gamma\sigma_s^2}$$
# Since we already have data, we can try this for several risk-aversion coefficients:
# import pandas
import pandas as pd
# Create a DataFrame with the weights, expected
# return, and volatility of the optimal portfolio
# between the risky and risk-free assets,
# indexed by the risk-aversion
# coefficients from 1 to 10 (integers)
gamma = np.arange(1,11)
dist_cap = pd.DataFrame({'$\gamma$':gamma,
'$w^{\ast}$':(Ers - rf) / (gamma * ss**2)})
dist_cap
g = 4.5
w_ac = (Ers - rf) / (g * ss**2)
w_ac
# How do we interpret $w^\ast>1$?
# - When $0<w^\ast<1$, then $0<1-w^\ast<1$, which implies long positions in the stock market and in the risk-free asset.
# - Conversely, when $w^\ast>1$, we have $1-w^\ast<0$. This implies a short position in the risk-free asset (assuming that is allowed) and a long position (of more than 100%) in the stock market: leverage.
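# For example (a small sketch using the quantities already computed above, with the same $\gamma=4.5$ chosen earlier), the expected return and volatility of this optimal, leveraged allocation are:
# Expected return and volatility of the portfolio at the optimal weight w_ac
Erp_opt = rf + w_ac * (Ers - rf)
sp_opt = w_ac * ss
w_ac, Erp_opt, sp_opt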
# # Announcements.
#
# ## 1. Quiz next class.
#
# ## 2. You can check your grades at the following [link](https://docs.google.com/spreadsheets/d/1BwI1Mm7B3xxJ-jQIQEDQ_WdRHyehZrQBpHGd0hY9fU4/edit?usp=sharing)
#
# <footer id="attribution" style="float:right; color:#808080; background:#fff;">
# Created with Jupyter by <NAME>.
# </footer>
| Modulo3/Clase12_OptimizacionMediaVarianza.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="1s6mjoaXjoSp" executionInfo={"status": "ok", "timestamp": 1640829171229, "user_tz": 360, "elapsed": 146, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}}
import re
# + id="d-_QCnVEjp8e" executionInfo={"status": "ok", "timestamp": 1640829171393, "user_tz": 360, "elapsed": 2, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}}
string = """>Rosalind_4776
TTTGTATTTCGATGTCTTGGACAACTCTTCTCATCCTTCGAGGCCCACATTGGACCCTGC
ATCGGGAGGTATGGGGGACCGAGAATGAATAGCGCGTGTGCGCATCAAGATTCAGAGCGA
GACGCGATAGGATATCGGGCGAGCGCATGTGAGACGCCTCCTCTCGAGTCTGTAGCGGAT
CCCGGCGCGCATAACTTCCCTTACGAATGCTTTGGGAATTACAACCATCCTTTGTAGCAT
TTTGACGGAAAGACATGTTCGAAGGAATGGCTTGGGAGGTGTCCCCGCCCACTTGCGACT
GACTTGCTTATACCTACTTTGATTCTCGTCTGTTCTAGATAGCCCAGTTGCCATACCCTT
TAACCCCCGAGCGAATAACAATTGCAGCTCGCCTAATCAGAGAGTCGTACATGGTCGGAA
TGTTCTATAGCTGGTCAGGGCGGGGCGTTTCGTTCCAAGTGTTCAGTGGTAGACCCTGCT
TAGTGGGTGCAGCACAGGGGTCGGGGCCGGCGAGGATTTGGGTGCCGAAGAAGCAGTGTA
CAATTGAACATAGTACCATCCGGTGGAAGAGGAGACGATTAGTCCCGAAATGAAATCACA
TAAGACCCATGTGGGTGAAAAAGAGGACAACGAGGCAACCTTCGTCCCGCGGTACGACTC
TGCGACCGACGTGATCTCGCGTGAATCGCCTTTGATGGTGCCAAAGTTTAGTTCATTAAA
TCGATGTATGAAGGAGCAGCCAGCACGTCAGTATTTTGCCCCCTGGGCCACAGCCACATA
TCGGCATTTACGACTTTTCAGTAGGCTGGCAGCTTTACGTTTGGGCTTTGTCGTTTTTGT
CGTCCGAATGTTTATCCTGATGACCAAGTGCCTCGGTTTCGCGCAGAATCGGTTGTACTA
CCTTACACCTTCATTCTAGCCGCTCCTGGTTTCCT
>Rosalind_4614
TTGGGCATCCACACCGGGCGACTATAGTGTTATTCAGAGATTGGGCAGCGGGGATAGAAC
CACCCTTACGGCCGTTTGCGTTATCTGTCCTAACAATCATTTCAGTCTACTAGATCGCCG
ATAACAGGGTCCCGTAGCTGTGGGAACGAAACTAGTCCCGGTAGCTGAATTCAGTCAGTG
AGCTCCTTAGCTCGACTGACACCGCGACCATGCGTCTCACCGCAATTCAGGGAGGTGTAA
CAATGGGCCTCATTAATACGGTAAGCATTAAATGTGCGAACTACCGAGCCAGGACTGGAG
ACCACCGCTCCTGCATTCTTCTCTACTTATCCAGCAATATGGTGTACCCGGTTTAGACCA
GTTAATGACCCCATCTTGTAGTAGGAAGGCACTAACCCTGGATGCAATCACTGTCAGTGC
TGCCTCCATGTACTTATACCACGAAGACAATCCGTTCTCATTGGTCCTTAGTGATTGATT
GATGTTTACACACTTCGGCAGAGCCGCTTCTAACTTTTTTGCATGATTGTCCTATCCATC
ATAATTTTTTACGGCGGAAATCCACATACCATCACGTTAAGCCTCCAGTTAGCTCGGGCA
GTCAATCGAAAGTCATGCCGTTCTTAAAGCTTTTCGCCTGTTCAACGAGCACCTACTGGT
TAGCCAGTCCTTCCATACTTATTAGAGTGTGGGTCCAACAGCCGTAAAATATGATCGGAA
ACATGGAAAGATACCTGCTTTTCACCGTATCCCGTGTACTAGGTGCGAGCCGGCCCATGT
CACGAGTTGGCTCTGGTCAATGTTTCAC
>Rosalind_6073
GTAGTCGACCCGGGGTGTTCTGCAACTAATCGGGACACTATCGCAGATTCCAGTTGCTTG
GAAATCACTACTAGGGCTCGCCTCTTGCTTTGAGGTTACAGGCTTGAGGGGCGACGCTTC
CCACTTCCACGTAAGGAAGATCGCAGGCTCGGTGATGAACGGTTCCAGGTGCCGATGCGC
CCAACTCTTCCTAACCAACCAAACGCATCATTAGCTCACTGTCTTGTTACCCAGGCTACA
AGCTTTTTGCTGTTCGTACACCATCACCGAGTCCGTATCTAGCCGGGACTGAGAATGAGT
TGTTGGGTTGGTTGAGGAGGTTGGAAGATCCAGATGCCAAGACACTCGAAGCCTCCGGTT
CCAACGCTTCTTAGTCCCGAGTGCTACATGACTCTCAATGTAAGTGGGTCTCCTACGCCT
CTCTAACTAAGCTCCGGATGTCGCCCCCGGCTGCCATAGGTCAATGTGAACCCACAAGGG
ATAGTGGCTTGCGAGGTGGGCCTAGAGGATCGGTCGGCCCTTTGGGTCGCGCGTCGCGAT
ACGAGCGAAGAATATCACTATTCTTCGTGATAGTTATCATTCTGGCCAGACAAGTTAGGC
CGAACGCTTGGCATTGAGACGACCCCGGTTTGTTACCGGCTCCCGTGTTAAAGGCACACC
GGTATATAAAAGGGGTAACGTCTCGGGGTTACAAGCCTCTGTAGTTCCCTCACGGCCCCA
GCTCTTCACAAAGCGTAACCACCCAGCACTGTACAATGATTCTGATGCGGCTAGGGACCT
AAAAGCGGCGGGTTGCCTCGCGCTTAGACCTGTTCCGAGTGTAGCTAGCAAAGATGCAGA
GCTTGATAACGTAAAGACTG
>Rosalind_4462
GTTCTACCCGAGCGCTCCCTCCGAATCGCACGGGCCGGGCCGGAAATGCAGAGATTTAGA
ACACATCTCGAAAATAGCAAGTTGCGCCCTTAAAGTCGAGTACACCACCACTAGACTCAA
CAACATAGCATTCGAGGTTAGGGCGCGAATTAAATAGAGGACCAGACCCCTCTATAGTAC
CAGTGGCGGTTGCAGAGGCTTGGGTGGCCCGTTGATGTGAAATCGAGCGCAACCTTGGCC
GCGAGGTCGGGCTATCAGTTATGCTTAGGCCCCACCGTGTGGGAACAACTGGTGGTGATC
GACTCGTCGTCAAATTATGGACACATGATCTAAAGTTGTCGGTGTACTATTGACAATATG
CACCGTGTCCCCCAAGGCGGCAATTGCGAGAACGTGTATCGAACAGCGACATTGCCGAGG
GTCCACTAGTTAATAGTCACACGATTTAAGCGAGATGCATCATCGTGGAGGACGACCGCG
CCGACTTGATTGCACCAGTCATGTTATTAGGAGGCCCCAAGCATCATGCGCCAGTCACTT
ATTACCAATGTATACCCAAATTACCCAAGGAACTGTAAACTGCTTAAAGCGTTCTACTAG
TCCCTCTAGGAGACCGTTAACTTTTAACCCATCGACCTAATATTCAGAAGGCAGAGGCCT
GAGCCCCAGAACTTACTCAGTCATTCAACAGTCAAAGAGTCAAAATGCCATATTGTTTCC
TTCTATTCGTCGCACAAAGGCCGCTCGATGACGATTAACACCACGGACCAGACCAACACC
AAGGAGCTCCAACCGACTTGGGACTAGCTCACCCGTAGCATATGGAAAAACCGAATTGAT
AAATGATAGCTGGCATGGGTGTGTTCCTAGTTGTCACCAGACACCTTTCGCCCGCCCCCT
TTTGAACTGTAGACTATAGTTTTTTTCAGGCTTGCTTCTT
>Rosalind_2655
GTTGACTTGTTTGCTGCGTCAGATCAACTGCTCTACGCACGGTGAGTACAAGGCGTCTAT
GTATTAAACGTCAGCGGAAAATTAAAAGGCAGAGCGGTTGGGGTGGACGTACTTTCACTT
CTAAGAGTAGTAGAACGCGGATTTCTGGCGCTAAGAAGATGAAGCAGAAGCACACCCATT
GAATAATGGAGCACTATCCTGGGTCAAGAGGCCGCTGTCCACCGCATCCTTAAGTGTCGG
TGTTCGCCCCGCACGCACATCATTGGGTACGCCGGTGTCGTATGAGCAAAGTCGGGAACA
ACATATGTATAGGGGGACCAAAGTCGATTAAGACGTGTGGCTCACTAGGTTCTCCGACGC
TTCGACCGATGATAGGAGGAGCGGCCATGGCCTGGCTCGCATTCAGTAAGGTCTCACCCT
TGTTTTGTAGGCTGACGCGCGTGAGTTATCCGTTTACCGTCGTAGTAAGGATCCTGATGG
GCGAGTCGACGAGGTGCGAGTCTGCATGAACGAATTGTAATGGGCCGTCCGATGCCAGAT
TTATTTTGGAATAGTTTAGTGGCTGCTCTGCTGGGACCTTGGCGATGATGAAGGCGCACG
AAACAAAAGGTTCTCCTTACCCTGCCGCAAGTGTGCTGGTCGTCTACGTATATACAGTCG
GTGCTGCTTGTCAGATGTCCGATGGTATACCACGGGATACGGCTGATAATATAAACGACC
GCGTGGATTAGCAGCAGTAGCAGGATACTAGACAGGTTATGTTGCCTAATAGCGCCGCCT
CCCTTCAAGTGACCCAGGACTGCGTTATCACGCCCAGTCCAGACAGTTAGCAGGCCCGCG
TGTAGCGTGTTCACGCCAGGCACTGTCACGATCAATCCGCTTTATGGACGTAAAACCAGC
CACTGTTCACAGTAAATTAAAATAAGCCGTAAAAACGCCGGAAACTTTTGGAGTGTCGTC
C
>Rosalind_4774
AGGTGCGGTGTCTAAATCACCGGTAGGCAACCGACTTTGATCATGTCAGTGCGGATCCGA
TGCGGGATGGAACAGGTTGGGCGTTAGCCCATCGCGACGCATGTCAATATGGTACCCTGC
GACGCATAGCAGGGAGGATCCAGCCAAGCTTGCACCATAAATGTGCATGTAATCTTGCTT
TATTTTCTCTCACACAATCGTGGACCTGGTGTCCTAAATTACACGAAATTGCCTGATAGC
ATCTCGCGATGAAGAGTCCTATACGTTCAAAGGAAACTTTGATCGTAGCCGTAGAGATAT
ATACCCGTTCTACGTCTAGACCATGTGGATCCTGGATACTTCCGCTTATGTAAGACCAGC
GGCGTACGATGCACATTTACATGCCACGTACTGAGGTATCGATTGGCACTTGCGTATCTG
CCCGTACCTGAGGTGGCTACCAGAACTTTCCCTCCTCAACACATGACGGAAACCTTGTCC
TCTGCCGGTCCCCACGTCTACAATATTTCTTCCGCGTCTCAAGTAAAGGCTCTATATTGG
TACCTACGCGTATTATACTGTGAAAGTTCGCTCGTGAGCAGATGTGCAGCGCGGGGGGAC
TTTGCGTCCCTTGCGTAGAAACCCTATCTTCATTCTTCGTTGGTGGTAAATAGTTGTGAA
CACGTTTCAATGTTTGCATGTGCACTCTACTAGTCCGTTTTTCACGAAAACGTTCGGCGA
AGTCCGTATGTCAGTGGCGCTTGGAGATTTGAAATATGACAGACATGGTCAGTCACCCAA
GTAGACGGGCAAGAAGTGCACTCGAACCGCGAAGCTGCCCGATTGG
>Rosalind_0206
TTAATTGAGAGCGGTTTAATACTGATGTCCTTTCGTTCCAGGGTGGTCCTAGATTCGTGC
TTCCTGGGACGTGCGTTTCGCCAGTGATTTGTTGCGAGACGACTTGTTTTCGATAACTTC
ACAATCCGTGCAGGCACAATAATGGGCTGCGGAATCTGTGGCAAGGAGTAGGTGCAGCGG
TTCACGCCCTTGGTGAAATTGTGCAGGTTTTCGGATATTAAGGACGAGTGTGGTCAAGGA
CAAACTCATTACGACTCTCTTTGAGAGTAAACTATACCTGAGCCCTGGCGTAAGCAGACA
GCTCCATAACTGCGAGGATCCAAACTAACGCTTCACCTGGTCTAAACCTAGTCCTGGCTT
CAGGCGGAACCTACTCGATTAGGGTGAGCAAAGGAGGGGGCCGTTAACAACCAATTAGCG
TTCGGGCTCGGCTCCACTTGGCGCGCATACATACCATGCGACCAAATTAGCAAACAAGCG
ATAACCCCTCTAGTTGACGCATCAACATCAAGATCAACTGACTATGTAAATGCCCCAACT
ACGCCGAGTATGCCGTTTGAAGAACTATCCTGGCCTGTAGGTATAGAACCGACCGGCGAG
CCGTGCACTGTTGCACCGGTGTCTGAAAGTATTTGGAGGATTTAGACACCATGCGCCCAG
TGGTTGCTGTGGCAATCCTCGCCGGGGGCCCAGGAGGCGTGGTAACTTAGCAGGAAGTTC
TGGCCTATGGAGGTAACAATTCCTGATTGAAAATGTGCTGCCCAGATCTCCGGCCTCTTG
CACTTAACTCAAAAGTTGTTAGCCCTCTTGTAGCGGACTTTTCATCCATTCGACCATCGT
CTTCCACCACCCTCTATCATTATAGTCACTAGTGACCCGGATGAATGTACGAGTAGCATG
ACTTTGCGGAACATGAGCTAAAGGCAAGAGGGGGATATTGCGGAATGTA
>Rosalind_3092
CGAGAGGGTACGGCTACAACTTCGCCACTCAAGATAAAACTGTAGACCATTTTTAGCAGT
CGTTCAAGCAGTCCCAATCGAACCTGCTTCTGAATTACGGCCATGGATGAACTTTTCACC
GATCTGGGCCCGACCCTATGGTCTATCATTTAACGGCATACGCAAAGTCTCGGAGCGAAC
TGTCTATAAGATTACCCTGGAGGACTTGTATGATAGGGTCGAGGATCAGTGTACGATGAT
GCGATAGAGGTATTAGGCGACGTAGACAGTTTTAAGTGTTCGCGTCGTGTAGTGTTGTTT
CGCCATCAAAAGCCCTGCCGATGCAGGAGATGATTAAGCTGCCACTAGATGCACGGAGTA
GCATAGGAGGGCGCAACCGTGGTTTCGGCGCCGCAAACTAGAGAACGAGTTGGGCGCACT
CATCTGCCTTCGAAGTTCTTGCCTTCGTGACAGACGTGTAATTGTCTAGGTGTGTTCCCC
GGACTCTGATGAAATGACCGGCTGCTACGATTCGAGTGCCTCCTCGGTGGCGGTTAGGAT
TTATCCAGAGTCCCTGGTGGATGCAAAATGCGTCGATAGCAAATCCCATAGCCACCCAAT
GGATAGGTGCTCCATCCCCAGTCGGGTATCTGCAGCACCTAGAGAAGTTGGGCGAGGCTC
GCAGTCTTTGGCACTATAGACTCAAGAGAAGTATGAGTGGTAGATTCTTTGAGATATGGC
CTCTGATCCATCTCGGGAAAATTTCTCAGATCTCGGTTGTCCACATTGATGATGGGTTGT
GAGGTTAGTTACAATTGCTGGGGGAGCTATACCGCCGGTCACCCATTCATGACGCAGTTT
CAAATCTCCTAGTCCAAGAGGGCTGAAAGGTTTGGGCGAATCGTGTACACGCCTTTGCCA
AGTAGTATCGCATTTGCATCTGGGGGAAGAACATCAGACACCGTTCTCGCCCGTGCGCGC
TTACAGGACACGCGACAGGATGATTCGGGTAAATAGCCAG
>Rosalind_8271
AATTCGAACAGTTGGTTTGTAGGGAATCAACTGGAGCCAACCGCCAAAAACACCAAAGCC
ATGACCTATATTCCCGGAGAAGCCGTGAGCACATAACCGGTCGACCGGCCCAATTCAACA
CTTCGATGTACACCTTTGGGAAGCCTATAATAATGTTGGCGGTTAGGACTGAGACTATTA
AGACCTCGGCTTATAGGCCGATGGAGTATGCCCCCCCAGCGGGGCCATAGAAGCCCAGCT
TCGCCTGATCAAGTAATTAGCACTGGCCTTGCCATTTGTCCGATCATGGTACTCACTTAT
CTGTCATAATCCCTTAGGCATAATAGATAGCAAAGCTGACGTGGACCCGTTGTCTGCCGA
GAGGCATGGTTCTGCTAAAAATCGAGGGCCGGTACTCGCAGCATTACCTCGCTCTTACGA
GGGACTGCCCTGATTTCTTTCTAGCGCCGACCTTGTACTACAGAATTGCTATTCTCTAAC
TTGTCACGGGAACCAGGCCGCTTCATTAGACCTGGCATGTAAAGATCGGAGGATCTGCGC
GCGTCTGGGACAACGCGACTAATAACTCGTTACGGTAAAGTTTGATCGGTAGTATCACTC
CTAACGCACATTCTCCGTGTCACCGCAAACGCAAATGTAGGGCCAAGGCGTGTTATCCAG
GTACAATTTGATACGTGGATAGTCCGGCTTCTAAACTCACGGAGATCCTTCTATTATTGA
AGTTTAGTCTCACATGGGGAATGTTTAGTATATACATCAATATTATCGCCCCCCTACTCA
CCAGGAGCAGTGTGTAGTTAACAATAAGAACGGGACATGAATGAGCTACCAATACGGTTT
CTGC
>Rosalind_7322
TTGGATCCCTTTAATGCCAGAAAAGCGACTATACTCCCGCGTCCACTCGTGAGTTCCATG
TTTTGGCATGTCGGAATCCGGAGCGGCTGATGATCGAAATAGAGCAATTGCTTTGTCATA
TTGAACTCACTACGAAGCGAAATTCAGTGAACGTAGCTCTGACCCGGCTGTCTATTAAGG
CATATGTACCCGAAACGCGAGGGCTAAGATACGTGCCAAGTCCAGACCTCGTAGAGCATT
TGATTATGTATGCTGGCGTGGAGACGACCGACAATGAAAAGTTCGTCGGACAAGACTGAA
TGCCGAGGCCGGACCCTGAACGAACCTCGACTTTATTTGGTAACGAGATCGGCCGGACAT
GTTAGGCCGCAAGCCAATCCTCGCGTCTCGCACAATGCGGTAGCCACACAAAATGATCTA
GGGAGCTGTTAGCTTAAGCGTCTTATGCATGTACTTTGAGGACTGCCCGTCAACCACCTT
GCAGTTGACGGGAACCCTGCCCCGACCCGTATAACTAGCTCCTGTAATATGCGAACGGGC
CCCGGAAGATACGACGGGGTGGTGTCGGCAATTAATTGAGCCTCCGGAAGCTAACAAGTG
TGCGAAGTTATTTATTGATCGCAGGGCGATTCTCTTGATTACACCACCGAGCCTTTAGAT
CTAGGAATATGACTAATGCGAGGGAAGTGGCCACTCTCGTGTATCTCGGTTTTTCTCGCT
AAAGGGCCAAATATAAGATTGGCGTTGGTTCCAAACCTTTTGATGGAAGGCCACGCATCA
TCAATGCGGGCTCTGAATTACAATAGGGTGTAATGTTCCACTAGATAAATTCCATGCTCT
CTGGGCACCTTTCCGACAAATTCGGGTTCAATTATTCGGAGTAACTATCTCGCCCCGGCA
GTCACCGAAGCGTATAACACCGTGCCTCTGCGATAAACCCCA"""
# + id="zYVILr37lLa1" executionInfo={"status": "ok", "timestamp": 1640829171660, "user_tz": 360, "elapsed": 4, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}}
def stringBinarySearch(strs, l, r, x):
    # Binary search over the sorted DNA string for the two-character boundary x
    # (e.g. "AC"): the position where the run of the first letter ends and the
    # second begins. Returns that position + 1, i.e. the count of preceding characters.
    mid = l + (r - l) // 2
    if strs[mid:mid+2] == x:
        return mid+1
    elif strs[mid:mid+2] < x:
        return stringBinarySearch(strs,mid,r,x)
    else:
        return stringBinarySearch(strs,l,mid,x)
# + id="nwtq_Edglq6k" executionInfo={"status": "ok", "timestamp": 1640829172327, "user_tz": 360, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}}
def gc_content(string):
    # Sort the DNA string so identical letters are contiguous, locate the A/C,
    # C/G, and G/T boundaries by binary search, and use them to count each base.
    # GC content is the fraction of G and C characters.
    N = len(string)
    string = string.upper()
    sorted_characters = sorted(string)
    sorted_string = "".join(sorted_characters)
    end_idxs = {i[0]:stringBinarySearch(sorted_string, 0, N, i) for i in ["AC", "CG", "GT"]}
    counter = {"A":end_idxs["A"], "C":end_idxs["C"] - end_idxs["A"], "G":end_idxs["G"] - end_idxs["C"], "T":N - end_idxs["G"]}
    gc_c = (counter["G"] + counter["C"])/N
    return gc_c
# + id="1RNa3_filtnV" executionInfo={"status": "ok", "timestamp": 1640829172862, "user_tz": 360, "elapsed": 1, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}}
def max_gc_string(strings):
names = re.findall(r">Rosalind_\d+", strings)
names = [i[1:] for i in names]
dna_strings = re.split(r">Rosalind_\d+", strings)[1:]
dna_strings = [i.replace("\n", "") for i in dna_strings]
gc_contents = [gc_content(i) for i in dna_strings]
idx = gc_contents.index(max(gc_contents))
return names[idx], gc_contents[idx]*100
# + colab={"base_uri": "https://localhost:8080/"} id="kzrt9nVsm6Y9" executionInfo={"status": "ok", "timestamp": 1640829173194, "user_tz": 360, "elapsed": 1, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggw8qO1PovyIdiAN_klBN78Ku2yZx3kD_72EaQuFg=s64", "userId": "12317420038182215048"}} outputId="7e566417-4c80-47ec-e7d0-acd6cccdbf6c"
max_gc_string(string)
# + id="DW2T87mnApog"
| solution_notebooks/Computing GC Content.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 5.12 Densely Connected Networks (DenseNet)
#
# > 1. **The main difference from ResNet is that in DenseNet the output of module B is not added to the output of module A as in ResNet, but concatenated with it along the channel dimension.**
# > 2. **This way the output of module A is passed directly to the layers after module B, so module A is connected to all the layers that follow.**
# ![The main difference between ResNet and DenseNet cross-layer connections: addition vs. concatenation](img/5.12_densenet.svg)
# ### 5.12.1 Dense blocks
# +
import time
import torch
from torch import nn, optim
import torch.nn.functional as F
import sys
sys.path.append("..")
import d2lzh_pytorch.utils as d2l
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# -
def conv_block(in_channels, out_channels):
blk = nn.Sequential(nn.BatchNorm2d(in_channels), nn.ReLU(),
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))
return blk
# Each convolution block uses the same number of output channels.
# In the forward pass, the input and output of each block are concatenated along the channel dimension.
class DenseBlock(nn.Module):
def __init__(self, num_convs, in_channels, out_channels):
super(DenseBlock, self).__init__()
net = []
for i in range(num_convs):
in_c = in_channels + i * out_channels
net.append(conv_block(in_c, out_channels))
self.net = nn.ModuleList(net)
        self.out_channels = in_channels + num_convs * out_channels  # total number of output channels
def forward(self, X):
for blk in self.net:
Y = blk(X)
            X = torch.cat((X, Y), dim=1)  # concatenate along the channel dimension
return X
# +
# 2 convolution blocks, 3 input channels, 10 output channels each: final channel count is 3 + 2 * 10 = 23
blk = DenseBlock(2, 3, 10)
X = torch.rand(4, 3, 8, 8)
Y = blk(X)
Y.shape
# -
# ### 5.12.2 Transition layers
#
# > 1. **Because dense blocks increase the number of channels, transition layers are needed to keep the channel count under control.**
# > 2. **A $1 \times 1$ convolution reduces the number of channels, and an average pooling layer with stride 2 halves the height and width.**
def transition_block(in_channels, out_channels):
blk = nn.Sequential(nn.BatchNorm2d(in_channels), nn.ReLU(),
nn.Conv2d(in_channels, out_channels, kernel_size=1),
nn.AvgPool2d(kernel_size=2, stride=2))
return blk
blk = transition_block(23, 10)
blk(Y).shape
# ### 5.12.3 The DenseNet model
net = nn.Sequential(nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
# +
# Use 4 dense blocks.
# Use transition layers to halve the height and width and halve the number of channels.
num_channels, growth_rate = 64, 32  # num_channels: current channel count; growth_rate: channels added by each conv block
num_convs_in_dense_blocks = [4, 4, 4, 4]
for i, num_convs in enumerate(num_convs_in_dense_blocks):
DB = DenseBlock(num_convs, num_channels, growth_rate)
net.add_module('DenseBlock_%d' % i, DB)
    # number of output channels of the previous dense block
    num_channels = DB.out_channels
    # add a transition layer between dense blocks to halve the channel count
if i != len(num_convs_in_dense_blocks) - 1:
net.add_module('transition_block_%d' % i, transition_block(num_channels, num_channels // 2))
num_channels = num_channels // 2
# -
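
# A quick sanity check (not part of the original text): trace the channel count by hand --
# each dense block adds num_convs * growth_rate channels and each transition layer halves
# the result, ending at the 248 channels stored in num_channels above.

# +
channels = 64
for i, num_convs in enumerate(num_convs_in_dense_blocks):
    channels += num_convs * growth_rate        # dense block: +4*32 channels
    if i != len(num_convs_in_dense_blocks) - 1:
        channels //= 2                         # transition layer halves the count
    print('after stage', i, ':', channels)
print(channels == num_channels)                # should print True
# -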
# Finally, attach a global average pooling layer and a fully connected output layer.
net.add_module('BN', nn.BatchNorm2d(num_channels))
net.add_module('relu', nn.ReLU())
net.add_module('global_avg_pool', d2l.GlobalAvgPool2d()) # (Batch, num_channels, 1, 1)
net.add_module('fc', nn.Sequential(d2l.FlattenLayer(), nn.Linear(num_channels, 10)))
X = torch.rand((1, 1, 96, 96))
for name, layer in net.named_children():
X = layer(X)
print(name, ' output shape:\t', X.shape)
# ### 5.12.4 Loading the data and training the model
# +
batch_size = 256
# If you get an "out of memory" error, reduce batch_size or the resize value.
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=96)
lr, num_epochs = 0.001, 5
optimizer = torch.optim.Adam(net.parameters(), lr=lr)
# d2l.train_ch5(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs)
# -
| dl/dive-into-dl/chapter05-CNN/5.12_densenet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="rJHntLUGHWqW"
# # Fine-tuning Training on Two-tower BERT<sub>BASE</sub> + Transformer
#
# This notebook fine-tunes a two-tower BERT<sub>BASE</sub> + Transformer model for a recommendation task on grouped user and item reviews.
# + id="ohB1nITJ1EdF"
import os
import urllib
from google.colab import drive, files
from getpass import getpass
# + colab={"base_uri": "https://localhost:8080/"} id="AiQnW4Q81VqM" executionInfo={"elapsed": 301, "status": "ok", "timestamp": 1617974843723, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05289514665097855798"}, "user_tz": 240} outputId="71c6b17e-cdab-42e6-e079-02ae80330581"
ROOT = '/content/drive'
GOOGLE_DRIVE_PATH = 'My Drive/Colab Notebooks/recommender/w266-final'
PROJECT_PATH = os.path.join(ROOT, GOOGLE_DRIVE_PATH)
drive.mount(ROOT)
# %cd {PROJECT_PATH}
# + id="Pdb-ImP91gxi"
import os
import sys
import re
import pandas as pd
import numpy as np
import itertools
import pickle
import random
import tensorflow as tf
import matplotlib.pyplot as plt
from commons.store import PickleStore, NpyStore
from tqdm import tqdm
from IPython.core.display import HTML
from importlib import reload
# %load_ext autoreload
# %autoreload 2
# + [markdown] id="6ZK1RdVaIHr4"
# ## 1. Load Pre-filtered Dataset
#
# First, we load the clean pre-processed dataset.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="MLUxSobB1m6m" executionInfo={"elapsed": 19342, "status": "ok", "timestamp": 1617974866437, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05289514665097855798"}, "user_tz": 240} outputId="6985fdc6-913a-4c76-b9d7-85835b34e8c0"
amazon = False
if amazon:
input_pkl = '../dataset/25-65_tokens_grouped_Movies_and_TV_v2.pkl'
else:
input_pkl = '../dataset/25-65_tokens_grouped_yelp.pkl'
pkl_store = PickleStore(input_pkl)
grouped_reviews_df = pkl_store.load(asPandasDF=True, \
columns=['reviewerID', 'asin', 'overall', 'userReviews', 'itemReviews'])
print(len(grouped_reviews_df))
display(HTML(grouped_reviews_df.head(1).to_html()))
# + [markdown] id="EmJVMPH1IOcC"
# We select the relevant columns and convert them to a NumPy array.
# + id="HRSkA_1718GM"
grouped_reviews = grouped_reviews_df[['reviewerID', 'asin', 'overall', 'userReviews', 'itemReviews']].to_numpy()
# + [markdown] id="lVcAIUGeIRWA"
# We detect the available hardware and, based on the outcome, define the accelerator strategy.
# + colab={"base_uri": "https://localhost:8080/"} id="4HQ_R1v031JZ" executionInfo={"elapsed": 3301, "status": "ok", "timestamp": 1617974874358, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05289514665097855798"}, "user_tz": 240} outputId="ba13683e-5ca6-4c4b-e11a-8c57dabbdcbd"
# Detect hardware
try:
tpu_resolver = tf.distribute.cluster_resolver.TPUClusterResolver() # TPU detection
except ValueError:
tpu_resolver = None
gpus = tf.config.experimental.list_logical_devices("GPU")
# Select appropriate distribution strategy
if tpu_resolver:
tf.config.experimental_connect_to_cluster(tpu_resolver)
tf.tpu.experimental.initialize_tpu_system(tpu_resolver)
strategy = tf.distribute.experimental.TPUStrategy(tpu_resolver)
print('Running on TPU ', tpu_resolver.cluster_spec().as_dict()['worker'])
elif len(gpus) > 1:
strategy = tf.distribute.MirroredStrategy([gpu.name for gpu in gpus])
print('Running on multiple GPUs ', [gpu.name for gpu in gpus])
elif len(gpus) == 1:
strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU
print('Running on single GPU ', gpus[0].name)
else:
strategy = tf.distribute.get_strategy() # default strategy that works on CPU and single GPU
print('Running on CPU')
print("Number of accelerators: ", strategy.num_replicas_in_sync)
# + colab={"base_uri": "https://localhost:8080/"} id="WpFbpkSGXxO1" executionInfo={"elapsed": 7174, "status": "ok", "timestamp": 1617974881185, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05289514665097855798"}, "user_tz": 240} outputId="f983d2a4-bc85-4190-93c0-a0c4feced38b"
# !pip install transformers
# + id="Pf0r0GgB4ZyX"
import tensorflow as tf
from transformers import BertTokenizer
# + [markdown] id="mWadZJBMIdpG"
# Here, we load the BERT tokenizer using the Hugging Face <img src='https://huggingface.co/front/assets/huggingface_logo.svg' width='20px'> transformers library.
# + colab={"base_uri": "https://localhost:8080/", "height": 164, "referenced_widgets": ["bfb4982fdd944f38b4af4ff330806d75", "972a81f4d8134302b64627753862e099", "<KEY>", "500043f2100f41dabda4ff1cc2ba4096", "14c1d085bcee40a68b00addbb067ec5a", "123ba57dd8a6473a8874b743db226fb7", "581198c7d50b4441aebd0909b8289469", "f118aec5d377497791ce7957bd33bba6", "40e8e92c7ac54f92968f47c262516a8a", "d68c953cfddc48e0bdeeada4381b7c35", "<KEY>", "300d4067dd634d3fbfb750ad09b338ab", "<KEY>", "5e6fe93bda07498cbbde5d1dc99e4b49", "<KEY>", "<KEY>", "2fee7005c92e432bae0b8e9792e007fd", "cc39c3800a2f4f2180b44bddeb4462e3", "4886e7c259e645478b727b7b48309654", "<KEY>", "556a384b85df4615aa73e57d3169b99e", "f5872ae36c934caca476c987f87c1f22", "<KEY>", "<KEY>"]} id="nF533QBe4nri" executionInfo={"elapsed": 1044, "status": "ok", "timestamp": 1617974893637, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05289514665097855798"}, "user_tz": 240} outputId="041c9164-c0b7-4ca0-df58-d295b511bc5c"
bert_model_name = 'bert-base-uncased'
MAX_LEN = 32
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
NO_EPOCHS = 20
BUFFER_SIZE = 100
with strategy.scope():
tokenizer = BertTokenizer.from_pretrained(bert_model_name, do_lower_case=True)
# + [markdown] id="xqwrZN2SImC2"
# ## 2. Train, Validation, and Test Split
#
# We split our dataset into train, validation, and test sets and ensure there is no leakage among them.
#
# For model fitting, we use `MAX_LEN=32` with `BATCH_SIZE=16` and `NO_EPOCHS=20`. We choose these values so that the model fits on a single GPU, given the extremely large number of trainable parameters.
# + id="1KmF5_sg4vXQ"
def train_test_split(reviews, test_percent=0.1):
samples = len(reviews)
train_size = int((1 - test_percent) * samples)
train = reviews[:train_size]
test = reviews[train_size:]
return train, test
# + id="2ql_D-b244_x"
np.random.seed(111)
np.random.shuffle(grouped_reviews)
# split for train and test
if amazon:
test_percent=0.007
else:
test_percent=0.01
train, test = train_test_split(grouped_reviews, test_percent=test_percent)
# + id="ce3_4hGh5HOb"
test_revId_uniques = np.unique(test[:,0])
test_asin_uniques = np.unique(test[:,1])
# Drop train rows whose reviewerID or asin also appears in the test set.
# Note: assume_unique is not used here because train[:,0] and train[:,1] contain duplicates,
# which makes np.isin with assume_unique=True unreliable.
train = train[np.isin(train[:,0], test_revId_uniques, invert=True)]
train = train[np.isin(train[:,1], test_asin_uniques, invert=True)]
# split again for train and validation
if amazon:
val_percent=0.05
else:
val_percent=0.08
train, val = train_test_split(train, test_percent=val_percent) # ,
val_revId_uniques = np.unique(val[:,0])
val_asin_uniques = np.unique(val[:,1])
# Drop train rows whose reviewerID or asin also appears in the validation set
# (again without assume_unique, since the train columns contain duplicates).
train = train[np.isin(train[:,0], val_revId_uniques, invert=True)]
train = train[np.isin(train[:,1], val_asin_uniques, invert=True)]
# pick and rearrange to userReviews, itemReviews, overall
train = train[:, [3, 4, 2]]
val = val[:, [3, 4, 2]]
eval = test[:, [0, 1, 3, 4, 2]]
test = test[:, [3, 4, 2]]
# + id="5SVitYKw8Q35"
def __tokenize(reviews, tokenizer, max_len):
return tokenizer(list(reviews), padding='max_length', truncation=True, max_length=max_len, return_tensors='tf')
def create_tensor_dataset(samples, tokenizer, max_len=128):
"""generate dataset to tensorflow dataset format for two-tower network"""
def gen():
for i, reviews in enumerate(samples):
# tokenize each group of users and items reviews
user_tokens = __tokenize(reviews[0], tokenizer, max_len)
item_tokens = __tokenize(reviews[1], tokenizer, max_len)
yield ({'user_input_ids': [user_tokens.data['input_ids']],
'user_token_type_ids': [user_tokens.data['token_type_ids']],
'user_attention_masks': [user_tokens.data['attention_mask']],
'item_input_ids': [item_tokens.data['input_ids']],
'item_token_type_ids': [item_tokens.data['token_type_ids']],
'item_attention_masks': [item_tokens.data['attention_mask']]},
{'label': [reviews[2]]})
# generator with output signature
dataset = tf.data.Dataset.from_generator(
gen,
output_signature=({'user_input_ids': tf.TensorSpec(shape=(None, None, max_len), dtype=tf.int32),
'user_token_type_ids': tf.TensorSpec(shape=(None, None, max_len), dtype=tf.int32),
'user_attention_masks': tf.TensorSpec(shape=(None, None, max_len), dtype=tf.int32),
'item_input_ids': tf.TensorSpec(shape=(None, None, max_len), dtype=tf.int32),
'item_token_type_ids': tf.TensorSpec(shape=(None, None, max_len), dtype=tf.int32),
'item_attention_masks': tf.TensorSpec(shape=(None, None, max_len), dtype=tf.int32)},
{'label':tf.TensorSpec(shape=(None), dtype=tf.float32)})
)
return dataset
# + [markdown] id="N2lLGKtQI9ny"
# Because the number of reviews varies across the groups of user and item reviews, only the TensorFlow Dataset API with `from_generator` lets us format our dataset and feed it to the two-tower BERT<sub>BASE</sub> + Transformer model. The drawback is that training cannot be parallelized across multiple accelerators, because the generator is a Python function and TensorFlow cannot serialize it under a multi-accelerator strategy. This method does, however, support streaming a large dataset and generating it batch by batch to feed the model.
# + id="wSyY9Yq-BRPn"
train_dataset = create_tensor_dataset(train, tokenizer=tokenizer,
max_len=MAX_LEN)
val_dataset = create_tensor_dataset(val, tokenizer=tokenizer,
max_len=MAX_LEN)
test_dataset = create_tensor_dataset(test, tokenizer=tokenizer,
max_len=MAX_LEN)
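
# + [markdown]
# As a quick sanity check (this cell is not part of the original pipeline), we can pull a
# single element from the generator-backed dataset and confirm the nested shapes: each
# feature should have shape (1, number_of_reviews_in_the_group, MAX_LEN) and the label
# shape (1,).

# +
for features, label in train_dataset.take(1):
    for key, tensor in features.items():
        print(key, tensor.shape)
    print('label', label['label'].shape)
# -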
# + [markdown] id="pptVeJmVCYHd"
# ## 3. Model Definition and Callbacks
#
# + [markdown] id="mh1E9KKeKxON"
# ### 3.1. BERT<sub>BASE</sub> + Transformer
#
# Here, we ensemble our two-tower BERT<sub>BASE</sub> with an additional Transformer network on each tower. The Transformer consists of an encoder-decoder network with multi-head attention. The motivation is for the ensembled Transformer to learn the overall semantic meaning of each group of user and item reviews and apply it to our downstream recommendation task.
#
# We create a `RecommenderConfig` class so that we can configure settings such as the number of attention heads, the number of units in the Dense layer, the number of units in the feedforward network, the number of encoder-decoder layers, etc.
#
# The number of encoder-decoder layers dictates how many encoders and decoders a single Transformer network contains; we use it for our BERT<sub>BASE</sub> + DeepTransformer experiment. Again, we need to be cautious with this hyperparameter because of our compute resource constraints.
# + id="8qZKpiihCgRT"
from transformers import TFBertModel, BertConfig
from tensorflow.keras import Model, Input, Sequential
from tensorflow.keras.layers import Layer, Flatten, Concatenate, Dense, Add, Dot, \
Dropout, GlobalAveragePooling2D, MultiHeadAttention, LayerNormalization
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, \
LearningRateScheduler, ReduceLROnPlateau, EarlyStopping
# %load_ext tensorboard
class RecommenderConfig():
def __init__(self, max_len=64, hidden_dim=32, num_layers = 1,
embedding_dim = 768, num_heads=2, feedforward_dim=32, rate=0.1):
self.max_len = max_len
self.hidden_dim = hidden_dim
self.rate = rate
# this configuration is for transformer layer
self.num_layers = num_layers
self.embedding_dim = embedding_dim
self.num_heads = num_heads
self.feedforward_dim = feedforward_dim
class EncoderLayer(Layer):
def __init__(self, embedding_dim, num_heads, feedforward_dim, rate=0.1):
super().__init__()
self.attn = MultiHeadAttention(num_heads=num_heads, key_dim=embedding_dim)
self.feedforward = Sequential(
[Dense(feedforward_dim, activation="relu"), Dense(embedding_dim),]
)
self.layernorm1 = LayerNormalization(epsilon=1e-6)
self.layernorm2 = LayerNormalization(epsilon=1e-6)
self.dropout1 = Dropout(rate)
self.dropout2 = Dropout(rate)
def call(self, inputs, training):
attn_output = self.attn(inputs, inputs)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(inputs + attn_output)
feedforward_output = self.feedforward(out1)
feedforward_output = self.dropout2(feedforward_output, training=training)
return self.layernorm2(out1 + feedforward_output)
class Encoder(Layer):
def __init__(self, num_layers, embedding_dim, num_heads, feedforward_dim, rate=0.1):
super().__init__()
self.num_layers = num_layers
self.enc_layers = [EncoderLayer(embedding_dim, num_heads,
feedforward_dim, rate) for _ in range(num_layers)]
self.dropout = Dropout(rate)
def call(self, inputs, training):
inputs = self.dropout(inputs, training=training)
for i in range(self.num_layers):
inputs = self.enc_layers[i](inputs, training)
return inputs
class DecoderLayer(Layer):
def __init__(self, embedding_dim, num_heads, feedforward_dim, rate=0.1):
super().__init__()
self.attn1 = MultiHeadAttention(num_heads=num_heads, key_dim=embedding_dim)
self.attn2 = MultiHeadAttention(num_heads=num_heads, key_dim=embedding_dim)
self.feedforward = Sequential(
[Dense(feedforward_dim, activation="relu"), Dense(embedding_dim),]
)
self.layernorm1 = LayerNormalization(epsilon=1e-6)
self.layernorm2 = LayerNormalization(epsilon=1e-6)
self.layernorm3 = LayerNormalization(epsilon=1e-6)
self.dropout1 = Dropout(rate)
self.dropout2 = Dropout(rate)
self.dropout3 = Dropout(rate)
def call(self, inputs, enc_output, training):
attn_output1 = self.attn1(inputs, inputs)
attn_output1 = self.dropout1(attn_output1, training=training)
out1 = self.layernorm1(inputs + attn_output1)
attn_output2 = self.attn2(enc_output, enc_output) #, out1
attn_output2 = self.dropout2(attn_output2, training=training)
out2 = self.layernorm2(out1 + attn_output2)
feedforward_output = self.feedforward(out2)
feedforward_output = self.dropout3(feedforward_output, training=training)
return self.layernorm3(out2 + feedforward_output)
class Decoder(Layer):
def __init__(self, num_layers, embedding_dim, num_heads,
feedforward_dim, rate=0.1):
super().__init__()
self.num_layers = num_layers
self.dec_layers = [DecoderLayer(embedding_dim, num_heads,
feedforward_dim, rate) for _ in range(num_layers)]
self.dropout = Dropout(rate)
def call(self, inputs, enc_output, training):
inputs = self.dropout(inputs, training=training)
for i in range(self.num_layers):
inputs = self.dec_layers[i](inputs, enc_output, training)
return inputs
class TransformerLayer(Layer):
def __init__(self, recommender_config):
super().__init__()
num_layers = recommender_config.num_layers
embedding_dim = recommender_config.embedding_dim
num_heads = recommender_config.num_heads
feedforward_dim = recommender_config.feedforward_dim
rate = recommender_config.rate
#self.encoder = EncoderLayer(embedding_dim, num_heads, feedforward_dim, rate)
#self.decoder = DecoderLayer(embedding_dim, num_heads, feedforward_dim, rate)
self.encoder = Encoder(num_layers, embedding_dim, num_heads,
feedforward_dim, rate)
self.decoder = Decoder(num_layers, embedding_dim, num_heads,
feedforward_dim, rate)
def call(self, inputs, training):
out = self.encoder(inputs, training)
out = self.decoder(inputs, out, training)
return out
class BertLayer(Layer):
def __init__(self, config, max_len):
super().__init__()
self.config = config
self.max_len = max_len
self.bert = TFBertModel.from_pretrained(bert_model_name, config=self.config)
def call(self, input):
return tf.map_fn(
lambda x: self.bert(x).last_hidden_state,
dtype=tf.int32,
elems=input,
fn_output_signature=tf.TensorSpec(shape=(None, self.max_len, 768), dtype=tf.float32)
)
class RecommenderClassifier():
def __init__(self, bert_config, recommender_config):
self.bert_config = bert_config
self.recommender_config = recommender_config
self.max_len = self.recommender_config.max_len
self.hidden_dim = self.recommender_config.hidden_dim
self.rate = self.recommender_config.rate
self.layernorm = LayerNormalization(epsilon=1e-6)
self.user_ids, self.user_token_types, self.user_masks, self.user_tower = self.__create_tower('user')
self.item_ids, self.item_token_types, self.item_masks, self.item_tower = self.__create_tower('item')
self.joined = Concatenate()([self.user_tower, self.item_tower])
self.dropout = Dropout(self.rate)(self.joined)
self.out1 = Dense(1)(self.dropout)
def __create_tower(self, name):
input_ids_layer = Input(shape=(None, self.max_len), name=f'{name}_input_ids', dtype=tf.int32)
token_type_ids_layer = Input(shape=(None, self.max_len), name=f'{name}_token_type_ids', dtype=tf.int32)
attention_mask_layer = Input(shape=(None, self.max_len), name=f'{name}_attention_masks', dtype=tf.int32)
bert_layer = BertLayer(self.bert_config, self.max_len)
input_embedding = bert_layer([input_ids_layer, attention_mask_layer, token_type_ids_layer])
transformer_layer = TransformerLayer(self.recommender_config)
input_embedding = transformer_layer(input_embedding)
mean_embedding = GlobalAveragePooling2D(name=f'{name}_mean')(input_embedding)
tower = Dense(self.hidden_dim, activation="relu", name=f'{name}_dense')(mean_embedding)
tower = self.layernorm(tower)
return input_ids_layer, token_type_ids_layer, attention_mask_layer, tower
def build_model(self):
dotproduct = Dot(axes=1)([self.user_tower, self.item_tower])
output = Add(name='label')([self.out1, dotproduct])
model = Model(inputs=[self.user_ids, self.user_token_types, self.user_masks,
self.item_ids, self.item_token_types, self.item_masks],
outputs=[output])
return model
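
# + [markdown]
# As an illustrative check (not part of the original notebook), the Transformer layer
# should preserve the shape of the BERT embeddings it receives, i.e.
# (batch, review_groups, sequence_length, 768). A small random tensor confirms this.

# +
_check_cfg = RecommenderConfig(max_len=MAX_LEN)
_check_layer = TransformerLayer(_check_cfg)
_dummy = tf.random.uniform((2, 3, 8, 768))          # (batch, groups, seq_len, embedding)
print(_check_layer(_dummy, training=False).shape)   # expected: (2, 3, 8, 768)
# -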
# + [markdown] id="1LYsMWW4XVZC"
# ### 3.2 BERT<sub>BASE</sub> + Bi-Transformer
#
# This is a variant of our ensemble Transformer model in which we stack two Transformer encoder-decoders on top of the BERT<sub>BASE</sub> network. We can only stack two of them, again because of our compute resource constraints.
# + id="IiBZouLdXUnZ"
class RecommenderClassifierBiTransformer():
def __init__(self, bert_config, recommender_config):
self.bert_config = bert_config
self.recommender_config = recommender_config
self.max_len = self.recommender_config.max_len
self.hidden_dim = self.recommender_config.hidden_dim
self.rate = self.recommender_config.rate
self.layernorm = LayerNormalization(epsilon=1e-6)
self.user_ids, self.user_token_types, self.user_masks, self.user_tower = self.__create_tower('user')
self.item_ids, self.item_token_types, self.item_masks, self.item_tower = self.__create_tower('item')
self.joined = Concatenate()([self.user_tower, self.item_tower])
self.dropout = Dropout(self.rate)(self.joined)
self.out1 = Dense(1)(self.dropout)
def __create_tower(self, name):
input_ids_layer = Input(shape=(None, self.max_len), name=f'{name}_input_ids', dtype=tf.int32)
token_type_ids_layer = Input(shape=(None, self.max_len), name=f'{name}_token_type_ids', dtype=tf.int32)
attention_mask_layer = Input(shape=(None, self.max_len), name=f'{name}_attention_masks', dtype=tf.int32)
bert_layer = BertLayer(self.bert_config, self.max_len)
input_embedding = bert_layer([input_ids_layer, attention_mask_layer, token_type_ids_layer])
# Bi-Transformer
transformer_layer1 = TransformerLayer(self.recommender_config)
transformer_layer2 = TransformerLayer(self.recommender_config)
input_embedding = transformer_layer1(input_embedding)
input_embedding = transformer_layer2(input_embedding)
mean_embedding = GlobalAveragePooling2D(name=f'{name}_mean')(input_embedding)
tower = Dense(self.hidden_dim, activation="relu", name=f'{name}_dense')(mean_embedding)
tower = self.layernorm(tower)
return input_ids_layer, token_type_ids_layer, attention_mask_layer, tower
def build_model(self):
dotproduct = Dot(axes=1)([self.user_tower, self.item_tower])
output = Add(name='label')([self.out1, dotproduct])
model = Model(inputs=[self.user_ids, self.user_token_types, self.user_masks,
self.item_ids, self.item_token_types, self.item_masks],
outputs=[output])
return model
# + [markdown] id="-kGxTymrYgb2"
# ### 3.3. BERT + Modified Transformer (FFN + MultiHead + FFN)
#
# This is another variant of our Transformer encoder-decoder network in which we place the multi-head attention between two feedforward networks inside each encoder and decoder. This experiment, however, did not improve on the performance of our regular Transformer encoder-decoder network, so we do not pursue it further.
# + colab={"base_uri": "https://localhost:8080/"} id="MoRIPJLBYc_R" executionInfo={"elapsed": 790, "status": "ok", "timestamp": 1617164235926, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05289514665097855798"}, "user_tz": 240} outputId="41f871fb-a132-49f8-a24a-8d0a7675c5fa"
from transformers import TFBertModel, BertConfig
from tensorflow.keras import Model, Input, Sequential
from tensorflow.keras.layers import Layer, Flatten, Concatenate, Dense, Add, Dot, \
Dropout, GlobalAveragePooling2D, MultiHeadAttention, LayerNormalization
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, \
LearningRateScheduler, ReduceLROnPlateau, EarlyStopping
# %load_ext tensorboard
class RecommenderConfig():
def __init__(self, max_len=64, hidden_dim=32, embedding_dim = 768,
num_heads=2, feedforward_dim=32, rate=0.1):
self.max_len = max_len
self.hidden_dim = hidden_dim
self.rate = rate
# this configuration is for transformer layer
self.embedding_dim = embedding_dim
self.num_heads = num_heads
self.feedforward_dim = feedforward_dim
class EncoderLayerModified(Layer):
def __init__(self, embedding_dim, num_heads, feedforward_dim, rate=0.1):
super().__init__()
self.attn = MultiHeadAttention(num_heads=num_heads, key_dim=embedding_dim)
self.feedforward1 = Sequential(
[Dense(feedforward_dim, activation="relu"), Dense(embedding_dim),]
)
self.feedforward2 = Sequential(
[Dense(feedforward_dim, activation="relu"), Dense(embedding_dim),]
)
self.layernorm1 = LayerNormalization(epsilon=1e-6)
self.layernorm2 = LayerNormalization(epsilon=1e-6)
self.layernorm3 = LayerNormalization(epsilon=1e-6)
self.dropout1 = Dropout(rate)
self.dropout2 = Dropout(rate)
self.dropout3 = Dropout(rate)
def call(self, inputs, training):
feedforward_output1 = self.feedforward1(inputs)
feedforward_output1 = self.dropout1(feedforward_output1, training=training)
feedforward_output1 = self.layernorm1(inputs + feedforward_output1)
attn_output = self.attn(feedforward_output1, feedforward_output1)
attn_output = self.dropout2(attn_output, training=training)
attn_output = self.layernorm2(feedforward_output1 + attn_output)
feedforward_output2 = self.feedforward2(attn_output)
feedforward_output2 = self.dropout3(feedforward_output2, training=training)
return self.layernorm3(attn_output + feedforward_output2)
class DecoderLayerModified(Layer):
def __init__(self, embedding_dim, num_heads, feedforward_dim, rate=0.1):
super().__init__()
self.attn1 = MultiHeadAttention(num_heads=num_heads, key_dim=embedding_dim)
self.attn2 = MultiHeadAttention(num_heads=num_heads, key_dim=embedding_dim)
self.feedforward1 = Sequential(
[Dense(feedforward_dim, activation="relu"), Dense(embedding_dim),]
)
self.feedforward2 = Sequential(
[Dense(feedforward_dim, activation="relu"), Dense(embedding_dim),]
)
self.feedforward3 = Sequential(
[Dense(feedforward_dim, activation="relu"), Dense(embedding_dim),]
)
self.layernorm1 = LayerNormalization(epsilon=1e-6)
self.layernorm2 = LayerNormalization(epsilon=1e-6)
self.layernorm3 = LayerNormalization(epsilon=1e-6)
self.layernorm4 = LayerNormalization(epsilon=1e-6)
self.layernorm5 = LayerNormalization(epsilon=1e-6)
self.dropout1 = Dropout(rate)
self.dropout2 = Dropout(rate)
self.dropout3 = Dropout(rate)
self.dropout4 = Dropout(rate)
self.dropout5 = Dropout(rate)
def call(self, inputs, enc_output, training):
feedforward_output1 = self.feedforward1(inputs)
feedforward_output1 = self.dropout1(feedforward_output1)
feedforward_output1 = self.layernorm1(inputs + feedforward_output1)
attn_output1 = self.attn1(feedforward_output1, feedforward_output1)
attn_output1 = self.dropout2(attn_output1, training=training)
attn_output1 = self.layernorm2(feedforward_output1 + attn_output1)
feedforward_output2 = self.feedforward2(enc_output)
feedforward_output2 = self.dropout3(feedforward_output2)
feedforward_output2 = self.layernorm3(attn_output1 + feedforward_output2)
attn_output2 = self.attn2(feedforward_output2, feedforward_output2) #, out1
attn_output2 = self.dropout4(attn_output2, training=training)
attn_output2 = self.layernorm4(feedforward_output2 + attn_output2)
feedforward_output3 = self.feedforward3(attn_output2)
feedforward_output3 = self.dropout5(feedforward_output3, training=training)
return self.layernorm5(attn_output2 + feedforward_output3)
class TransformerLayerModified(Layer):
def __init__(self, recommender_config):
super().__init__()
embedding_dim = recommender_config.embedding_dim
num_heads = recommender_config.num_heads
feedforward_dim = recommender_config.feedforward_dim
rate = recommender_config.rate
self.encoder = EncoderLayerModified(embedding_dim, num_heads, feedforward_dim, rate)
self.decoder = DecoderLayerModified(embedding_dim, num_heads, feedforward_dim, rate)
def call(self, inputs, training):
out = self.encoder(inputs, training)
out = self.decoder(inputs, out, training)
return out
class BertLayer(Layer):
def __init__(self, config, max_len):
super().__init__()
self.config = config
self.max_len = max_len
self.bert = TFBertModel.from_pretrained(bert_model_name, config=self.config)
def call(self, input):
return tf.map_fn(
lambda x: self.bert(x).last_hidden_state,
dtype=tf.int32,
elems=input,
fn_output_signature=tf.TensorSpec(shape=(None, self.max_len, 768), dtype=tf.float32)
)
class RecommenderClassifierModifiedTransformer():
def __init__(self, bert_config, recommender_config):
self.bert_config = bert_config
self.recommender_config = recommender_config
self.max_len = self.recommender_config.max_len
self.hidden_dim = self.recommender_config.hidden_dim
self.rate = self.recommender_config.rate
self.layernorm = LayerNormalization(epsilon=1e-6)
self.user_ids, self.user_token_types, self.user_masks, self.user_tower = self.__create_tower('user')
self.item_ids, self.item_token_types, self.item_masks, self.item_tower = self.__create_tower('item')
self.joined = Concatenate()([self.user_tower, self.item_tower])
self.dropout = Dropout(self.rate)(self.joined)
self.out1 = Dense(1)(self.dropout)
def __create_tower(self, name):
input_ids_layer = Input(shape=(None, self.max_len), name=f'{name}_input_ids', dtype=tf.int32)
token_type_ids_layer = Input(shape=(None, self.max_len), name=f'{name}_token_type_ids', dtype=tf.int32)
attention_mask_layer = Input(shape=(None, self.max_len), name=f'{name}_attention_masks', dtype=tf.int32)
bert_layer = BertLayer(self.bert_config, self.max_len)
input_embedding = bert_layer([input_ids_layer, attention_mask_layer, token_type_ids_layer])
transformer_layer = TransformerLayerModified(self.recommender_config)
input_embedding = transformer_layer(input_embedding)
mean_embedding = GlobalAveragePooling2D(name=f'{name}_mean')(input_embedding)
tower = Dense(self.hidden_dim, activation="relu", name=f'{name}_dense')(mean_embedding)
tower = self.layernorm(tower)
return input_ids_layer, token_type_ids_layer, attention_mask_layer, tower
def build_model(self):
dotproduct = Dot(axes=1)([self.user_tower, self.item_tower])
output = Add(name='label')([self.out1, dotproduct])
model = Model(inputs=[self.user_ids, self.user_token_types, self.user_masks,
self.item_ids, self.item_token_types, self.item_masks],
outputs=[output])
return model
# + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["1833b9ba674b46f69b785675766ac87f", "3208fe47d947415fa233cca15eec6493", "cf045f289fb84b1e88c831fec552079a", "591d42d1c4414fa2ba20d82d2f0eb4e5", "d8f16e9c9528412faad5f065f85b9215", "bea391ed0c3842d1a75ad0ee3e459a4c", "46ddcc491da548fbbe7a73a194bd8abc", "9adb54995baa4d1aa1e31e6fcef69231"]} id="vukHicHWNNxp" executionInfo={"elapsed": 52555, "status": "ok", "timestamp": 1617975590978, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05289514665097855798"}, "user_tz": 240} outputId="7f7447db-276e-43d0-d408-467b918d4ed9"
config = BertConfig()
config.output_hidden_states = True # set to True to obtain hidden states
rec_config = RecommenderConfig()
rec_config.max_len = MAX_LEN
rec_config.rate = 0.2
rec_config.num_heads = 2
rec_config.num_layers = 3
with strategy.scope():
loss_fn = tf.keras.losses.MeanSquaredError()
adam = tf.keras.optimizers.Adam(learning_rate=0.00002) #0.00002
mse_metrics = tf.keras.metrics.MeanSquaredError()
classifier = RecommenderClassifier(config, rec_config)
model = classifier.build_model()
model.compile(optimizer=adam, loss=loss_fn, metrics=[mse_metrics])
model.summary()
# + id="YQg8w40lRRYR"
# TensorBoard and checkpoint callbacks
def tensorboard_checkpoint_callbacks(name):
if amazon:
tensorboard_dir = ''.join(['./logs/tensorboard/', name])
else:
tensorboard_dir = ''.join(['./yelp/logs/tensorboard/', name])
if not os.path.exists(tensorboard_dir):
os.makedirs(tensorboard_dir)
version = 1
dirs = [d for d in os.listdir(tensorboard_dir) if not d.startswith('.')]
if (len(dirs) > 0):
versions = np.asarray(list(map(lambda v: int(v[1:]), dirs)))
version = versions[versions.argsort()[::-1][0]]
version += 1
tensorboard_version_dir = os.path.join(tensorboard_dir, ''.join(['v', str(version)]))
print(tensorboard_version_dir)
tensorboard_callback = TensorBoard(log_dir=tensorboard_version_dir, histogram_freq=1)
if amazon:
checkpoint_dir = './logs/chkpoint'
else:
checkpoint_dir = './yelp/logs/chkpoint'
checkpoint_name_dir = os.path.join(checkpoint_dir, name, ''.join(['v', str(version)]))
if not os.path.exists(checkpoint_name_dir):
os.makedirs(checkpoint_name_dir)
checkpoint_file = os.path.join(checkpoint_name_dir, 'weights.best.hdf5')
print(checkpoint_file)
checkpoint_callback = ModelCheckpoint(checkpoint_file, monitor='val_loss', verbose=0, save_best_only=True,
save_weights_only=True)
return tensorboard_callback, checkpoint_callback, version
# + colab={"base_uri": "https://localhost:8080/"} id="E2x7up1k4fju" executionInfo={"elapsed": 652, "status": "ok", "timestamp": 1617975667730, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05289514665097855798"}, "user_tz": 240} outputId="49512d09-1593-4910-9483-b8412a8849bb"
name = 'finetuned-transformerv2'
tensorboard_callback, checkpoint_callback, version = tensorboard_checkpoint_callbacks(name)
lr_onplateau_callback = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, verbose=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="UD0oJTP9S4DE" executionInfo={"elapsed": 1042, "status": "ok", "timestamp": 1617975675437, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05289514665097855798"}, "user_tz": 240} outputId="97d77173-3982-4859-cfe8-4d2f079431e1"
# Early stopping callback
earlystop_callback = EarlyStopping(monitor='val_loss', min_delta=0.00005, patience=5)
# Learning rate scheduler callback
# optimizer (with 1-cycle-policy)
start_lr = 0.00001
min_lr = 0.00001
max_lr = 0.001 * strategy.num_replicas_in_sync
rampup_epochs = 3
sustain_epochs = 0
exp_decay = .8
def lrfn(epoch):
if epoch < rampup_epochs:
return (max_lr - start_lr)/rampup_epochs * epoch + start_lr
elif epoch < rampup_epochs + sustain_epochs:
return max_lr
else:
return (max_lr - min_lr) * exp_decay**(epoch-rampup_epochs-sustain_epochs) + min_lr
lr_callback = LearningRateScheduler(lambda epoch: lrfn(epoch), verbose=True)
rang = np.arange(NO_EPOCHS)
y = [lrfn(x) for x in rang]
plt.plot(rang, y)
print('Learning rate per epoch:')
# + [markdown] id="sZ01jpZfPwOl"
# ## 4. Model Fitting
#
# We train our recommendation classifier with an MSE loss function and the Adam optimizer with an initial learning rate of $2 \times 10^{-5}$. We also use TensorBoard, checkpointing, and ReduceLROnPlateau (factor $0.2$, patience $3$) as callbacks.
#
# We experimented with a one-cycle-policy learning rate scheduler; however, ReduceLROnPlateau appears to perform better for the fine-tuning approach.
# + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="uddzFE5TQTrB" outputId="7fc343aa-54f6-483f-a962-f3fade7f50df"
# %%time
train_size = len(train)
train_steps_per_epoch = train_size // BATCH_SIZE
val_size = len(val)
val_steps_per_epoch = val_size // BATCH_SIZE
history = model.fit(train_dataset.shuffle(BUFFER_SIZE),
batch_size=BATCH_SIZE,
steps_per_epoch=train_steps_per_epoch,
validation_data=val_dataset,
validation_steps=val_steps_per_epoch,
callbacks=[tensorboard_callback, checkpoint_callback, lr_onplateau_callback], # lr_callback, , earlystop_callback
epochs=NO_EPOCHS)
# + [markdown] id="ZootSuTcQAw5"
# ## 5. Model Evaluation
#
# We evaluate our model on the held-out test dataset to obtain the final MSE. We then save the weights (weights only) of the models that perform best.
# + id="mUu6Uf6JeqrN"
mse, _ = model.evaluate(test_dataset)
print(f'Test MSE: {mse}')
# + id="CJcn676GSq87"
model_dir = './model'
if not os.path.exists(model_dir):
os.makedirs(model_dir)
model_file = ''.join([model_dir, '/recommender_', name, '_32_v', str(version), '.h5'])
model.save_weights(model_file, save_format='h5')
# + [markdown] id="s03-fUyiY55v"
# ## 6. Error Analysis
#
# We analyze our model's performance further by looking at the top-10 worst squared errors among our Amazon dataset predictions. We found some very interesting occurrences: most of the bad predictions involve things that could not have been predicted from the review text. For instance, all the reviews of a movie are good, but the person gave it a low rating because of a problem with shipping or price, or because of a specific issue around topics like sexuality or violence that was not a problem for other viewers. Some look like mis-clicks, where the person wrote a positive review but gave a low numeric score. There are also some that simply seem hard to predict, where one user found a movie boring that other people liked, but these are not as common as we would have thought.
# + id="J4yx0oeCY5Ws"
## Load best performance model
weights = './model/recommender_finetuned-transformerv2_32_v6.h5'
model.load_weights(weights)
# + colab={"base_uri": "https://localhost:8080/"} id="RoXtNo3W872f" executionInfo={"elapsed": 128882, "status": "ok", "timestamp": 1617652016677, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05289514665097855798"}, "user_tz": 240} outputId="652444a7-045f-4cf3-faa8-927199a66c27"
y_pred = model.predict(test_dataset)
# + id="QIbAI8TVAWZh"
error = y_pred - eval[:,4].reshape(-1, 1)
square_error = np.square(error)
_eval = np.column_stack((eval, y_pred, error, square_error))
eval_df = pd.DataFrame(_eval, columns=['reviewerID', 'asin', 'userReviews', 'itemReviews', 'y_true', 'y_pred', 'error', 'square_error'])
# + id="ZmBYXWJTmh35"
input_pkl = '../dataset/reviews_25-65_tokens_with_user-item_counts.pkl'
features = ['reviewerID', 'asin', 'reviewText']
reviews = pd.read_pickle(input_pkl)[features]
# + id="vZ7vjesJr-Lx"
eval_df = eval_df.merge(reviews, on=['reviewerID', 'asin'], how='left')
eval_df = eval_df[eval_df.columns[:9]]
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="RtwVholRsUua" executionInfo={"elapsed": 864, "status": "ok", "timestamp": 1617655431175, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05289514665097855798"}, "user_tz": 240} outputId="6537118e-2548-4772-e21d-6acfa91adb15"
display(HTML(eval_df.sort_values(by='square_error', ascending=False).head(10).to_html()))
| bert-transformer_fine-tuning_training_colab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# All content can be freely used and adapted under the terms of the
# [Creative Commons Attribution 4.0 International License](http://creativecommons.org/licenses/by/4.0/).
#
# ![Creative Commons License](https://i.creativecommons.org/l/by/4.0/88x31.png)
#
# Special thanks to [<NAME>](www.leouieda.com)
# The document you are using is a [Jupyter notebook](http://jupyter.org/). It is an interactive document that mixes text (like this), code (like the cells below), and the results of running the code (numbers, text, figures, videos, etc.).
# # Interpolation, maps, and the Earth's gravity
# ## Objectives
#
# * Understand the influence of interpolation on maps of geophysical data
# * Visualize the geographic variations of the Earth's gravity
# * Understand how the color scale used in a map influences our interpretation
# * Learn which factors must be considered when visualizing data on a map
# ## Instructions
#
# The notebook provides interactive examples that work through the topics covered in the questionnaire. Use these examples to answer the questions.
#
# Cells with numbers next to them, like `In [1]:`, are [Python](http://python.org/) code. Some of these cells do not produce output and only prepare the interactive examples. Others produce interactive plots. **You must run all of the cells, one at a time**, even the ones that produce no plots.
#
# To run a cell, click on it and press `Shift + Enter`. The focus (the green or gray outline around the cell) should move to the cell below. To run that one, press `Shift + Enter` again, and so on. You can run text cells as well; nothing will happen.
# ## Preparation
#
# Run the cells below to load the components needed for this practical. We will use several *libraries*, including a geophysics library called [Fatiando a Terra](http://www.fatiando.org).
# %matplotlib inline
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import ipywidgets as widgets
from IPython.display import display
import seaborn
from fatiando import utils, gridder
import fatiando
from icgem import load_icgem_gdf, down_sample
# ## Interpolation
# The best way to understand the effect of interpolation is to fabricate some fictitious (synthetic) data.
# That way, we can generate the data both at random points and on a regular grid.
# This lets us compare the interpolation results with the *true* values. Our "true" data will be a dataset measured on a regular grid, as if we had gone into the field and measured on a regular grid.
# Run the cell below to generate the data at random points and on a regular grid.
area = (-5000., 5000., -5000., 5000.)
shape = (100, 100)
xp, yp = gridder.scatter(area, 100, seed=6)
x, y = [i.reshape(shape) for i in gridder.regular(area, shape)]
aletatorio = 50*utils.gaussian2d(xp, yp, 10000, 1000, angle=45)
regular = 50*utils.gaussian2d(x, y, 10000, 1000, angle=45).reshape(shape)
# Run the two cells below to generate an interactive plot. In this plot you can control:
#
# * The number of points (in x and y) of the grid used for the interpolation (`num_pontos`)
# * The interpolation method used (`metodo`): cubic or linear
# * Whether or not to show the random measurement points on the interpolated map
#
# **Pay attention to what happens at the edges of the map and where there are no observations.**
def interpolacao(num_pontos, metodo, pontos_medidos):
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
ishape = (num_pontos, num_pontos)
tmp = gridder.interp(yp, xp, aletatorio, ishape, area=area, algorithm=metodo, extrapolate=True)
yi, xi, interp = [i.reshape(ishape) for i in tmp]
ranges = np.abs([interp.min(), interp.max()]).max()
kwargs = dict(cmap="RdBu_r", vmin=-ranges, vmax=ranges)
ax = axes[0]
ax.set_title(u'Pontos medidos')
ax.set_aspect('equal')
tmp = ax.scatter(yp*0.001, xp*0.001, s=80, c=aletatorio, **kwargs)
plt.colorbar(tmp, ax=ax, aspect=50, pad=0.01)
ax.set_xlabel('y (km)')
ax.set_ylabel('x (km)')
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
plt.tight_layout(pad=0)
ax = axes[1]
ax.set_title(u'Interpolado')
ax.set_aspect('equal')
tmp = ax.contourf(yi*0.001, xi*0.001, interp, 40, **kwargs)
plt.colorbar(tmp, ax=ax, aspect=50, pad=0.01)
if pontos_medidos:
ax.plot(yp*0.001, xp*0.001, '.k')
ax.set_xlabel('y (km)')
ax.set_ylabel('x (km)')
ax.set_xlim(-5, 5)
ax.set_ylim(-5, 5)
plt.tight_layout(pad=0)
w = widgets.interactive(interpolacao, num_pontos=(5, 100, 5), metodo=['cubic', 'linear'], pontos_medidos=False)
display(w)
# Let's check whether any of the combinations came close to the *true* result.
#
# Run the cell below to plot the true data (generated on a regular grid). This is the result you should observe if the interpolation were perfect.
fig, ax = plt.subplots(1, 1, figsize=(7, 6))
ranges = np.abs([regular.min(), regular.max()]).max()
kwargs = dict(cmap="RdBu_r", vmin=-ranges, vmax=ranges)
ax.set_title(u'Verdadeiro')
ax.set_aspect('equal')
tmp = ax.contourf(y*0.001, x*0.001, regular, 40, **kwargs)
plt.colorbar(tmp, ax=ax, aspect=50, pad=0.01)
ax.plot(yp*0.001, xp*0.001, '.k')
ax.set_xlabel('y (km)')
ax.set_ylabel('x (km)')
plt.tight_layout(pad=0)
# # Gravity of the world
# Let's visualize how the Earth's gravity varies geographically. The world gravity data were downloaded from http://icgem.gfz-potsdam.de/ICGEM/potato/Service.html using the EIGEN-6c3stat model.
#
# **The measurements were made on the surface of the Earth**, that is, following the topography.
# Run the cells below to load the data.
dados = load_icgem_gdf('data/eigen-6c3stat-0_5-mundo.gdf')
lat, lon, grav = dados['latitude'], dados['longitude'], dados['gravity_earth']
# Let's make a gravity map using the [Mollweide projection](http://en.wikipedia.org/wiki/Map_projection). These data are in mGal: 1 mGal = 10⁻⁵ m/s².
#
# Run the two cells below to generate the plot (this may take a while).
bm = Basemap(projection='moll', lon_0=0, resolution='c')
x, y = bm(lon, lat)
plt.figure(figsize=(18, 10))
tmp = bm.contourf(x, y, grav, 100, tri=True, cmap='Reds')
plt.colorbar(orientation='horizontal', pad=0.01, aspect=50, shrink=0.5).set_label('mGal')
plt.title("Gravidade medida na superficie da Terra", fontsize=16)
# ## Color scale
#
# The color scale we use to map the values can have a big impact on our interpretation of the results. Below, we will see how our world gravity data look when we use different color scales.
#
# Color scales can be divided into 3 categories:
#
# * linear: the colors vary linearly from a light tone (usually white) to a color (for example, red)
# * divergent: the colors vary from a dark color, through a light tone (usually white), and on to another dark color
# * rainbow or qualitative: the colors vary without a clear intensity pattern; they may be the colors of the rainbow or some other combination
#
# In linear and divergent scales, the colors always vary from low intensity to high intensity (and back again for divergent scales).
# Run the cells below to generate an interactive map of the world's gravity. You can control which color scale is used. Experiment with them and see how they affect your perception.
#
# **Food for thought**: How might this affect someone who is [color-blind](https://pt.wikipedia.org/wiki/Daltonismo)?
def grav_mundial(escala_de_cor):
plt.figure(figsize=(18, 10))
tmp = bm.contourf(x, y, grav, 100, tri=True, cmap=escala_de_cor)
plt.colorbar(orientation='horizontal', pad=0.01, aspect=50, shrink=0.5).set_label('mGal')
plt.title("Escala de cor: {}".format(escala_de_cor), fontsize=16)
escalas = 'Reds Blues Greys YlOrBr RdBu BrBG PRGn Dark2 jet ocean rainbow gnuplot'.split()
w = widgets.interactive(grav_mundial, escala_de_cor=escalas)
display(w)
# # The Normal Earth and the gravity disturbance
# ## Objectives
#
# * Learn how to compute the gravity of the Normal Earth and the gravity disturbance
# * Generate maps of the disturbance for the whole world
# * Understand the relationship between the disturbance and isostasy
# * Observe the state of isostatic equilibrium in different regions of the planet
# ## The Normal Earth
# "Normal Earth" is the name we give to the reference ellipsoid used to compute gravity anomalies. A commonly used ellipsoid is the [WGS84](http://en.wikipedia.org/wiki/World_Geodetic_System).
#
# There are formulas to compute the gravity (remember that gravity = gravitation + centrifugal) of an ellipsoid at any point outside of it. However, those formulas are more complicated than we need for this class. An alternative is to use Somigliana's formula:
#
# $$
# \gamma(\varphi) = \frac{a \gamma_a \cos^2 \varphi + b \gamma_b \sin^2 \varphi}{\sqrt{a^2 \cos^2 \varphi + b^2 \sin^2 \varphi}}
# $$
#
# $\gamma$ is the gravity of the ellipsoid computed at latitude $\varphi$ and **on the surface of the ellipsoid** (that is, at zero height).
# $a$ and $b$ are the semi-major and semi-minor axes of the ellipsoid, and $\gamma_a$ and $\gamma_b$ are the gravity of the ellipsoid at the equator and at the poles. The values of $a$, $b$, $\gamma_a$, and $\gamma_b$ are tabulated for each ellipsoid. The values below refer to WGS84:
#
# <table>
# <tr> <th> a </th> <td> 6378137 </td> <td> meters </td> </tr>
# <tr> <th> b </th> <td> 6356752.3142 </td> <td> meters </td> </tr>
# <tr> <th> $\gamma_a$ </th> <td> 9.7803253359 </td> <td> m/s² </td> </tr>
# <tr> <th> $\gamma_b$ </th> <td> 9.8321849378 </td> <td> m/s² </td> </tr>
# </table>
#
# The values were taken from the book:
#
# > <NAME>., and <NAME> (2006), Physical Geodesy, 2nd, corr. ed. 2006 edition., Springer, Wien ; New York.
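#
# Below is a minimal sketch (not part of the original practical) of how Somigliana's formula
# can be evaluated with NumPy, assuming the latitude is given in degrees; the result is in m/s².

def somigliana(latitude_deg):
    """Gravity of the WGS84 ellipsoid on its own surface, in m/s2."""
    a, b = 6378137.0, 6356752.3142                   # semi-major and semi-minor axes (m)
    gamma_a, gamma_b = 9.7803253359, 9.8321849378    # gravity at the equator and at the poles (m/s2)
    phi = np.radians(latitude_deg)
    sin2, cos2 = np.sin(phi)**2, np.cos(phi)**2
    return (a*gamma_a*cos2 + b*gamma_b*sin2)/np.sqrt(a**2*cos2 + b**2*sin2)

print(somigliana(0.0), somigliana(90.0))  # should reproduce gamma_a and gamma_b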
# ### Loading the data and making a map
# After computing the values above, we need to load them into the notebook to generate the maps.
#
# First, put the name of your `.csv` file below and run the cell.
arquivo_dados = 'data/somigliana.csv'
# Now run the cells below to load the data and generate a map with the values you computed.
lon, lat, gamma = np.loadtxt(arquivo_dados, delimiter=',', unpack=True, skiprows=0, usecols=[0, 1, -1])
bm = Basemap(projection='moll', lon_0=0, resolution='c')
x, y = bm(lon, lat)
plt.figure(figsize=(18, 10))
tmp = bm.contourf(x, y, gamma, 100, tri=True, cmap='Reds')
plt.colorbar(orientation='horizontal', pad=0.01, aspect=50, shrink=0.5).set_label('mGal')
plt.title(r"Gravidade da Terra Normal ($\gamma$)", fontsize=16)
# ### Computing the Normal Earth at the observation point ($\gamma_P$)
# Somigliana's formula gives us the gravity of the Normal Earth computed on the ellipsoid. To compute the disturbance, we need $\gamma$ computed at the point P where we measured gravity. To obtain $\gamma_P$, we can use the **free-air correction**, which gives an approximation of $\gamma_P$:
#
# $$ \gamma_P \approx \gamma - 0.3086 H $$
#
# where $H$ is the height above the ellipsoid (geometric height) in **meters**. Remember that the correction is applied in **mGal**.
#
# Run the cells below to load the $\gamma_P$ data and generate a map.
arquivo_dados = 'data/freeair.csv'
gamma_p = np.loadtxt(arquivo_dados, delimiter=',', unpack=True, skiprows=0, usecols=[-1])
plt.figure(figsize=(18, 10))
tmp = bm.contourf(x, y, gamma_p, 100, tri=True, cmap='Reds')
plt.colorbar(orientation='horizontal', pad=0.01, aspect=50, shrink=0.5).set_label('mGal')
plt.title(r"Gravidade da Terra Normal em P ($\gamma_P$)", fontsize=16)
# ## Gravity disturbance
# The gravity disturbance is defined as:
#
# $$ \delta = g_P - \gamma_P$$
#
# where $g_P$ is the gravity measured at point P.
#
# Run the cells below to load the computed values and generate the map.
arquivo_dados = 'data/residual.csv'
disturbio = np.loadtxt(arquivo_dados, delimiter=',', unpack=True, skiprows=0, usecols=[-1])
def varia_escala(escala_de_cor):
plt.figure(figsize=(18, 10))
ranges = np.abs([disturbio.min(), disturbio.max()]).max()
tmp = bm.contourf(x, y, disturbio, 100, tri=True, cmap=escala_de_cor, vmin=-ranges, vmax=ranges)
plt.colorbar(orientation='horizontal', pad=0.01, aspect=50, shrink=0.5).set_label('mGal')
plt.title(u"Distúrbio da gravidade (escala de cor '{}')".format(escala_de_cor), fontsize=16)
escalas = 'Reds Blues Greys YlOrBr RdBu_r BrBG PRGn Dark2 jet ocean rainbow gnuplot'.split()
w = widgets.interactive(varia_escala, escala_de_cor=escalas)
display(w)
# # Isostasy and the Bouguer anomaly
# ## Objectives
#
# * Visualize the Airy and Pratt mechanisms of isostatic compensation
# * Compute and visualize the Bouguer anomaly for the whole world
# ## Bouguer anomaly
# In the previous practical, you computed the gravity disturbance ($\delta$) by removing the gravity of the Normal Earth computed at the observation point ($\gamma_P$). We saw that the disturbance indicates the state of isostatic equilibrium of a region: if $\delta$ is small and positive, the region is in equilibrium; otherwise it is not. A lack of isostatic equilibrium suggests that external forces are raising or lowering the topography.
#
# If we want to see the gravitational effect of things below the topography (the Moho, sedimentary basins, and other heterogeneities), we need to **remove the gravitational effect of the topography** from the disturbance. To do that, we need to compute the gravitational attraction of the topographic mass (let's call it $g_t$). The **Bouguer anomaly** is the gravity disturbance minus the effect of the topography:
#
# $$\Delta g_{bg} = \delta - g_t$$
#
# A simple way to compute $g_t$ is through an approximation. In this case, we approximate all of the topographic mass below the point P where we measured gravity by an infinite slab (the *Bouguer plate*). If the topography below point P has a **height above the ellipsoid** of $H$ meters, we can approximate $g_t$ by:
#
# $$g_t \approx 2 \pi G \rho H$$
#
# where $\rho$ is the density of the topography and $G$ is the gravitational constant.
#
# In the oceans, there is no topography above the ellipsoid. There is, however, a layer of water that was not properly removed with the Normal Earth ($\gamma_P$). We can use the Bouguer plate approximation to compute the gravitational effect of the water layer and remove it from the disturbance. That way, we obtain the Bouguer anomaly for both continental and oceanic regions.
# ### Computing the Bouguer anomaly
# To do the calculations, we need the topographic height. On the continents, this height is the same as the height at which the data were measured. In the oceans, the measurement height is zero (the water surface), so what we actually need there is the bathymetry. Luckily, there are digital terrain models, such as [ETOPO1](http://www.ngdc.noaa.gov/mgg/global/global.html), that give us topography on the continents and bathymetry in the oceans. The file `data/etopo1-0_5-mundo.gdf` contains the ETOPO1 topography computed at the same points where the gravity was measured.
#
# **Tips** for computing the gravitational effect of the topography using the Bouguer plate (a sketch follows this list):
#
# * Use the density $\rho_c = 2670\ kg/m^3$ for the topography.
# * In the oceans, use the density $\rho_c$ for the crust of the ellipsoid and $\rho_a = 1040\ kg/m^3$ for the seawater.
# * Use $G = 0.00000000006673\ m^3 kg^{-1} s^{-2}$
# * The computed value will be in m/s². Convert it to mGal (1 m/s² = 100000 mGal).
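#
# Below is a minimal sketch (not part of the original practical) of one way to implement the
# tips above, assuming `disturbio` is the gravity disturbance in mGal and `topo` is the ETOPO1
# height in meters (positive on land, negative in the oceans); both variable names are
# placeholders, and the sign convention for the oceanic case should follow your course notes.

def bouguer_plate(disturbio, topo, rho_c=2670.0, rho_a=1040.0):
    """Bouguer anomaly from the disturbance (mGal) and topography/bathymetry (m)."""
    G = 0.00000000006673  # gravitational constant, m^3 kg^-1 s^-2
    # On land the slab is crust of density rho_c; in the oceans the water layer replaces
    # crust of the Normal Earth, so the slab density is the contrast (rho_a - rho_c).
    densidade = np.where(topo >= 0, rho_c, rho_a - rho_c)
    g_t = 2*np.pi*G*densidade*np.abs(topo)*100000  # converted from m/s² to mGal
    return disturbio - g_t

# Example (hypothetical): bouguer = bouguer_plate(disturbio, topo)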
# ### Loading the data and making a map
#
# After computing the values above, we need to load them into the notebook to generate the maps.
#
# First, put the name of your `.csv` file below and run the cell. **The name must be exact.** Tip: delete the file name and press Tab.
arquivo_dados = 'data/bouguer.csv'
# Now run the cells below to load the data and generate a map with the values you computed.
lon, lat, bouguer = np.loadtxt(arquivo_dados, delimiter=',', unpack=True, skiprows=0, usecols=[0, 1, -1])
bm = Basemap(projection='moll', lon_0=0, resolution='c')
x, y = bm(lon, lat)
plt.figure(figsize=(18, 10))
ranges = np.abs([bouguer.min(), bouguer.max()]).max()
tmp = bm.contourf(x, y, bouguer, 100, tri=True, cmap='RdBu_r', vmin=-ranges, vmax=ranges)
plt.colorbar(orientation='horizontal', pad=0.01, aspect=50, shrink=0.5).set_label('mGal')
plt.title(r"Anomalia Bouguer", fontsize=16)
# # Inversion of data from a 2D polygonal sedimentary basin
#
# ## Objectives
#
# * Better understand how data inversion works
# The cell below _prepares_ the environment
from fatiando.inversion import Smoothness1D
from fatiando.gravmag.basin2d import PolygonalBasinGravity
from fatiando.gravmag import talwani
from fatiando.mesher import Polygon
from fatiando.vis import mpl
from fatiando import utils
import numpy as np
# The cell below creates synthetic data to test the inversion. The model will be a polygon.
noise = 5
# Make some synthetic data to test the inversion
# The model will be a polygon.
# Reverse x because vertices must be clockwise.
xs = np.linspace(0, 100000, 100)[::-1]
depths = (-1e-15*(xs - 50000)**4 + 8000 -
3000*np.exp(-(xs - 70000)**2/(10000**2)))
depths -= depths.min() # Reduce depths to zero
props = {'density': -300}
model = Polygon(np.transpose([xs, depths]), props)
x = np.linspace(0, 100000, 100)
z = -100*np.ones_like(x)
data = utils.contaminate(talwani.gz(x, z, [model]), noise, seed=0)
# The cell below runs the inversion, given the initial conditions described in `initial`
# Make the solver using smoothness regularization and run the inversion
misfit = PolygonalBasinGravity(x, z, data, 50, props, top=0)
regul = Smoothness1D(misfit.nparams)
solver = misfit + 1e-4*regul
# This is a non-linear problem so we need to pick an initial estimate
initial = 3000*np.ones(misfit.nparams)
solver.config('levmarq', initial=initial).fit()
# The cell below plots the basin and shows the data fit
# %matplotlib inline
mpl.figure()
mpl.subplot(2, 1, 1)
mpl.plot(x, data, 'ok', label='observed')
mpl.plot(x, solver[0].predicted(), '-r', linewidth=2, label='predicted')
mpl.legend()
ax = mpl.subplot(2, 1, 2)
mpl.polygon(model, fill='gray', alpha=0.5, label='True')
# The estimate_ property of our solver gives us the estimated basin as a polygon,
# so we can pass it directly to plotting and forward-modeling functions
mpl.polygon(solver.estimate_, style='o-r', label='Estimated')
ax.invert_yaxis()
mpl.legend()
mpl.show()
| notebooks/gravmetria/GRAV_somigliana_ar-livre_bouguer-v02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# language: python
# name: python38564bit3d4e4e887ecb4475a101c3f1a65de3f9
# ---
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
import tensorflow as tf
# <h3>Horizontal Projection
def horizontal_projection(thresh1):
    row_matrix = []  # will hold all non-empty row segments
segemetation = []
count = 0
i = 0
copy_image = thresh1
for p in range(thresh1.shape[0]):
if(copy_image[i].sum() == 0):
count = count + 1
i = i+1
if(count == 5):
segemetation.append(copy_image[0:i,:])
copy_image = np.delete(copy_image,np.s_[0:i], axis = 0)
count = 0
i = 0
else:
count = 0
i = i + 1
m = len(segemetation)
for i in range(m):
if(segemetation[i].sum() > 0):
row_matrix.append(segemetation[i])
return row_matrix
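# A tiny synthetic check (added for illustration, not part of the original notebook):
# two bands of "ink" separated by more than five blank rows should come back as two segments.
toy = np.zeros((30, 10), dtype=np.uint8)
toy[2:6, :] = 255    # first band of content
toy[20:24, :] = 255  # second band, well separated from the first
print(len(horizontal_projection(toy)))  # expected: 2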
# <h3>Vertical Projection
def vertical_projection(image):
row_matrix = []
row = image
segmentation = []
count = 0
i = 0
for p in range(image.shape[1]):
if(row[:,i].sum() == 0):
count += 1
i += 1
if(count == 5):
segmentation.append(row[:,0:i])
row = np.delete(row,np.s_[0:i],axis = 1)
count = 0
i = 0
else:
count = 0
i += 1
m = len(segmentation)
for i in range(m):
if(segmentation[i].sum() > 0):
row_matrix.append(segmentation[i])
return row_matrix
# <h3> Image crop
def crop(thresh1):
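    # Added description (not in the original): trims all-blank rows/columns from each side of a
    # thresholded (binary) image, stopping as soon as content appears within 8 pixels of the edge,
    # so a small blank margin around the glyph is preserved.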
#Top adjusting
try:
i = 0
while(thresh1[i].sum() == 0):
if(thresh1[i+8].sum() == 0):
thresh1 = np.delete(thresh1,0,0)
else:
break
except:
pass
#Bottom adjusting
try:
i = thresh1.shape[0] -1
while(thresh1[i].sum() == 0):
if(thresh1[i-8].sum() == 0):
thresh1 = np.delete(thresh1,i,0)
i = i -1
else:
break
except:
pass
    # right side adjusting
try:
i = thresh1.shape[1] -1
s = thresh1.sum(axis = 0)
p = -1
while(s[p] == 0):
if(s[p-8] == 0):
thresh1 = np.delete(thresh1,-1, axis = 1)
s = np.delete(s, -1, axis=0)
else:
break
except:
pass
    # left side adjusting
try:
i = thresh1.shape[1] -1
s = thresh1.sum(axis = 0)
p = 0
while(s[p] == 0):
if(s[p + 8] == 0):
thresh1 = np.delete(thresh1,1, axis = 1)
s = np.delete(s, 1, axis=0)
else:
break
except:
pass
return thresh1
# <h3>Apply Threshold inverse
image = cv.imread('3.jpeg',0)
#ret,thresh1 = cv.threshold(image, 127, 255, cv.THRESH_BINARY_INV)
#(thresh, thresh1) = cv.threshold(image, 128, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)
thresh1 = cv.threshold(image, 127, 255, cv.THRESH_BINARY_INV)[1]
#thresh1 = tf.keras.utils.normalize(thresh1, axis = 1)
kernel = np.ones((6,6),np.uint8)
opening = cv.morphologyEx(thresh1, cv.MORPH_OPEN, kernel)
closing = cv.morphologyEx(thresh1, cv.MORPH_CLOSE, kernel)
thresh = crop(opening)
#newimg = tf.keras.utils.normalize(thresh1, axis = 1)
plt.imshow(thresh1)
#cv.imshow('Original',image)
#cv.waitKey()
# <h3>Matrix Prediction
matrix = []
horizontal = horizontal_projection(thresh)
#cv.imshow('Original',horizontal[2])
#cv.waitKey()
no_rows = len(horizontal)
for i in range(no_rows):
vertical = vertical_projection(horizontal[i])
no_colmun = len(vertical)
for j in range(no_colmun):
vertical[j] = crop(vertical[j])
matrix.append(vertical)
#cv.imshow('Original',matrix[2][2])
#cv.waitKey()
#from tensorflow.keras.models import load_model
#import tensorflow as tf
model = load_model('cnn_model.h5')
my_rows, my_cols = (no_rows, no_colmun)
my_array = np.zeros((my_rows, my_cols), dtype=int)
for i in range(my_rows):
    for j in range(my_cols):
th = matrix[i][j]
resized = cv.resize(th,(28,28), interpolation = cv.INTER_AREA)
newimg = tf.keras.utils.normalize(resized, axis = 1)
newimg = np.array(newimg).reshape(-1,28,28,1)
prediction = model.predict(newimg)
value = np.argmax(prediction)
my_array[i][j] = value
print('Predicted matrix is')
my_array
from numpy import linalg as LA
w, v = LA.eig(my_array)
print("Eigen values :")
print(w)
print('Eigen Vectors :')
print(v)
print("Transpose")
print(np.transpose(my_array))
print("Inverse")
try:
print(np.linalg.inv(my_array))
except:
    print('Matrix is singular, cannot find inverse')
print("Determinant : ")
print(np.linalg.det(my_array))
| Data Preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pydeck Earth Engine Introduction
#
# This is an introduction to using [Pydeck](https://pydeck.gl) and [Deck.gl](https://deck.gl) with [Google Earth Engine](https://earthengine.google.com/) in Jupyter Notebooks.
# If you wish to run this locally, you'll need to install some dependencies. Installing into a new Conda environment is recommended. To create and enter the environment, run:
# ```
# conda create -n pydeck-ee -c conda-forge python jupyter notebook pydeck earthengine-api requests -y
# source activate pydeck-ee
# jupyter nbextension install --sys-prefix --symlink --overwrite --py pydeck
# jupyter nbextension enable --sys-prefix --py pydeck
# ```
# then open Jupyter Notebook with `jupyter notebook`.
# Now in a Python Jupyter Notebook, let's first import required packages:
from pydeck_earthengine_layers import EarthEngineLayer
import pydeck as pdk
import requests
import ee
# ## Authentication
#
# Using Earth Engine requires authentication. If you don't have a Google account approved for use with Earth Engine, you'll need to request access. For more information and to sign up, go to https://signup.earthengine.google.com/.
# If you haven't used Earth Engine in Python before, you'll need to run the following authentication command. If you've previously authenticated in Python or the command line, you can skip the next line.
#
# Note that this creates a prompt which waits for user input. If you don't see a prompt, you may need to authenticate on the command line with `earthengine authenticate` and then return here, skipping the Python authentication.
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
# ## Create Map
#
# Next it's time to create a map. We first initialize the list of layers and the view state, then check that the Earth Engine API is working by printing a string and an `ee.Image` object.
# Initialize objects
ee_layers = []
view_state = pdk.ViewState(latitude=37.7749295, longitude=-122.4194155, zoom=10, bearing=0, pitch=45)
# +
# %%
# Add Earth Engine dataset
# traditional python string
print('Hello world!')
# Earth Engine object
print(ee.String('Hello World from Earth Engine!').getInfo())
print(ee.Image('LANDSAT/LC08/C01/T1/LC08_044034_20140318').getInfo())
# -
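# As an added sketch (not part of the original notebook): to actually render the Landsat image, you could wrap it in an `EarthEngineLayer` and append it to `ee_layers`. The `vis_params` values below are illustrative assumptions, as is the `EarthEngineLayer(ee_object=..., vis_params=...)` call signature.
vis_params = {'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 30000}  # assumed true-color stretch for the raw Landsat 8 scene
ee_layers.append(EarthEngineLayer(ee_object=ee.Image('LANDSAT/LC08/C01/T1/LC08_044034_20140318'), vis_params=vis_params))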
# Then just pass these layers to a `pydeck.Deck` instance, and call `.show()` to create a map:
r = pdk.Deck(layers=ee_layers, initial_view_state=view_state)
r.show()
| GetStarted/01_hello_world.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Language Translation
# In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French.
# ## Get the Data
# Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus.
# +
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
# -
# ## Explore the Data
# Play around with view_sentence_range to view different parts of the data.
# +
view_sentence_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
# -
# ## Implement Preprocessing Function
# ### Text to Word Ids
# As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function `text_to_ids()`, you'll turn `source_text` and `target_text` from words to ids. However, you need to add the `<EOS>` word id at the end of each sentence from `target_text`. This will help the neural network predict when the sentence should end.
#
# You can get the `<EOS>` word id by doing:
# ```python
# target_vocab_to_int['<EOS>']
# ```
# You can get other word ids using `source_vocab_to_int` and `target_vocab_to_int`.
# +
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
"""
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
"""
source_id_text = [[source_vocab_to_int[word] for word in sent.split()] for sent in source_text.split("\n")]
target_id_text = [[target_vocab_to_int[word] for word in (sent + ' <EOS>').split()] for sent in target_text.split("\n")]
return (source_id_text, target_id_text)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_text_to_ids(text_to_ids)
# -
# ### Preprocess all the data and save it
# Running the code cell below will preprocess all the data and save it to file.
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
# # Check Point
# This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
# +
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
# -
# ### Check the Version of TensorFlow and Access to GPU
# This will check to make sure you have the correct version of TensorFlow and access to a GPU
# +
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) in [LooseVersion('1.0.0'), LooseVersion('1.0.1')], 'This project requires TensorFlow version 1.0 You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
# -
# ## Build the Neural Network
# You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:
# - `model_inputs`
# - `process_decoding_input`
# - `encoding_layer`
# - `decoding_layer_train`
# - `decoding_layer_infer`
# - `decoding_layer`
# - `seq2seq_model`
#
# ### Input
# Implement the `model_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:
#
# - Input text placeholder named "input" using the TF Placeholder name parameter with rank 2.
# - Targets placeholder with rank 2.
# - Learning rate placeholder with rank 0.
# - Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0.
#
# Return the placeholders in the following tuple: (Input, Targets, Learning Rate, Keep Probability)
# +
def model_inputs():
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate, keep probability)
"""
# TODO: Implement Function
input = tf.placeholder(tf.int32, shape=(None, None), name='input')
targets = tf.placeholder(tf.int32, shape=(None, None))
lr = tf.placeholder(tf.float32)
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return (input, targets, lr, keep_prob)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_inputs(model_inputs)
# -
# ### Process Decoding Input
# Implement `process_decoding_input` using TensorFlow to remove the last word id from each batch in `target_data` and concat the GO ID to the beginning of each batch.
# +
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
"""
Preprocess target data for decoding
:param target_data: Target Placeholder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
"""
# TODO: Implement Function
ending = tf.strided_slice(target_data, begin=[0, 0], end=[batch_size, -1], strides=[1, 1])
dec_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1)
return dec_input
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_process_decoding_input(process_decoding_input)
# -
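# To make the shift concrete, here is a tiny NumPy illustration (an added aside; the id values and the <GO> id are made up) of the transformation performed by `process_decoding_input`:
toy_targets = np.array([[4, 5, 6], [7, 8, 9]])  # a batch of two target sequences
toy_go_id = 1                                   # assumed id for '<GO>'
shifted = np.concatenate([np.full((2, 1), toy_go_id), toy_targets[:, :-1]], axis=1)
print(shifted)  # [[1 4 5]
                #  [1 7 8]]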
# ### Encoding
# Implement `encoding_layer()` to create an Encoder RNN layer using [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn).
# +
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
"""
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:return: RNN state
"""
# TODO: Implement Function
enc_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(num_layers)])
dropout = tf.contrib.rnn.DropoutWrapper(enc_cell, output_keep_prob=keep_prob)
_, enc_state = tf.nn.dynamic_rnn(dropout, rnn_inputs, dtype=tf.float32)
return enc_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_encoding_layer(encoding_layer)
# -
# ### Decoding - Training
# Create training logits using [`tf.contrib.seq2seq.simple_decoder_fn_train()`](https://www.tensorflow.org/versions/r1.0/api_docs/python/tf/contrib/seq2seq/simple_decoder_fn_train) and [`tf.contrib.seq2seq.dynamic_rnn_decoder()`](https://www.tensorflow.org/versions/r1.0/api_docs/python/tf/contrib/seq2seq/dynamic_rnn_decoder). Apply the `output_fn` to the [`tf.contrib.seq2seq.dynamic_rnn_decoder()`](https://www.tensorflow.org/versions/r1.0/api_docs/python/tf/contrib/seq2seq/dynamic_rnn_decoder) outputs.
# +
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob):
"""
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param sequence_length: Sequence Length
    :param decoding_scope: TensorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Train Logits
"""
# TODO: Implement Function
# drop out
dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob)
# generates a decoder fn
dynamic_fn_train = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state)
outputs_train, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(
cell=dec_cell, decoder_fn=dynamic_fn_train, inputs=dec_embed_input,
sequence_length=sequence_length, scope=decoding_scope
)
# Apply output function
train_logits = output_fn(outputs_train)
return train_logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_train(decoding_layer_train)
# -
# ### Decoding - Inference
# Create inference logits using [`tf.contrib.seq2seq.simple_decoder_fn_inference()`](https://www.tensorflow.org/versions/r1.0/api_docs/python/tf/contrib/seq2seq/simple_decoder_fn_inference) and [`tf.contrib.seq2seq.dynamic_rnn_decoder()`](https://www.tensorflow.org/versions/r1.0/api_docs/python/tf/contrib/seq2seq/dynamic_rnn_decoder).
# +
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):
"""
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param maximum_length: The maximum allowed time steps to decode
:param vocab_size: Size of vocabulary
:param decoding_scope: TensorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Inference Logits
"""
# TODO: Implement Function
dynamic_decoder_fn_inf = tf.contrib.seq2seq.simple_decoder_fn_inference(
output_fn, encoder_state, dec_embeddings, start_of_sequence_id,
end_of_sequence_id, maximum_length - 1, vocab_size)
inference_logits, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, dynamic_decoder_fn_inf, scope=decoding_scope)
return inference_logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_infer(decoding_layer_infer)
# -
# ### Build the Decoding Layer
# Implement `decoding_layer()` to create a Decoder RNN layer.
#
# - Create RNN cell for decoding using `rnn_size` and `num_layers`.
# - Create the output function using [`lambda`](https://docs.python.org/3/tutorial/controlflow.html#lambda-expressions) to transform its input, logits, to class logits.
# - Use your `decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob)` function to get the training logits.
# - Use your `decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size, decoding_scope, output_fn, keep_prob)` function to get the inference logits.
#
# Note: You'll need to use [tf.variable_scope](https://www.tensorflow.org/api_docs/python/tf/variable_scope) to share variables between training and inference.
# +
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,
num_layers, target_vocab_to_int, keep_prob):
"""
Create decoding layer
:param dec_embed_input: Decoder embedded input
:param dec_embeddings: Decoder embeddings
:param encoder_state: The encoded state
:param vocab_size: Size of vocabulary
:param sequence_length: Sequence Length
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param keep_prob: Dropout keep probability
:return: Tuple of (Training Logits, Inference Logits)
"""
# TODO: Implement Function
# dec cell
dec_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(num_layers)])
with tf.variable_scope("decoding") as decoding_scope:
# output layer, None for linear act. fn
output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
train_logits = decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob)
with tf.variable_scope("decoding", reuse=True) as decoding_scope:
inf_logits = decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, target_vocab_to_int['<GO>'],
target_vocab_to_int['<EOS>'], sequence_length,
vocab_size, decoding_scope, output_fn, keep_prob)
return train_logits, inf_logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer(decoding_layer)
# -
# ### Build the Neural Network
# Apply the functions you implemented above to:
#
# - Apply embedding to the input data for the encoder.
# - Encode the input using your `encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob)`.
# - Process target data using your `process_decoding_input(target_data, target_vocab_to_int, batch_size)` function.
# - Apply embedding to the target data for the decoder.
# - Decode the encoded input using your `decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob)`.
# +
def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):
"""
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param sequence_length: Sequence Length
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Encoder embedding size
    :param dec_embedding_size: Decoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training Logits, Inference Logits)
"""
# TODO: Implement Function
enc_embed_input = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, enc_embedding_size)
enc_state = encoding_layer(enc_embed_input, rnn_size, num_layers, keep_prob)
dec_input = process_decoding_input(target_data, target_vocab_to_int, batch_size)
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, dec_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
train_logits, inf_logits = decoding_layer(dec_embed_input, dec_embeddings, enc_state, target_vocab_size,
sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob)
return train_logits, inf_logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_seq2seq_model(seq2seq_model)
# -
# ## Neural Network Training
# ### Hyperparameters
# Tune the following parameters:
#
# - Set `epochs` to the number of epochs.
# - Set `batch_size` to the batch size.
# - Set `rnn_size` to the size of the RNNs.
# - Set `num_layers` to the number of layers.
# - Set `encoding_embedding_size` to the size of the embedding for the encoder.
# - Set `decoding_embedding_size` to the size of the embedding for the decoder.
# - Set `learning_rate` to the learning rate.
# - Set `keep_probability` to the Dropout keep probability
# Number of Epochs
epochs = 10
# Batch Size
batch_size = 256
# RNN Size
rnn_size = 256
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 100
decoding_embedding_size = 100
# Learning Rate
learning_rate = 0.002
# Dropout Keep Probability
keep_probability = 0.7
# ### Build the Graph
# Build the graph using the neural network you implemented.
# +
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob = model_inputs()
sequence_length = tf.placeholder_with_default(max_source_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(
tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)
tf.identity(inference_logits, 'logits')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
train_logits,
targets,
tf.ones([input_shape[0], sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
# -
# ### Train
# Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
# +
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import time
def get_accuracy(target, logits):
"""
Calculate accuracy
"""
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1]), (0,0)],
'constant')
return np.mean(np.equal(target, np.argmax(logits, 2)))
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = helper.pad_sentence_batch(source_int_text[:batch_size])
valid_target = helper.pad_sentence_batch(target_int_text[:batch_size])
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch) in enumerate(
helper.batch_data(train_source, train_target, batch_size)):
start_time = time.time()
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
sequence_length: target_batch.shape[1],
keep_prob: keep_probability})
if batch_i % 200 == 0 and batch_i > 0:
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch, keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_source, keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)
end_time = time.time()
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
# -
# ### Save Parameters
# Save the `batch_size` and `save_path` parameters for inference.
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params(save_path)
# # Checkpoint
# +
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
# -
# ## Sentence to Sequence
# To feed a sentence into the model for translation, you first need to preprocess it. Implement the function `sentence_to_seq()` to preprocess new sentences.
#
# - Convert the sentence to lowercase
# - Convert words into ids using `vocab_to_int`
# - Convert words not in the vocabulary, to the `<UNK>` word id.
# +
def sentence_to_seq(sentence, vocab_to_int):
"""
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
"""
# TODO: Implement Function
sent = sentence.lower()
unk_id = vocab_to_int['<UNK>']
ids = [vocab_to_int.get(word, unk_id) for word in sent.split()]
return ids
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_sentence_to_seq(sentence_to_seq)
# -
# ## Translate
# This will translate `translate_sentence` from English to French.
# ### Google translation result:
# > il a vu un vieux camion jaune
#
# ### My seq2seq model translation result:
# > il a vu un camion jaune
#
# Note: the model's translation drops "vieux" ("old") but otherwise matches the reference.
# +
translate_sentence = 'he saw a old yellow truck .'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('logits:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
# -
# ## Imperfect Translation
# You might notice that some sentences translate better than others. Since the dataset you're using only has a vocabulary of 227 English words of the thousands that you use, you're only going to see good results using these words. Additionally, the translations in this data set were made by Google translate, so the translations themselves aren't particularly good. (We apologize to the French speakers out there!) Thankfully, for this project, you don't need a perfect translation. However, if you want to create a better translation model, you'll need better data.
#
# You can train on the [WMT10 French-English corpus](http://www.statmt.org/wmt10/training-giga-fren.tar). This dataset has a larger vocabulary and is richer in the topics discussed. However, it will take days to train, so make sure you have a GPU and that the neural network is performing well on the dataset we provided. Just make sure you play with the WMT10 corpus after you've submitted this project.
# ## Submitting This Project
# When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_language_translation.ipynb" and save it as a HTML file under "File" -> "Download as". Include the "helper.py" and "problem_unittests.py" files in your submission.
| language-translation/dlnd_language_translation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Class Methods And Class variables
class Car:
base_price=100000 ##Class Variables
def __init__(self,windows,doors,power):
self.windows=windows
self.doors=doors
self.power=power
def what_base_price(self):
print("The base price is {}".format(self.base_price))
@classmethod
def revise_base_price(cls,inflation):
cls.base_price=cls.base_price+cls.base_price*inflation
Car.revise_base_price(0.10)
Car.base_price
car1=Car(4,5,2000)
car1.base_price
car1.revise_base_price(0.10)
car1.base_price
car3=Car(3,4,2000)
car3.base_price
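# Added note (not in the original): because `revise_base_price` is a classmethod that mutates `cls.base_price`, the revised value is shared by the class and by every instance that has not shadowed it with its own attribute.
print(Car.base_price, car1.base_price, car3.base_price)  # all three show the same revised base price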
| All about Python/Class Methods In Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Balanced Network De-embedding
# Demonstration of *balanced*, i.e. 2N-port, network de-embedding.
# ## Setup
# +
import numpy as np
import skrf as rf
rf.stylely()
import matplotlib.pyplot as plt
# -
# base parameters
freq = rf.Frequency(1e-3,10,1001,'ghz')
cpw = rf.media.CPW(freq, w=0.6e-3, s=0.25e-3, ep_r=10.6)
# # Build fixture network
# * short length of mismatched line with connector-like input shunt capacitance
# * some crosstalk added with nudge
# +
"""
l1
0----+-=======-2
|
= c1
|
GND
l1
1----+-=======-3
|
= c1
|
GND
"""
l1 = cpw.line(20, 'mm', z0=50, embed=True)
c1 = cpw.shunt_capacitor(C=0.15e-12, z0=50)
l1 = rf.connect(c1, 1, l1, 0)
li = rf.concat_ports([l1, l1], port_order='second')
Fix = li
Fix.name = 'Fix'
Fix.nudge(1e-4)
Left = Fix
# flip fixture for right side
Right = Fix.flipped()
# -
# ## Build DUT network
#
# * some length of mismatched lines
# * some crosstalk added with nudge
"""
l2
0-=======-2
l2
1-=======-3
"""
l2 = cpw.line(50, 'mm', z0=50, embed=True)
DUT = rf.concat_ports([l2, l2], port_order='second')
DUT.name = 'DUT'
DUT.nudge(1e-5)
# ## Build the measurement
#
# * cascade Left, DUT and Right
# * add some noise
"""
Left Meas Right
l1 l2 l1
0----+-=======-2 0-=======-2 0-=======-+----2
| |
= c1 = c1
| |
GND GND
l1 l2 l1
1----+-=======-3 1-=======-3 1-=======-+----3
| |
= c1 = c1
| |
GND GND
"""
Meas = Left ** DUT ** Right
Meas.name = 'Meas'
Meas.add_noise_polar(1e-5, 2)
# ## Perform de-embedding
DUTd = Left.inv ** Meas ** Right.inv
DUTd.name = 'DUTd'
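# Added sanity check (not part of the original example): after de-embedding, DUTd should match the true DUT up to the added noise and the small crosstalk "nudge".
print('max |S_DUTd - S_DUT| =', np.max(np.abs(DUTd.s - DUT.s)))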
# ## Display results
# +
fig, axarr = plt.subplots(2,2, sharex=True, figsize=(10,6))
ax = axarr[0,0]
Meas.plot_s_db(m=0, n=0, ax=ax)
DUTd.plot_s_db(m=0, n=0, ax=ax)
DUT.plot_s_db(m=0, n=0, ax=ax, ls=':', color='0.0')
ax.set_title('Return Loss')
ax.legend(loc='lower center', ncol=3)
ax.grid(True)
ax = axarr[0,1]
Meas.plot_s_db(m=2, n=0, ax=ax)
DUTd.plot_s_db(m=2, n=0, ax=ax)
DUT.plot_s_db(m=2, n=0, ax=ax, ls=':', color='0.0')
ax.set_title('Insertion Loss')
ax.legend(loc='lower center', ncol=3)
ax.grid(True)
ax = axarr[1,0]
Meas.plot_s_db(m=1, n=0, ax=ax)
DUTd.plot_s_db(m=1, n=0, ax=ax)
DUT.plot_s_db(m=1, n=0, ax=ax, ls=':', color='0.0')
ax.set_title('Isolation')
ax.legend(loc='lower center', ncol=3)
ax.grid(True)
ax = axarr[1,1]
Meas.plot_s_deg(m=2, n=0, ax=ax)
DUTd.plot_s_deg(m=2, n=0, ax=ax, marker='o', markevery=25)
DUT.plot_s_deg(m=2, n=0, ax=ax, ls=':', color='0.0')
ax.set_title('Insertion Loss (phase)')
ax.legend(loc='lower center', ncol=3)
ax.grid(True)
fig.tight_layout()
| doc/source/examples/networktheory/Balanced Network De-embedding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: =base
# language: python
# name: base
# ---
import os
import cv2
import pandas as pd
import numpy as np
from image_augment_pairs import *
from matplotlib import pyplot as plt
from scipy.ndimage.interpolation import rotate
#img_dir = './batch1'
img_dir = './no_aug_batch1'
#img_dir = './'
def show_imgs(img_no, i, img_dir=img_dir):
print(img_no)
images = np.load(os.path.join(img_dir, 'val_images_%d.npy' % img_no))[i]
labels = np.load(os.path.join(img_dir, 'val_labels_%d.npy' % img_no))[i]
preds = np.load(os.path.join(img_dir, 'val_preds_%d.npy' % img_no))[i]
print(images.shape, labels.shape, np.sum(preds, axis=0).shape)
print(images.dtype, labels.dtype, preds.dtype)
plt.figure(figsize=(15,5))
plt.subplot(131)
plt.title('image')
plt.imshow(images.transpose(1,2,0)[:,:,::-1])
plt.subplot(132)
plt.title('label')
plt.imshow(labels)
plt.subplot(133)
plt.title('estimation')
plt.imshow(preds)
plt.show()
plt.savefig('./%s_%d.png' % (img_dir, img_no))
def show_imgs_dis(img_no, i):
print(img_no)
images = np.load(os.path.join(img_dir, 'val_images_%d.npy' % img_no))[i]
labels = np.load(os.path.join(img_dir, 'val_labels_%d.npy' % img_no))[i]
preds = np.load(os.path.join(img_dir, 'val_preds_%d.npy' % img_no))[i]
print(images.shape, labels.shape, preds.shape)
print(images.dtype, labels.dtype, preds.dtype)
plt.figure(figsize=(15,5))
plt.subplot(131)
plt.imshow(images.transpose(1,2,0)[:,:,::-1])
plt.subplot(132)
#plt.imshow(labels)
plt.hist(labels.flatten())
plt.subplot(133)
#plt.imshow(np.sum(preds, axis=0))
plt.hist(preds.flatten())
plt.show()
    plt.savefig('./%s_%d.png' % (img_dir, img_no))
for i in range(1):
show_imgs(90, i)
for i in range(1):
show_imgs(20, i, './batch1')
# +
df = pd.read_csv('./no_aug_batch1/loss_classification.csv')
plt.plot(df['train'], label='train loss')
plt.plot(df['val'], label='val loss')
plt.title('Loss curves')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.grid()
plt.savefig('./loss_classification.png')
# +
df = pd.read_csv('./batch1/loss_classification.csv')
plt.plot(df['train'], label='train loss')
plt.plot(df['val'], label='val loss')
plt.title('Loss curves')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.grid()
plt.savefig('./loss_classification_hg.png')
# -
# ### Image augmentation
img_dir = '../FloorplanTransformation/pytorch/img_label_arr_train'
out_dir = './augmented_data'
imgs = [f for f in os.listdir(img_dir) if 'image' in f]
for img in imgs:
icon = img.replace('image', 'icon')
room = img.replace('image', 'room')
img_name = os.path.splitext(img)[0]
icon_name = os.path.splitext(icon)[0]
room_name = os.path.splitext(room)[0]
print(img_name, icon_name, room_name)
img_arr = np.load(os.path.join(img_dir, img)).transpose(1,2,0)
icon_arr = np.load(os.path.join(img_dir, icon))
room_arr = np.load(os.path.join(img_dir, room))
print(img_arr.shape, icon_arr.shape, room_arr.shape)
# 0
np.save(os.path.join(out_dir, '%s_0.npy' % img_name), img_arr)
np.save(os.path.join(out_dir, '%s_0.npy' % icon_name), icon_arr)
np.save(os.path.join(out_dir, '%s_0.npy' % room_name), room_arr)
# 1 -> 90 rotation
np.save(os.path.join(out_dir, '%s_90.npy' % img_name), rotate(img_arr, 90))
np.save(os.path.join(out_dir, '%s_90.npy' % icon_name), rotate(icon_arr, 90))
np.save(os.path.join(out_dir, '%s_90.npy' % room_name), rotate(room_arr, 90))
# 2 -> 90 rotation + h-flip
np.save(os.path.join(out_dir, '%s_90_h.npy' % img_name), rotate(img_arr, 90)[:,::-1,:])
np.save(os.path.join(out_dir, '%s_90_h.npy' % icon_name), rotate(icon_arr, 90)[:,::-1])
np.save(os.path.join(out_dir, '%s_90_h.npy' % room_name), rotate(room_arr, 90)[:,::-1])
# 3 -> 90 rotation + v-flip
np.save(os.path.join(out_dir, '%s_90_v.npy' % img_name), rotate(img_arr, 90)[::-1,:,:])
np.save(os.path.join(out_dir, '%s_90_v.npy' % icon_name), rotate(icon_arr, 90)[::-1,:])
np.save(os.path.join(out_dir, '%s_90_v.npy' % room_name), rotate(room_arr, 90)[::-1,:])
break
plt.imshow(img_arr)
plt.figure(figsize=(10,10))
plt.imshow(rotate(img_arr, 180))
plt.figure(figsize=(10,10))
plt.imshow(img_arr[::-1,:,:])
| check_result_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Gaussian Blur, Medical Images
# ### Import resources and display image
# +
import numpy as np
import matplotlib.pyplot as plt
import cv2
# %matplotlib inline
# Read in the image
image = cv2.imread('images/brain_MR.jpg')
# Make a copy of the image
image_copy = np.copy(image)
# Change color to RGB (from BGR)
image_copy = cv2.cvtColor(image_copy, cv2.COLOR_BGR2RGB)
plt.imshow(image_copy)
# -
# ### Gaussian blur the image
# +
# Convert to grayscale for filtering
gray = cv2.cvtColor(image_copy, cv2.COLOR_RGB2GRAY)
# Create a Gaussian blurred image
gray_blur = cv2.GaussianBlur(gray, (9, 9), 0)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.set_title('original gray')
ax1.imshow(gray, cmap='gray')
ax2.set_title('blurred image')
ax2.imshow(gray_blur, cmap='gray')
# -
# ### Test performance with a high-pass filter
# +
# High-pass filter
# 3x3 sobel filters for edge detection
sobel_x = np.array([[ -1, 0, 1],
[ -2, 0, 2],
[ -1, 0, 1]])
sobel_y = np.array([[ -1, -2, -1],
[ 0, 0, 0],
[ 1, 2, 1]])
# Filter the original and blurred grayscale images using filter2D
filtered = cv2.filter2D(gray, -1, sobel_x)
filtered_blurred = cv2.filter2D(gray_blur, -1, sobel_y)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.set_title('original gray')
ax1.imshow(filtered, cmap='gray')
ax2.set_title('blurred image')
ax2.imshow(filtered_blurred, cmap='gray')
# +
# Threshold the filtered image: pixels above the threshold value are set to white
retval, binary_image = cv2.threshold(filtered_blurred, 50, 255, cv2.THRESH_BINARY)
plt.imshow(binary_image, cmap='gray')
# -
| 1_2_Convolutional_Filters_Edge_Detection/.ipynb_checkpoints/3. Gaussian Blur-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import altair as alt
# +
df = alt.pd.read_csv("../data/processed/data_cleaned.csv")
df.head()
# +
domains = ["1) Bajo", "2) Medio Bajo", "3) Medio Alto", "4) Alto"]
colors = ["#fcdfef", "#f59fce", "#eb409d", "#e4007c"]
color_scale = alt.Scale(
domain=domains,
range=colors
)
chart = alt.Chart(df).mark_bar(strokeWidth = .5, stroke = "#282828").encode(
x = alt.X("sum(cuenta)", title = "", stack = "normalize", axis = alt.Axis(format = "%"), ),
y = alt.Y("nom_entidad:N", title = "entidad federativa",),
color = alt.Color("estratos:O", legend = alt.Legend(title = "estrato s.e.",), scale = color_scale,),
column = alt.Column("tono_de_color_de_piel:O", sort = "ascending", title = "", header = alt.Header(title = "")),
).properties(
width = 50,
height = 700,
title = "06 Visualizing Strata (Remix)"
)
chart.configure_view(width = 1080)
# -
# %run scripts/cimarron_theme_mod.py
# + language="html"
# <style>
# @import url('https://fonts.googleapis.com/css?family=Ubuntu|Ubuntu+Condensed|Ubuntu+Mono');
# </style>
# -
random_data = {"a": [2,1,5,5,7,8], "b": [4,5,7,8,4,6]}
text = alt.pd.DataFrame(random_data)
source = alt.Chart(text).mark_text(text = "Source: INEGI, MMSI 2016")
# +
domains2 = [1,2,3,4,5,6,7,8,9,10,11]
colors2 = ["#322e25", "#3d230c", "#4a382e", "#684d3c", "#7e6352", "#95765a", "#b59a7f", "#dec09a", "#e1b8b2", "#f2d0d1", "#f9ebeb"]
color_scale2 = alt.Scale(
domain=domains2,
range=colors2
)
color_legend = alt.Chart(df).mark_bar(strokeWidth = 0.5, stroke = "#282828").encode(
y = alt.Y("tono_de_color_de_piel:N", title = "", axis = alt.Axis(domain = False, ticks = False, offset = -40)),
color = alt.Color("tono_de_color_de_piel:N", legend = None, scale = color_scale2 )
).properties(
width = 100,
height = 300
)
color_legend
# -
| #100Viz/06 - Visualizing Strata (Remix)/src/06 Visualizing Strata (Remix).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# # Number of collisions
# +
# %matplotlib widget
# global import
import numpy as np
import pandas as pd
# plotting
import matplotlib.pyplot as plt
import seaborn as sns
# local import
import lppydsmc as ld
seed = 1111
np.random.seed(seed)
# -
def number_of_collisions(mass, radius, quantity, time_step, iterations, temperature = 300, density = 3.2e19, cell_volume = 1e-9, seed = None):
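    # Added description (not in the original): run `iterations` DSMC collision steps for a single
    # species in one 1 mm x 1 mm cell, using Maxwellian initial velocities at `temperature`, and
    # return the per-iteration collision counts reported by the collision handler.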
import pandas as pd
import lppydsmc as ld
import numpy as np
from tqdm import tqdm
from pathlib import Path
np.random.seed(seed)
collisions_list = []
points = 1e-3*np.array([[0,0],[0,1],[1,1],[1,0]])
positions = ld.initialization.particles.uniform_position(quantity, points)
vel_std = ld.utils.physics.gaussian(temperature, mass)
velocities = np.random.normal(loc=0.0, scale=vel_std, size = (quantity,3))
arrays = [np.concatenate((positions, velocities), axis = 1)]
grid = np.array([np.zeros((quantity,2))]) # 1 cell
grid[0][:,1] = np.array([k for k in range(quantity)])
currents = np.array([quantity])
averages = np.array([quantity])
cross_section = np.pi*4*radius*radius
cross_sections = np.array([[cross_section]])
max_proba = np.array([3*cross_section*ld.utils.physics.maxwellian_mean_speed(temperature, mass)])
target_density = density
cell_volume = cell_volume
particles_weight = target_density*cell_volume/quantity
remains_per_cell = np.array([0.])
masses = np.array([mass])
monitoring = None
group_fn = None
period_saving = 10
for iteration in tqdm(range(1,iterations+1)) :
results = ld.collision.handler_particles_collisions(arrays, grid, currents, time_step, \
averages, max_proba, cross_sections, cell_volume, particles_weight, remains_per_cell, masses, monitoring = monitoring, group_fn = group_fn)
collisions_list.append(results)
# if(iteration%100 == 0):
# print(ld.collision.collider.candidates(currents, time_step, averages, max_proba, cell_volume, particles_weight, remains_per_cell))
# print(remains_per_cell)
# fig, ax = plt.subplots(3)
# ax[0].hist(arrays[0][:,2],color = 'r', bins = 30)
# ax[1].hist(arrays[0][:,3], color = 'g', bins = 30)
# ax[2].hist(arrays[0][:,4], color = 'b', bins = 30)
return collisions_list
mass = 2.16e-25 # kg
time_step = 1e-5
radius = 2e-10
J_TO_EV = 1/1.6e-19
iterations = 1000
T = 300
n = 3.2e19
quantity = int(1e5)
cell_volume = 1e-9
collisions_list = number_of_collisions(mass = mass, radius = radius, quantity = quantity, time_step = time_step, \
iterations = iterations, temperature = T, density= n,cell_volume = cell_volume, seed = 1111)
fig, ax = plt.subplots()
ax.plot(collisions_list)
# +
BOLTZMAN_CONSTANT = 1.38064e-23 # J K−1
def relative_speed_maxwellian(temperature,mass):
return 4/np.sqrt(np.pi)*np.sqrt(BOLTZMAN_CONSTANT*temperature/mass)
# 1st : theoretical collision frequency for MB distribution
def number_of_collisions_per_time_and_volume(T, m, d, n): # per unit of volume and time
cross_section = np.pi*d*d
return 1/2 * cross_section *n**2 * relative_speed_maxwellian(T,m)
# -
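# For reference (an added note), the two functions above implement the hard-sphere, Maxwell–Boltzmann estimates
#
# $$\langle v_r \rangle = \frac{4}{\sqrt{\pi}}\sqrt{\frac{k_B T}{m}}, \qquad \nu_c = \frac{1}{2}\,\sigma\, n^2\, \langle v_r \rangle, \qquad \sigma = \pi d^2,$$
#
# so the expected number of real collisions over the run is $\nu_c \times t_{\mathrm{sim}} \times V$, which is what the cells below compare against the simulation.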
relative_speed_maxwellian(T, mass)
nu_c = number_of_collisions_per_time_and_volume(T = T, m = mass, d=2*radius, n = n)
simu_time = iterations*time_step
volume_system = 1e-9
particles_weight = n*volume_system/quantity
particles_weight
collisions_nb = np.sum(np.array(collisions_list)) # we count twice too many
print('Total number of collision : {:e}'.format(collisions_nb));
print('Equivalent in reality : {:e}'.format(particles_weight*collisions_nb))
print('Expected number of collisions in reality : {:e}'.format(nu_c*simu_time*volume_system))
print('Relative difference (%) : {:.2}'.format(100*(particles_weight*collisions_nb-nu_c*simu_time*volume_system)/(nu_c*simu_time*volume_system)))
| benchmarks/notebooks/number_of_collisions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso, Ridge
plt.rcParams.update({'font.size': 20})
import numpy as np
# -
# # Model order selection
#
#
# We will try fitting a polynomial model to noisy data, where the true signal is a sinusoid.
#
# +
npoints = 20
sigma = 0.2
x = 2*np.pi*np.arange(npoints)/npoints
yorig = np.sin(x)
# Seeding the random number generator to get consistent results. You may change this
np.random.seed(seed=10)
y = yorig + sigma*np.random.normal(size=npoints)
fig = plt.figure()
ax = fig.gca()
cs = ax.plot(x, y,'ro',label='Noisy samples')
cs = ax.plot(x, yorig,'b')
cs = ax.plot(x,yorig,'ko',label='Original samples')
legend = ax.legend(loc='lower left', shadow=True, fontsize='x-small')
s=plt.title('Data')
# -
# ### Solving using polyfit (built-in function) with a chosen degree d
#
# Polyfit fits a polynomial of degree d to the data to obtain the parameters
# +
d = 13 # degree of the polynomial fit
weights = np.polyfit(x,y,d) # Last argument is degree of polynomial
f = np.polyval(weights,x)
# Plotting
fig1 = plt.figure()
ax1 = fig1.gca()
s=ax1.plot(x, y,'ro',label='Measurements')
s=plt.plot(x,f,'r--',label='Fit')
s=plt.plot(x,yorig,label='Original')
s=plt.ylabel('y')
s=plt.xlabel('x')
legend = ax1.legend(loc='lower left', shadow=True, fontsize='x-small')
plt.title('Fits')
plt.show()
fig2 = plt.figure()
plt.stem(weights,use_line_collection=True)
s=plt.title('Coefficients')
# -
# ## Plotting the fit error as a function of the model order
#
# We will vary the model order and perform fitting, while evaluating the error in the fits. Note that the error decreases as the model order increases. Polyfit will spit out warnings with increasing d as the matrix $\mathbf X^T\mathbf X$ becomes closer to non-invertible.
# <font color=red> You should pay attention to the warnings since the fits may be poor. You need to choose a lower model order or add regularization. </font>
# + tags=[]
err_array = []
for d in range(20):
weights = np.polyfit(x,y,d)
f = np.polyval(weights,x)
err_array.append(np.linalg.norm(f-y))
s=plt.plot(err_array)
s=plt.title("Training error with increasing model order")
# -
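# As an illustrative aside (not part of the original exercise), the `Ridge` estimator imported above can stabilize a high-degree fit by penalizing large coefficients; the degree and `alpha` below are arbitrary choices, not prescribed values.
d_reg = 13
X_poly = np.vander(x, d_reg + 1)                # columns x**d_reg, ..., x, 1
ridge = Ridge(alpha=1e-3, fit_intercept=False)  # the constant term is already the last Vandermonde column
ridge.fit(X_poly, y)
f_ridge = X_poly @ ridge.coef_
fig_r = plt.figure()
ax_r = fig_r.gca()
s = ax_r.plot(x, y, 'ro', label='Measurements')
s = plt.plot(x, f_ridge, 'r--', label='Ridge fit (d=13)')
s = plt.plot(x, yorig, label='Original')
legend = ax_r.legend(loc='lower left', shadow=True, fontsize='x-small')
s = plt.title('Ridge-regularized fit')
plt.show()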
# ## <font color=red> Validation for model order selection</font>
#
# You will split the training data to validation data and fitting data. The fit will be performed using the fitting data. Store the training errors and validation errors and plot. Determine the optimal model order from the plots as the minimum of the validation error.
# + tags=[]
# Random splitting routine from scikit_learn
x_train, x_validation , y_train, y_validation = train_test_split(x, y, test_size=0.25,random_state=32)
train_err_array = []
validation_err_array = []
fig3 = plt.figure()
ax3 = fig3.gca()
for model_order in range(18):
weights = np.polyfit(x_train,y_train,model_order)
f_train = np.polyval(weights,x_train)
train_err_array.append(np.linalg.norm(f_train-y_train))
f_validation = np.polyval(weights,x_validation)
validation_err_array.append(np.linalg.norm(f_validation-y_validation))
s=plt.plot(train_err_array,'r',label='Training error')
s=plt.plot(validation_err_array,label='Validation error')
s=plt.plot(validation_err_array,'k*')
s = plt.xlabel('model order')
s = plt.ylabel('Error')
s = plt.ylim([0,5])
s = plt.grid()
legend = ax3.legend(loc='upper left', shadow=True, fontsize='x-small')
plt.show()
# -
# <font color=red>Pick the model order that gives you the minimum validation error and evaluate the fit to the data. Show the fit as well </font>
# +
# YOUR CODE HERE
minValModOrder = np.asarray(validation_err_array).argmin()
weights = np.polyfit(x_train, y_train, minValModOrder)
f_optimal_model_order = np.polyval(weights, x)
# Plotting
fig1 = plt.figure()
ax1 = fig1.gca()
s=ax1.plot(x, y,'ro',label='Measurements')
s=plt.plot(x,f_optimal_model_order,'r--',label='Optimal fit')
s=plt.plot(x,yorig,label='Original')
s=plt.ylabel('y')
s=plt.xlabel('x')
legend = ax1.legend(loc='lower left', shadow=True, fontsize='x-small')
plt.title('Optimal model order fit')
plt.show()
fig2 = plt.figure()
plt.stem(weights,use_line_collection=True)
s=plt.title('Coefficients')
# -
| model_order.ipynb |