# # The Bernstein Bears CRP Submission 1
# install necessary libraries from input
# import progressbar library for offline usage
# import text stat library for additional ml data prep
FAST_DEV_RUN = False
USE_CHECKPOINT = True
USE_HIDDEN_IN_RGR = False
N_FEATURES_TO_USE_HEAD = 1
N_FEATURES_TO_USE_TAIL = None
# in this kernel, train on all available data to maximize the score on held-out data, but reuse the optimal parameters learned from tuning
# set to 16 bit precision to cut compute requirements/increase batch size capacity
USE_16_BIT_PRECISION = True
# set a seed value for consistent experimentation; optional, else leave as None
SEED_VAL = 42
# set a train-validation split; e.g., 0.7 sends 70% of the data to training and 30% to validation
TRAIN_VALID_SPLIT = 0.8 # if None, then don't split
# set hyperparameters learned from tuning: https://www.kaggle.com/justinchae/tune-roberta-pytorch-lightning-optuna
MAX_EPOCHS = 4
BATCH_SIZE = 16
GRADIENT_CLIP_VAL = 0.18318092164684585
LEARNING_RATE = 3.613894271216525e-05
TOKENIZER_MAX_LEN = 363
WARMUP_STEPS = 292
WEIGHT_DECAY = 0.004560699842170359
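# Pipeline overview (the three stages implemented below):
#   1) fine-tune or load a RoBERTa sequence-regression head with PyTorch Lightning and predict a
#      readability logit for every excerpt
#   2) merge those logits with textstat readability features and rank the features with a
#      linear-kernel SVR on standardized data
#   3) tune a LightGBM regressor on the selected features with Optuna and write submission.csv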
import kaggle_config
from kaggle_config import (
WORKFLOW_ROOT,
DATA_PATH,
CACHE_PATH,
FIG_PATH,
MODEL_PATH,
ANALYSIS_PATH,
KAGGLE_INPUT,
CHECKPOINTS_PATH,
LOGS_PATH,
)
INPUTS, DEVICE = kaggle_config.run()
KAGGLE_TRAIN_PATH = kaggle_config.get_train_path(INPUTS)
KAGGLE_TEST_PATH = kaggle_config.get_test_path(INPUTS)
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.tuner.batch_size_scaling import scale_batch_size
from pytorch_lightning.tuner.lr_finder import _LRFinder, lr_find
import torchmetrics
import optuna
from optuna.integration import PyTorchLightningPruningCallback
from optuna.samplers import TPESampler, RandomSampler, CmaEsSampler
from optuna.visualization import (
plot_intermediate_values,
plot_optimization_history,
plot_param_importances,
)
import optuna.integration.lightgbm as lgb
import lightgbm as lgm
from sklearn.model_selection import (
KFold,
cross_val_score,
RepeatedKFold,
train_test_split,
)
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.decomposition import PCA
from sklearn.feature_selection import (
RFE,
f_regression,
mutual_info_regression,
SequentialFeatureSelector,
)
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
import math
import textstat
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataset import random_split
import tensorflow as tf
from transformers import (
RobertaForSequenceClassification,
RobertaTokenizer,
AdamW,
get_linear_schedule_with_warmup,
)
import os
import pandas as pd
import numpy as np
import gc
from functools import partial
from typing import List, Dict
from typing import Optional
from argparse import ArgumentParser
import random
if SEED_VAL:
random.seed(SEED_VAL)
np.random.seed(SEED_VAL)
seed_everything(SEED_VAL)
NUM_DATALOADER_WORKERS = os.cpu_count()
try:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="")
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
n_tpus = len(tf.config.list_logical_devices("TPU"))
except ValueError:
n_tpus = 0
ACCELERATOR_TYPE = {}
ACCELERATOR_TYPE.update(
{"gpus": torch.cuda.device_count() if torch.cuda.is_available() else None}
)
ACCELERATOR_TYPE.update({"tpu_cores": n_tpus if n_tpus > 0 else None})
# still debugging how best to toggle between TPU and GPU; configuring both cleanly needs more code than it is worth here
print("ACCELERATOR_TYPE:\n", ACCELERATOR_TYPE)
PRETRAINED_ROBERTA_BASE_MODEL_PATH = "/kaggle/input/pre-trained-roberta-base"
PRETRAINED_ROBERTA_BASE_TOKENIZER_PATH = "/kaggle/input/tokenizer-roberta"
PRETRAINED_ROBERTA_BASE_TOKENIZER = RobertaTokenizer.from_pretrained(
PRETRAINED_ROBERTA_BASE_TOKENIZER_PATH
)
TUNED_CHECKPOINT_PATH = "/kaggle/input/best-crp-ckpt-4/crp_roberta_trial_4.ckpt"
# from: https://www.kaggle.com/justinchae/crp-regression-with-roberta-and-lightgbm
TUNED_BEST_ROBERTA_PATH = "/kaggle/input/my-best-tuned-roberta"
"""Implementing Lightning instead of torch.nn.Module
"""
class LitRobertaLogitRegressor(pl.LightningModule):
def __init__(
self,
pre_trained_path: str,
output_hidden_states: bool = False,
num_labels: int = 1,
layer_1_output_size: int = 64,
layer_2_output_size: int = 1,
learning_rate: float = 1e-5,
task_name: Optional[str] = None,
warmup_steps: int = 100,
weight_decay: float = 0.0,
adam_epsilon: float = 1e-8,
batch_size: Optional[int] = None,
train_size: Optional[int] = None,
max_epochs: Optional[int] = None,
n_gpus: Optional[int] = 0,
n_tpus: Optional[int] = 0,
accumulate_grad_batches=None,
tokenizer=None,
do_decode=False,
):
"""refactored from: https://www.kaggle.com/justinchae/my-bert-tuner and https://www.kaggle.com/justinchae/roberta-tuner"""
super(LitRobertaLogitRegressor, self).__init__()
# this saves class params as self.hparams
self.save_hyperparameters()
self.model = RobertaForSequenceClassification.from_pretrained(
self.hparams.pre_trained_path,
output_hidden_states=self.hparams.output_hidden_states,
num_labels=self.hparams.num_labels,
)
        self.accelerator_multiplier = n_gpus if n_gpus else 1  # n_gpus may be None on CPU/TPU runs
self.config = self.model.config
self.parameters = self.model.parameters
self.save_pretrained = self.model.save_pretrained
# these layers are not currently used, tbd in future iteration
self.layer_1 = torch.nn.Linear(768, layer_1_output_size)
self.layer_2 = torch.nn.Linear(layer_1_output_size, layer_2_output_size)
self.tokenizer = tokenizer
self.do_decode = do_decode
self.output_hidden_states = output_hidden_states
def rmse_loss(x, y):
criterion = F.mse_loss
loss = torch.sqrt(criterion(x, y))
return loss
# TODO: enable toggle for various loss funcs and torchmetrics package
self.loss_func = rmse_loss
# self.eval_func = rmse_loss
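        # the CommonLit metric is RMSE on the readability target, i.e. sqrt(mean((y_hat - y)^2)),
        # so the model optimizes the square root of F.mse_loss directly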
def setup(self, stage=None) -> None:
if stage == "fit":
# when this class is called by trainer.fit, this stage runs and so on
# Calculate total steps
tb_size = self.hparams.batch_size * self.accelerator_multiplier
ab_size = self.hparams.accumulate_grad_batches * float(
self.hparams.max_epochs
)
self.total_steps = (self.hparams.train_size // tb_size) // ab_size
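            # total_steps is what configure_optimizers passes to the scheduler as
            # num_training_steps: (train_size // (batch_size * n_gpus-or-1)) further divided by
            # accumulate_grad_batches * max_epochs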
def extract_logit_only(self, input_ids, attention_mask) -> float:
output = self.model(input_ids=input_ids, attention_mask=attention_mask)
logit = output.logits
logit = logit.cpu().numpy().astype(float)
return logit
def extract_hidden_only(self, input_ids, attention_mask) -> np.array:
        output = self.model(input_ids=input_ids, attention_mask=attention_mask)
hidden_states = output.hidden_states
x = torch.stack(hidden_states[-4:]).sum(0)
m1 = torch.nn.Sequential(self.layer_1, self.layer_2, torch.nn.Flatten())
x = m1(x)
x = torch.squeeze(x).cpu().numpy()
return x
def forward(self, input_ids, attention_mask) -> torch.Tensor:
output = self.model(input_ids=input_ids, attention_mask=attention_mask)
x = output.logits
return x
def training_step(self, batch, batch_idx: int) -> float:
# refactored from: https://www.kaggle.com/justinchae/epoch-utils
labels, encoded_batch, kaggle_ids = batch
input_ids = encoded_batch["input_ids"]
attention_mask = encoded_batch["attention_mask"]
# per docs, keep train step separate from forward call
output = self.model(input_ids=input_ids, attention_mask=attention_mask)
y_hat = output.logits
# quick reshape to align labels to predictions
labels = labels.view(-1, 1)
loss = self.loss_func(y_hat, labels)
self.log("train_loss", loss)
return loss
def validation_step(self, batch, batch_idx: int) -> float:
# refactored from: https://www.kaggle.com/justinchae/epoch-utils
labels, encoded_batch, kaggle_ids = batch
input_ids = encoded_batch["input_ids"]
attention_mask = encoded_batch["attention_mask"]
# this self call is calling the forward method
y_hat = self(input_ids, attention_mask)
# quick reshape to align labels to predictions
labels = labels.view(-1, 1)
loss = self.loss_func(y_hat, labels)
self.log("val_loss", loss)
return loss
def predict(self, batch, batch_idx: int, dataloader_idx: int = None):
# creating this predict method overrides the pl predict method
target, encoded_batch, kaggle_ids = batch
input_ids = encoded_batch["input_ids"]
attention_mask = encoded_batch["attention_mask"]
# this self call is calling the forward method
y_hat = self(input_ids, attention_mask)
# convert to numpy then list like struct to zip with ids
y_hat = y_hat.cpu().numpy().ravel()
# customizing the predict behavior to account for unique ids
if self.tokenizer is not None and self.do_decode:
target = target.cpu().numpy().ravel() if len(target) > 0 else None
excerpt = self.tokenizer.batch_decode(
input_ids.cpu().numpy(),
skip_special_tokens=True,
clean_up_tokenization_spaces=True,
)
if self.output_hidden_states:
hidden_states = self.extract_hidden_only(
input_ids=input_ids, attention_mask=attention_mask
)
else:
hidden_states = None
if target is not None:
predictions = list(
zip(
kaggle_ids,
target,
y_hat
# , hidden_states
)
)
predictions = pd.DataFrame(
predictions,
columns=[
"id",
"target",
"logit"
# , 'hidden_states'
],
)
else:
predictions = list(
zip(
kaggle_ids,
y_hat
# , hidden_states
)
)
predictions = pd.DataFrame(
predictions,
columns=[
"id",
"logit"
# , 'hidden_states'
],
)
else:
predictions = list(zip(kaggle_ids, y_hat))
predictions = pd.DataFrame(predictions, columns=["id", "target"])
return predictions
def configure_optimizers(self) -> torch.optim.Optimizer:
# Reference: https://pytorch-lightning.readthedocs.io/en/latest/notebooks/lightning_examples/text-transformers.html
model = self.model
no_decay = ["bias", "LayerNorm.weight"]
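        # standard transformer fine-tuning practice: bias and LayerNorm parameters are excluded
        # from weight decay below; all remaining parameters get hparams.weight_decay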
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": self.hparams.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=self.hparams.learning_rate,
eps=self.hparams.adam_epsilon,
)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=self.hparams.warmup_steps,
num_training_steps=self.total_steps,
)
scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return [optimizer], [scheduler]
def my_collate_fn(
batch,
tokenizer,
max_length: int = 100,
return_tensors: str = "pt",
padding: str = "max_length",
truncation: bool = True,
):
# source: https://www.kaggle.com/justinchae/nn-utils
labels = []
batch_texts = []
kaggle_ids = []
for _label, batch_text, kaggle_id in batch:
if _label is not None:
labels.append(_label)
batch_texts.append(batch_text)
kaggle_ids.append(kaggle_id)
if _label is not None:
labels = torch.tensor(labels, dtype=torch.float)
encoded_batch = tokenizer(
batch_texts,
return_tensors=return_tensors,
padding=padding,
max_length=max_length,
truncation=truncation,
)
return labels, encoded_batch, kaggle_ids
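# Illustrative use of my_collate_fn (comment only, so nothing extra runs in the pipeline).
# CommonLitDataModule below binds the tokenizer and max_length via functools.partial, so the
# DataLoader calls a one-argument collate; the targets and ids here are made up:
#
#     collate = partial(my_collate_fn, tokenizer=PRETRAINED_ROBERTA_BASE_TOKENIZER,
#                       max_length=TOKENIZER_MAX_LEN)
#     labels, encoded, ids = collate(
#         [(-0.34, "A short example excerpt.", "id_001"), (0.58, "Another excerpt.", "id_002")]
#     )
#     # labels -> float tensor of shape (2,); encoded["input_ids"] -> shape (2, TOKENIZER_MAX_LEN)
#     # because padding="max_length"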
class CommonLitDataset(Dataset):
def __init__(
self,
df,
text_col: str = "excerpt",
label_col: str = "target",
kaggle_id: str = "id",
        sample_size: Optional[int] = None,
):
self.df = df if sample_size is None else df.sample(sample_size)
self.text_col = text_col
self.label_col = label_col
self.kaggle_id = kaggle_id
self.num_labels = (
len(df[label_col].unique()) if label_col in df.columns else None
)
# source: https://www.kaggle.com/justinchae/nn-utils
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
result = None
text = self.df.iloc[idx][self.text_col]
kaggle_id = self.df.iloc[idx][self.kaggle_id]
if "target" in self.df.columns:
target = self.df.iloc[idx][self.label_col]
return target, text, kaggle_id
else:
return None, text, kaggle_id
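# CommonLitDataset yields (target_or_None, excerpt_text, kaggle_id) triples; a None target is how
# my_collate_fn above recognizes the unlabeled test set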
class CommonLitDataModule(pl.LightningDataModule):
def __init__(
self,
tokenizer,
train_path,
collate_fn=None,
max_length: int = 280,
batch_size: int = 16,
valid_path: Optional[str] = None,
test_path: Optional[str] = None,
train_valid_split: float = 0.6,
dtypes=None,
shuffle_dataloader: bool = True,
num_dataloader_workers: int = NUM_DATALOADER_WORKERS,
kfold: Optional[dict] = None,
):
super(CommonLitDataModule, self).__init__()
self.tokenizer = tokenizer
self.train_path = train_path
self.valid_path = valid_path
self.test_path = test_path
self.train_valid_split = train_valid_split
self.dtypes = {"id": str} if dtypes is None else dtypes
self.train_size = None
self.train_df, self.train_data = None, None
self.valid_df, self.valid_data = None, None
self.test_df, self.test_data = None, None
if collate_fn is not None:
self.collate_fn = partial(
collate_fn, tokenizer=tokenizer, max_length=max_length
)
else:
self.collate_fn = partial(
                my_collate_fn, tokenizer=tokenizer, max_length=max_length
)
self.shuffle_dataloader = shuffle_dataloader
self.batch_size = batch_size
self.num_dataloader_workers = num_dataloader_workers
# refactored from: https://www.kaggle.com/justinchae/nn-utils
def _strip_extraneous(self, df):
strip_cols = ["url_legal", "license"]
if all(col in df.columns for col in strip_cols):
extraneous_data = strip_cols
return df.drop(columns=extraneous_data)
else:
return df
def prepare(self, prep_type=None):
if prep_type == "train":
# creates just an instance of the train data as a pandas df
self.train_df = (
self.train_path
if isinstance(self.train_path, pd.DataFrame)
else pd.read_csv(self.train_path, dtype=self.dtypes)
)
self.train_df = self._strip_extraneous(self.train_df)
if prep_type == "train_stage_2":
self.train_df = (
self.train_path
if isinstance(self.train_path, pd.DataFrame)
else pd.read_csv(self.train_path, dtype=self.dtypes)
)
self.train_df = self._strip_extraneous(self.train_df)
self.train_size = int(len(self.train_df))
self.train_data = CommonLitDataset(df=self.train_df)
def setup(self, stage: Optional[str] = None) -> None:
if stage == "fit":
# when this class is called by trainer.fit, this stage runs and so on
self.train_df = (
self.train_path
if isinstance(self.train_path, pd.DataFrame)
else pd.read_csv(self.train_path, dtype=self.dtypes)
)
self.train_df = self._strip_extraneous(self.train_df)
self.train_size = int(len(self.train_df))
self.train_data = CommonLitDataset(df=self.train_df)
if self.train_valid_split is not None and self.valid_path is None:
self.train_size = int(len(self.train_df) * self.train_valid_split)
self.train_data, self.valid_data = random_split(
self.train_data,
[self.train_size, len(self.train_df) - self.train_size],
)
elif self.valid_path is not None:
self.valid_df = (
self.valid_path
if isinstance(self.valid_path, pd.DataFrame)
else pd.read_csv(self.valid_path, dtype=self.dtypes)
)
self.valid_data = CommonLitDataset(df=self.valid_df)
if stage == "predict":
self.test_df = (
self.test_path
if isinstance(self.test_path, pd.DataFrame)
else pd.read_csv(self.test_path, dtype=self.dtypes)
)
self.test_df = self._strip_extraneous(self.test_df)
self.test_data = CommonLitDataset(df=self.test_df)
self.train_df = (
self.train_path
if isinstance(self.train_path, pd.DataFrame)
else pd.read_csv(self.train_path, dtype=self.dtypes)
)
self.train_df = self._strip_extraneous(self.train_df)
self.train_size = int(len(self.train_df))
self.train_data = CommonLitDataset(df=self.train_df)
def kfold_data(self):
# TODO: wondering how to integrate kfolds into the datamodule
pass
def train_dataloader(self) -> DataLoader:
return DataLoader(
self.train_data,
batch_size=self.batch_size,
shuffle=self.shuffle_dataloader,
collate_fn=self.collate_fn,
num_workers=self.num_dataloader_workers,
pin_memory=True,
)
def val_dataloader(self) -> DataLoader:
if self.valid_data is None:
return None
else:
return DataLoader(
self.valid_data,
batch_size=self.batch_size,
shuffle=False,
collate_fn=self.collate_fn,
num_workers=self.num_dataloader_workers,
pin_memory=True,
)
def predict_dataloader(self) -> DataLoader:
if self.test_data is None:
return None
else:
return DataLoader(
self.test_data,
batch_size=self.batch_size,
shuffle=False,
collate_fn=self.collate_fn,
num_workers=self.num_dataloader_workers,
pin_memory=True,
)
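# The DataModule is consumed in two passes below: setup("fit") builds the train/valid split used
# by trainer.fit, while setup("predict") reloads both the train and test frames so the frozen
# model can score them for the stage-two features and the final submission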
def add_textstat_features(df):
# adding the text standard seems to boost the accuracy score a bit
df["text_standard"] = df["excerpt"].apply(lambda x: textstat.text_standard(x))
df["text_standard_category"] = df["text_standard"].astype("category").cat.codes
# counting ratio of difficult words by lexicon count
df["difficult_words_ratio"] = df["excerpt"].apply(
lambda x: textstat.difficult_words(x)
)
df["difficult_words_ratio"] = df.apply(
lambda x: x["difficult_words_ratio"] / textstat.lexicon_count(x["excerpt"]),
axis=1,
)
df["syllable_ratio"] = df["excerpt"].apply(lambda x: textstat.syllable_count(x))
df["syllable_ratio"] = df.apply(
lambda x: x["syllable_ratio"] / textstat.lexicon_count(x["excerpt"]), axis=1
)
### You can add/remove any feature below and it will be used in training and test
df["coleman_liau_index"] = df["excerpt"].apply(
lambda x: textstat.coleman_liau_index(x)
)
df["flesch_reading_ease"] = df["excerpt"].apply(
lambda x: textstat.flesch_reading_ease(x)
)
df["smog_index"] = df["excerpt"].apply(lambda x: textstat.smog_index(x))
df["gunning_fog"] = df["excerpt"].apply(lambda x: textstat.gunning_fog(x))
df["flesch_kincaid_grade"] = df["excerpt"].apply(
lambda x: textstat.flesch_kincaid_grade(x)
)
df["automated_readability_index"] = df["excerpt"].apply(
lambda x: textstat.automated_readability_index(x)
)
df["dale_chall_readability_score"] = df["excerpt"].apply(
lambda x: textstat.dale_chall_readability_score(x)
)
df["linsear_write_formula"] = df["excerpt"].apply(
lambda x: textstat.linsear_write_formula(x)
)
###
df = df.drop(columns=["excerpt", "text_standard"])
return df
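# Minimal illustration of add_textstat_features (added for clarity; the id and sentence are made
# up, the result is only printed and nothing downstream depends on it):
_textstat_demo = pd.DataFrame(
    {"id": ["demo_0"], "excerpt": ["The quick brown fox jumps over the lazy dog."]}
)
print("textstat feature demo:\n", add_textstat_features(_textstat_demo).T)
del _textstat_demo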
def process_hidden_states(df, drop_hidden_states=False):
# for convenience, moving hidden states to the far right of the df
if drop_hidden_states:
df.drop(columns=["hidden_states"], inplace=True)
return df
elif "hidden_states" in df.columns:
df["hidden_state"] = df["hidden_states"]
df.drop(columns=["hidden_states"], inplace=True)
temp = df["hidden_state"].apply(pd.Series)
temp = temp.rename(columns=lambda x: "hidden_state_" + str(x))
df = pd.concat([df, temp], axis=1)
df.drop(columns=["hidden_state"], inplace=True)
return df
else:
print("hidden_states not found in dataframe, skipping process_hidden_states")
return df
datamodule = CommonLitDataModule(
collate_fn=my_collate_fn,
tokenizer=PRETRAINED_ROBERTA_BASE_TOKENIZER,
train_path=KAGGLE_TRAIN_PATH,
test_path=KAGGLE_TEST_PATH,
max_length=TOKENIZER_MAX_LEN,
batch_size=BATCH_SIZE,
train_valid_split=TRAIN_VALID_SPLIT,
)
# manually calling this stage since we need some params to set up model initially
datamodule.setup(stage="fit")
if USE_CHECKPOINT:
# model = LitRobertaLogitRegressor.load_from_checkpoint(TUNED_CHECKPOINT_PATH)
trainer = pl.Trainer(
gpus=ACCELERATOR_TYPE["gpus"], tpu_cores=ACCELERATOR_TYPE["tpu_cores"]
)
model = LitRobertaLogitRegressor(
pre_trained_path=TUNED_BEST_ROBERTA_PATH,
train_size=datamodule.train_size,
batch_size=datamodule.batch_size,
output_hidden_states=USE_HIDDEN_IN_RGR,
n_gpus=ACCELERATOR_TYPE["gpus"],
accumulate_grad_batches=trainer.accumulate_grad_batches,
learning_rate=LEARNING_RATE,
warmup_steps=WARMUP_STEPS,
max_epochs=MAX_EPOCHS,
tokenizer=datamodule.tokenizer,
)
trainer = pl.Trainer(
gpus=ACCELERATOR_TYPE["gpus"], tpu_cores=ACCELERATOR_TYPE["tpu_cores"]
)
else:
checkpoint_filename = f"crp_roberta_trial_main"
checkpoint_save = ModelCheckpoint(
dirpath=CHECKPOINTS_PATH, filename=checkpoint_filename
)
early_stopping_callback = EarlyStopping(monitor="val_loss", patience=2)
trainer = pl.Trainer(
max_epochs=MAX_EPOCHS,
gpus=ACCELERATOR_TYPE["gpus"],
tpu_cores=ACCELERATOR_TYPE["tpu_cores"],
precision=16 if USE_16_BIT_PRECISION else 32,
default_root_dir=CHECKPOINTS_PATH,
gradient_clip_val=GRADIENT_CLIP_VAL,
stochastic_weight_avg=True,
callbacks=[checkpoint_save, early_stopping_callback],
fast_dev_run=FAST_DEV_RUN,
)
model = LitRobertaLogitRegressor(
        pre_trained_path=PRETRAINED_ROBERTA_BASE_MODEL_PATH,
train_size=datamodule.train_size,
batch_size=datamodule.batch_size,
n_gpus=trainer.gpus,
n_tpus=trainer.tpu_cores,
max_epochs=trainer.max_epochs,
accumulate_grad_batches=trainer.accumulate_grad_batches,
learning_rate=LEARNING_RATE,
warmup_steps=WARMUP_STEPS,
tokenizer=datamodule.tokenizer,
)
trainer.fit(model, datamodule=datamodule)
# let's also save the tuned roberta state which our model wraps around
model_file_name = f"tuned_roberta_model"
model_file_path = os.path.join(MODEL_PATH, model_file_name)
model.save_pretrained(model_file_path)
# clean up memory
torch.cuda.empty_cache()
gc.collect()
# freeze the model for prediction
model.eval()
model.freeze()
datamodule.setup(stage="predict")
model.do_decode = True
# run predict on the train data to generate stage-two features (RoBERTa logits for every excerpt)
train_data_stage_two = trainer.predict(
model=model, dataloaders=datamodule.train_dataloader()
)
train_data_stage_two = pd.concat(train_data_stage_two).reset_index(drop=True)
train_data_stage_two = pd.merge(
left=train_data_stage_two,
right=datamodule.train_df.drop(columns=["standard_error", "target"]),
left_on="id",
right_on="id",
)
print(train_data_stage_two)
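# at this point every training excerpt carries its RoBERTa logit plus the original text, so the
# textstat features added next are computed on exactly the rows the fitted model has scored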
# TODO: test whether we need to save and upload the fine-tuned state of roberta or if pytorch lightning checkpoints take care of it all
train_data_stage_three = add_textstat_features(train_data_stage_two)
label_data = train_data_stage_three[["id"]].copy(deep=True)
train_data = train_data_stage_three.drop(
columns=["id", "target", "text_standard_category"]
).copy(deep=True)
train_data_cols = list(train_data.columns)
target_data = train_data_stage_three[["target"]].copy(deep=True)
scaler = StandardScaler()
train_data_scaled = scaler.fit_transform(train_data)
train_data_scaled = pd.DataFrame(train_data_scaled, columns=train_data_cols)
TARGET_SCALER = StandardScaler()
target_data_scaled = TARGET_SCALER.fit_transform(target_data)
target_data_scaled = pd.DataFrame(target_data_scaled, columns=["target"])
regr = SVR(kernel="linear")
regr.fit(train_data_scaled, target_data_scaled["target"])
print(" Assessment of Features ")
print("R2 Score: ", regr.score(train_data_scaled, target_data_scaled["target"]))
print(
"RSME Score: ",
math.sqrt(
mean_squared_error(
target_data_scaled["target"], regr.predict(train_data_scaled)
)
),
)
# regr.coef_ is an array of shape (1, n_features) for the linear-kernel SVR
feats_coef = list(zip(train_data_cols, regr.coef_[0]))
feature_analysis = pd.DataFrame(feats_coef, columns=["feature_col", "coef_val"])
feature_analysis["coef_val"] = feature_analysis["coef_val"] # .abs()
feature_analysis = feature_analysis.sort_values("coef_val", ascending=False)
feature_analysis.plot.barh(
x="feature_col", y="coef_val", title="Comparison of Features and Importance"
)
# select the top n features for use in final regression approach
best_n_features = feature_analysis.head(N_FEATURES_TO_USE_HEAD)["feature_col"].to_list()
# optionally also take the lowest-ranked features from the tail of the list
if N_FEATURES_TO_USE_TAIL is not None:
worst_n_features = feature_analysis.tail(N_FEATURES_TO_USE_TAIL)[
"feature_col"
].to_list()
best_n_features.extend(worst_n_features)
# manually adding this categorical feature in
if "text_standard_category" not in best_n_features:
best_n_features.append("text_standard_category")
best_n_features = list(set(best_n_features))
train_data = train_data_stage_three[best_n_features]
DATASET = train_data.copy(deep=True)
DATASET["target"] = target_data_scaled["target"]
DATASET["id"] = label_data["id"]
temp_cols = list(
DATASET.drop(columns=["id", "target", "text_standard_category"]).columns
)
DATASET_scaled = DATASET[temp_cols]
scaler = StandardScaler()
DATASET_scaled = scaler.fit_transform(DATASET_scaled)
DATASET_scaled = pd.DataFrame(DATASET_scaled, columns=temp_cols)
DATASET_scaled[["id", "target", "text_standard_category"]] = DATASET[
["id", "target", "text_standard_category"]
]
print(DATASET_scaled)
# note: this rebinds the name `Dataset`, shadowing torch.utils.data.Dataset imported above;
# the torch class is not referenced again below, so the pipeline is unaffected
Dataset = DATASET_scaled
# https://medium.com/optuna/lightgbm-tuner-new-optuna-integration-for-hyperparameter-optimization-8b7095e99258
# https://www.kaggle.com/corochann/optuna-tutorial-for-hyperparameter-optimization
RGR_MODELS = []
def objective(trial: optuna.trial.Trial, n_folds=5, shuffle=True):
params = {
"metric": "rmse",
"boosting_type": "gbdt",
"verbose": -1,
"num_leaves": trial.suggest_int("num_leaves", 4, 512),
"max_depth": trial.suggest_int("max_depth", 4, 512),
"max_bin": trial.suggest_int("max_bin", 4, 512),
"min_data_in_leaf": trial.suggest_int("min_data_in_leaf", 64, 512),
"bagging_fraction": trial.suggest_uniform("bagging_fraction", 0.1, 1.0),
"bagging_freq": trial.suggest_int("max_bin", 5, 10),
"feature_fraction": trial.suggest_uniform("feature_fraction", 0.4, 1.0),
"learning_rate": trial.suggest_float("bagging_fraction", 0.0005, 0.01),
"n_estimators": trial.suggest_int("num_leaves", 10, 10000),
"lambda_l1": trial.suggest_loguniform("lambda_l1", 1e-8, 10.0),
"lambda_l2": trial.suggest_loguniform("lambda_l2", 1e-8, 10.0),
}
fold = KFold(
n_splits=n_folds, shuffle=shuffle, random_state=SEED_VAL if shuffle else None
)
valid_score = []
best_model_tracker = {}
for fold_idx, (train_idx, valid_idx) in enumerate(fold.split(range(len(DATASET)))):
train_data = (
Dataset.iloc[train_idx].drop(columns=["id", "target"]).copy(deep=True)
)
train_target = Dataset[["target"]].iloc[train_idx].copy(deep=True)
valid_data = (
Dataset.iloc[valid_idx].drop(columns=["id", "target"]).copy(deep=True)
)
valid_target = Dataset[["target"]].iloc[valid_idx].copy(deep=True)
lgbm_train = lgm.Dataset(
train_data,
label=train_target,
categorical_feature=["text_standard_category"],
)
lgbm_valid = lgm.Dataset(
valid_data,
label=valid_target,
categorical_feature=["text_standard_category"],
)
curr_model = lgm.train(
params,
train_set=lgbm_train,
valid_sets=[lgbm_train, lgbm_valid],
verbose_eval=-1,
)
valid_pred = curr_model.predict(
valid_data, num_iteration=curr_model.best_iteration
)
best_score = curr_model.best_score["valid_1"]["rmse"]
best_model_tracker.update({best_score: curr_model})
valid_score.append(best_score)
best_model_score = min([k for k, v in best_model_tracker.items()])
best_model = best_model_tracker[best_model_score]
RGR_MODELS.append(best_model)
# RGR_MODELS.append({best_model_score: best_model})
# worst_rgr_model_idx = max([d.keys[0] for d in RGR_MODELS])
# RGR_MODELS[worst_rgr_model_idx] = {best_model_score: None}
score = np.mean(valid_score)
return score
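# Alternative worth noting (rough, untested sketch): optuna.integration.lightgbm, imported above
# as `lgb`, ships a step-wise LightGBMTunerCV that could replace the hand-written objective:
#
#     tuner = lgb.LightGBMTunerCV(
#         {"objective": "regression", "metric": "rmse", "verbosity": -1},
#         lgm.Dataset(Dataset.drop(columns=["id", "target"]), label=Dataset["target"]),
#         folds=KFold(n_splits=5, shuffle=True, random_state=SEED_VAL),
#     )
#     tuner.run()
#     # tuner.best_params could then seed lgm.train below
#
# The explicit objective above is kept because it gives full control over the search space.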
study = optuna.create_study(storage="sqlite:///lgm-study.db")
study.optimize(objective, n_trials=256)
plot_optimization_history(study).show()
print("Best Trial: ", study.best_trial, "\n")
# use the study parameters to create and train a lgbm regressor
lgm_train_data = DATASET_scaled.drop(columns=["id"]).copy(deep=True)
x_features = lgm_train_data.loc[:, lgm_train_data.columns != "target"]
y_train = lgm_train_data[["target"]]
lgm_train_set_full = lgm.Dataset(
data=x_features, categorical_feature=["text_standard_category"], label=y_train
)
gbm = lgm.train(
study.best_trial.params,
lgm_train_set_full,
)
model.do_decode = True
trainer = pl.Trainer(
gpus=ACCELERATOR_TYPE["gpus"], tpu_cores=ACCELERATOR_TYPE["tpu_cores"]
)
# run predict on the test data
submission_stage_1 = trainer.predict(
model=model, dataloaders=datamodule.predict_dataloader()
)
submission_stage_1 = pd.concat(submission_stage_1).reset_index(drop=True)
print(" Submission Stage 1: After RoBERTA\n")
print(submission_stage_1)
submission_stage_2 = pd.merge(
left=submission_stage_1,
right=datamodule.test_df,
left_on="id",
right_on="id",
how="left",
)
submission_stage_2 = add_textstat_features(submission_stage_2)
feature_cols = list(submission_stage_2.drop(columns=["id"]).copy(deep=True).columns)
predict_data = submission_stage_2.drop(columns=["id"]).copy(deep=True)
predict_data = predict_data[best_n_features]
temp_cols = list(predict_data.drop(columns=["text_standard_category"]).columns)
predict_data_scaled = predict_data[temp_cols]
predict_data_scaled = scaler.transform(predict_data_scaled)
predict_data_scaled = pd.DataFrame(predict_data_scaled, columns=temp_cols)
predict_data_scaled["text_standard_category"] = predict_data["text_standard_category"]
submission = submission_stage_2[["id"]].copy(deep=True)
submission["target"] = gbm.predict(predict_data_scaled)
submission["target"] = TARGET_SCALER.inverse_transform(submission["target"])
print(" Final Stage After LGBM\n")
print(submission)
submission.to_csv("submission.csv", index=False)
<|endoftext|> | /fsx/loubna/kaggle_data/kaggle-code-data/data/0069/393/69393429.ipynb | progresbar2local | justinchae | [{"Id": 69393429, "ScriptId": 18638229, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4319244, "CreationDate": "07/30/2021 12:40:32", "VersionNumber": 36.0, "Title": "The Bernstein Bears CRP Submission 1", "EvaluationDate": "07/30/2021", "IsChange": true, "TotalLines": 887.0, "LinesInsertedFromPrevious": 13.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 874.0, "LinesInsertedFromFork": 409.0, "LinesDeletedFromFork": 274.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 478.0, "TotalVotes": 0}] | [{"Id": 92503477, "KernelVersionId": 69393429, "SourceDatasetVersionId": 2311525}, {"Id": 92503478, "KernelVersionId": 69393429, "SourceDatasetVersionId": 2312589}, {"Id": 92503476, "KernelVersionId": 69393429, "SourceDatasetVersionId": 2311499}] | [{"Id": 2311525, "DatasetId": 1394642, "DatasourceVersionId": 2352908, "CreatorUserId": 4319244, "LicenseName": "Unknown", "CreationDate": "06/07/2021 14:51:02", "VersionNumber": 1.0, "Title": "progresbar2-local", "Slug": "progresbar2local", "Subtitle": "Downloaded for offline use in kaggle \"no internet\" kernels", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}] | [{"Id": 1394642, "CreatorUserId": 4319244, "OwnerUserId": 4319244.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2311525.0, "CurrentDatasourceVersionId": 2352908.0, "ForumId": 1413893, "Type": 2, "CreationDate": "06/07/2021 14:51:02", "LastActivityDate": "06/07/2021", "TotalViews": 934, "TotalDownloads": 4, "TotalVotes": 1, "TotalKernels": 3}] | [{"Id": 4319244, "UserName": "justinchae", "DisplayName": "Justin Chae", "RegisterDate": "01/12/2020", "PerformanceTier": 1}] | # # The Bernstein Bears CRP Submission 1
# install necessary libraries from input
# import progressbar library for offline usage
# import text stat library for additional ml data prep
FAST_DEV_RUN = False
USE_CHECKPOINT = True
USE_HIDDEN_IN_RGR = False
N_FEATURES_TO_USE_HEAD = 1
N_FEATURES_TO_USE_TAIL = None
# in this kernel, run train on all data to maximize score on held out data but use what we learned about optimal parameters
# set to 16 bit precision to cut compute requirements/increase batch size capacity
USE_16_BIT_PRECISION = True
# set a seed value for consistent experimentation; optional, else leave as None
SEED_VAL = 42
# set a train-validation split, .7 means 70% of train data and 30% to validation set
TRAIN_VALID_SPLIT = 0.8 # if None, then don't split
# set hyperparameters learned from tuning: https://www.kaggle.com/justinchae/tune-roberta-pytorch-lightning-optuna
MAX_EPOCHS = 4
BATCH_SIZE = 16
GRADIENT_CLIP_VAL = 0.18318092164684585
LEARNING_RATE = 3.613894271216525e-05
TOKENIZER_MAX_LEN = 363
WARMUP_STEPS = 292
WEIGHT_DECAY = 0.004560699842170359
import kaggle_config
from kaggle_config import (
WORKFLOW_ROOT,
DATA_PATH,
CACHE_PATH,
FIG_PATH,
MODEL_PATH,
ANALYSIS_PATH,
KAGGLE_INPUT,
CHECKPOINTS_PATH,
LOGS_PATH,
)
INPUTS, DEVICE = kaggle_config.run()
KAGGLE_TRAIN_PATH = kaggle_config.get_train_path(INPUTS)
KAGGLE_TEST_PATH = kaggle_config.get_test_path(INPUTS)
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.tuner.batch_size_scaling import scale_batch_size
from pytorch_lightning.tuner.lr_finder import _LRFinder, lr_find
import torchmetrics
import optuna
from optuna.integration import PyTorchLightningPruningCallback
from optuna.samplers import TPESampler, RandomSampler, CmaEsSampler
from optuna.visualization import (
plot_intermediate_values,
plot_optimization_history,
plot_param_importances,
)
import optuna.integration.lightgbm as lgb
import lightgbm as lgm
from sklearn.model_selection import (
KFold,
cross_val_score,
RepeatedKFold,
train_test_split,
)
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.decomposition import PCA
from sklearn.feature_selection import (
RFE,
f_regression,
mutual_info_regression,
SequentialFeatureSelector,
)
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
import math
import textstat
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataset import random_split
import tensorflow as tf
from transformers import (
RobertaForSequenceClassification,
RobertaTokenizer,
AdamW,
get_linear_schedule_with_warmup,
)
import os
import pandas as pd
import numpy as np
import gc
from functools import partial
from typing import List, Dict
from typing import Optional
from argparse import ArgumentParser
import random
if SEED_VAL:
random.seed(SEED_VAL)
np.random.seed(SEED_VAL)
seed_everything(SEED_VAL)
NUM_DATALOADER_WORKERS = os.cpu_count()
try:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="")
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
n_tpus = len(tf.config.list_logical_devices("TPU"))
except ValueError:
n_tpus = 0
ACCELERATOR_TYPE = {}
ACCELERATOR_TYPE.update(
{"gpus": torch.cuda.device_count() if torch.cuda.is_available() else None}
)
ACCELERATOR_TYPE.update({"tpu_cores": n_tpus if n_tpus > 0 else None})
# still debugging how to best toggle between tpu and gpu; there's too much code to configure to work simply
print("ACCELERATOR_TYPE:\n", ACCELERATOR_TYPE)
PRETTRAINED_ROBERTA_BASE_MODEL_PATH = "/kaggle/input/pre-trained-roberta-base"
PRETRAINED_ROBERTA_BASE_TOKENIZER_PATH = "/kaggle/input/tokenizer-roberta"
PRETRAINED_ROBERTA_BASE_TOKENIZER = RobertaTokenizer.from_pretrained(
PRETRAINED_ROBERTA_BASE_TOKENIZER_PATH
)
TUNED_CHECKPOINT_PATH = "/kaggle/input/best-crp-ckpt-4/crp_roberta_trial_4.ckpt"
# from: https://www.kaggle.com/justinchae/crp-regression-with-roberta-and-lightgbm
TUNED_BEST_ROBERTA_PATH = "/kaggle/input/my-best-tuned-roberta"
"""Implementing Lightning instead of torch.nn.Module
"""
class LitRobertaLogitRegressor(pl.LightningModule):
def __init__(
self,
pre_trained_path: str,
output_hidden_states: bool = False,
num_labels: int = 1,
layer_1_output_size: int = 64,
layer_2_output_size: int = 1,
learning_rate: float = 1e-5,
task_name: Optional[str] = None,
warmup_steps: int = 100,
weight_decay: float = 0.0,
adam_epsilon: float = 1e-8,
batch_size: Optional[int] = None,
train_size: Optional[int] = None,
max_epochs: Optional[int] = None,
n_gpus: Optional[int] = 0,
n_tpus: Optional[int] = 0,
accumulate_grad_batches=None,
tokenizer=None,
do_decode=False,
):
"""refactored from: https://www.kaggle.com/justinchae/my-bert-tuner and https://www.kaggle.com/justinchae/roberta-tuner"""
super(LitRobertaLogitRegressor, self).__init__()
# this saves class params as self.hparams
self.save_hyperparameters()
self.model = RobertaForSequenceClassification.from_pretrained(
self.hparams.pre_trained_path,
output_hidden_states=self.hparams.output_hidden_states,
num_labels=self.hparams.num_labels,
)
self.accelerator_multiplier = n_gpus if n_gpus > 0 else 1
self.config = self.model.config
self.parameters = self.model.parameters
self.save_pretrained = self.model.save_pretrained
# these layers are not currently used, tbd in future iteration
self.layer_1 = torch.nn.Linear(768, layer_1_output_size)
self.layer_2 = torch.nn.Linear(layer_1_output_size, layer_2_output_size)
self.tokenizer = tokenizer
self.do_decode = do_decode
self.output_hidden_states = output_hidden_states
def rmse_loss(x, y):
criterion = F.mse_loss
loss = torch.sqrt(criterion(x, y))
return loss
# TODO: enable toggle for various loss funcs and torchmetrics package
self.loss_func = rmse_loss
# self.eval_func = rmse_loss
def setup(self, stage=None) -> None:
if stage == "fit":
# when this class is called by trainer.fit, this stage runs and so on
# Calculate total steps
tb_size = self.hparams.batch_size * self.accelerator_multiplier
ab_size = self.hparams.accumulate_grad_batches * float(
self.hparams.max_epochs
)
self.total_steps = (self.hparams.train_size // tb_size) // ab_size
def extract_logit_only(self, input_ids, attention_mask) -> float:
output = self.model(input_ids=input_ids, attention_mask=attention_mask)
logit = output.logits
logit = logit.cpu().numpy().astype(float)
return logit
def extract_hidden_only(self, input_ids, attention_mask) -> np.array:
output = self.model(input_ids=input_ids, attention_mask=input_ids)
hidden_states = output.hidden_states
x = torch.stack(hidden_states[-4:]).sum(0)
m1 = torch.nn.Sequential(self.layer_1, self.layer_2, torch.nn.Flatten())
x = m1(x)
x = torch.squeeze(x).cpu().numpy()
return x
def forward(self, input_ids, attention_mask) -> torch.Tensor:
output = self.model(input_ids=input_ids, attention_mask=attention_mask)
x = output.logits
return x
def training_step(self, batch, batch_idx: int) -> float:
# refactored from: https://www.kaggle.com/justinchae/epoch-utils
labels, encoded_batch, kaggle_ids = batch
input_ids = encoded_batch["input_ids"]
attention_mask = encoded_batch["attention_mask"]
# per docs, keep train step separate from forward call
output = self.model(input_ids=input_ids, attention_mask=attention_mask)
y_hat = output.logits
# quick reshape to align labels to predictions
labels = labels.view(-1, 1)
loss = self.loss_func(y_hat, labels)
self.log("train_loss", loss)
return loss
def validation_step(self, batch, batch_idx: int) -> float:
# refactored from: https://www.kaggle.com/justinchae/epoch-utils
labels, encoded_batch, kaggle_ids = batch
input_ids = encoded_batch["input_ids"]
attention_mask = encoded_batch["attention_mask"]
# this self call is calling the forward method
y_hat = self(input_ids, attention_mask)
# quick reshape to align labels to predictions
labels = labels.view(-1, 1)
loss = self.loss_func(y_hat, labels)
self.log("val_loss", loss)
return loss
def predict(self, batch, batch_idx: int, dataloader_idx: int = None):
# creating this predict method overrides the pl predict method
target, encoded_batch, kaggle_ids = batch
input_ids = encoded_batch["input_ids"]
attention_mask = encoded_batch["attention_mask"]
# this self call is calling the forward method
y_hat = self(input_ids, attention_mask)
# convert to numpy then list like struct to zip with ids
y_hat = y_hat.cpu().numpy().ravel()
# customizing the predict behavior to account for unique ids
if self.tokenizer is not None and self.do_decode:
target = target.cpu().numpy().ravel() if len(target) > 0 else None
excerpt = self.tokenizer.batch_decode(
input_ids.cpu().numpy(),
skip_special_tokens=True,
clean_up_tokenization_spaces=True,
)
if self.output_hidden_states:
hidden_states = self.extract_hidden_only(
input_ids=input_ids, attention_mask=attention_mask
)
else:
hidden_states = None
if target is not None:
predictions = list(
zip(
kaggle_ids,
target,
y_hat
# , hidden_states
)
)
predictions = pd.DataFrame(
predictions,
columns=[
"id",
"target",
"logit"
# , 'hidden_states'
],
)
else:
predictions = list(
zip(
kaggle_ids,
y_hat
# , hidden_states
)
)
predictions = pd.DataFrame(
predictions,
columns=[
"id",
"logit"
# , 'hidden_states'
],
)
else:
predictions = list(zip(kaggle_ids, y_hat))
predictions = pd.DataFrame(predictions, columns=["id", "target"])
return predictions
def configure_optimizers(self) -> torch.optim.Optimizer:
# Reference: https://pytorch-lightning.readthedocs.io/en/latest/notebooks/lightning_examples/text-transformers.html
model = self.model
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": self.hparams.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=self.hparams.learning_rate,
eps=self.hparams.adam_epsilon,
)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=self.hparams.warmup_steps,
num_training_steps=self.total_steps,
)
scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return [optimizer], [scheduler]
def my_collate_fn(
batch,
tokenizer,
max_length: int = 100,
return_tensors: str = "pt",
padding: str = "max_length",
truncation: bool = True,
):
# source: https://www.kaggle.com/justinchae/nn-utils
labels = []
batch_texts = []
kaggle_ids = []
for _label, batch_text, kaggle_id in batch:
if _label is not None:
labels.append(_label)
batch_texts.append(batch_text)
kaggle_ids.append(kaggle_id)
if _label is not None:
labels = torch.tensor(labels, dtype=torch.float)
encoded_batch = tokenizer(
batch_texts,
return_tensors=return_tensors,
padding=padding,
max_length=max_length,
truncation=truncation,
)
return labels, encoded_batch, kaggle_ids
class CommonLitDataset(Dataset):
def __init__(
self,
df,
text_col: str = "excerpt",
label_col: str = "target",
kaggle_id: str = "id",
sample_size: Optional[str] = None,
):
self.df = df if sample_size is None else df.sample(sample_size)
self.text_col = text_col
self.label_col = label_col
self.kaggle_id = kaggle_id
self.num_labels = (
len(df[label_col].unique()) if label_col in df.columns else None
)
# source: https://www.kaggle.com/justinchae/nn-utils
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
result = None
text = self.df.iloc[idx][self.text_col]
kaggle_id = self.df.iloc[idx][self.kaggle_id]
if "target" in self.df.columns:
target = self.df.iloc[idx][self.label_col]
return target, text, kaggle_id
else:
return None, text, kaggle_id
class CommonLitDataModule(pl.LightningDataModule):
def __init__(
self,
tokenizer,
train_path,
collate_fn=None,
max_length: int = 280,
batch_size: int = 16,
valid_path: Optional[str] = None,
test_path: Optional[str] = None,
train_valid_split: float = 0.6,
dtypes=None,
shuffle_dataloader: bool = True,
num_dataloader_workers: int = NUM_DATALOADER_WORKERS,
kfold: Optional[dict] = None,
):
super(CommonLitDataModule, self).__init__()
self.tokenizer = tokenizer
self.train_path = train_path
self.valid_path = valid_path
self.test_path = test_path
self.train_valid_split = train_valid_split
self.dtypes = {"id": str} if dtypes is None else dtypes
self.train_size = None
self.train_df, self.train_data = None, None
self.valid_df, self.valid_data = None, None
self.test_df, self.test_data = None, None
if collate_fn is not None:
self.collate_fn = partial(
collate_fn, tokenizer=tokenizer, max_length=max_length
)
else:
self.collate_fn = partial(
my_collate_fn, batch=batch_size, tokenizer=tokenizer
)
self.shuffle_dataloader = shuffle_dataloader
self.batch_size = batch_size
self.num_dataloader_workers = num_dataloader_workers
# refactored from: https://www.kaggle.com/justinchae/nn-utils
def _strip_extraneous(self, df):
strip_cols = ["url_legal", "license"]
if all(col in df.columns for col in strip_cols):
extraneous_data = strip_cols
return df.drop(columns=extraneous_data)
else:
return df
def prepare(self, prep_type=None):
if prep_type == "train":
# creates just an instance of the train data as a pandas df
self.train_df = (
self.train_path
if isinstance(self.train_path, pd.DataFrame)
else pd.read_csv(self.train_path, dtype=self.dtypes)
)
self.train_df = self._strip_extraneous(self.train_df)
if prep_type == "train_stage_2":
self.train_df = (
self.train_path
if isinstance(self.train_path, pd.DataFrame)
else pd.read_csv(self.train_path, dtype=self.dtypes)
)
self.train_df = self._strip_extraneous(self.train_df)
self.train_size = int(len(self.train_df))
self.train_data = CommonLitDataset(df=self.train_df)
def setup(self, stage: Optional[str] = None) -> None:
if stage == "fit":
# when this class is called by trainer.fit, this stage runs and so on
self.train_df = (
self.train_path
if isinstance(self.train_path, pd.DataFrame)
else pd.read_csv(self.train_path, dtype=self.dtypes)
)
self.train_df = self._strip_extraneous(self.train_df)
self.train_size = int(len(self.train_df))
self.train_data = CommonLitDataset(df=self.train_df)
if self.train_valid_split is not None and self.valid_path is None:
self.train_size = int(len(self.train_df) * self.train_valid_split)
self.train_data, self.valid_data = random_split(
self.train_data,
[self.train_size, len(self.train_df) - self.train_size],
)
elif self.valid_path is not None:
self.valid_df = (
self.valid_path
if isinstance(self.valid_path, pd.DataFrame)
else pd.read_csv(self.valid_path, dtype=self.dtypes)
)
self.valid_data = CommonLitDataset(df=self.valid_df)
if stage == "predict":
self.test_df = (
self.test_path
if isinstance(self.test_path, pd.DataFrame)
else pd.read_csv(self.test_path, dtype=self.dtypes)
)
self.test_df = self._strip_extraneous(self.test_df)
self.test_data = CommonLitDataset(df=self.test_df)
self.train_df = (
self.train_path
if isinstance(self.train_path, pd.DataFrame)
else pd.read_csv(self.train_path, dtype=self.dtypes)
)
self.train_df = self._strip_extraneous(self.train_df)
self.train_size = int(len(self.train_df))
self.train_data = CommonLitDataset(df=self.train_df)
def kfold_data(self):
# TODO: wondering how to integrate kfolds into the datamodule
pass
def train_dataloader(self) -> DataLoader:
return DataLoader(
self.train_data,
batch_size=self.batch_size,
shuffle=self.shuffle_dataloader,
collate_fn=self.collate_fn,
num_workers=self.num_dataloader_workers,
pin_memory=True,
)
def val_dataloader(self) -> DataLoader:
if self.valid_data is None:
return None
else:
return DataLoader(
self.valid_data,
batch_size=self.batch_size,
shuffle=False,
collate_fn=self.collate_fn,
num_workers=self.num_dataloader_workers,
pin_memory=True,
)
def predict_dataloader(self) -> DataLoader:
if self.test_data is None:
return None
else:
return DataLoader(
self.test_data,
batch_size=self.batch_size,
shuffle=False,
collate_fn=self.collate_fn,
num_workers=self.num_dataloader_workers,
pin_memory=True,
)
def add_textstat_features(df):
# adding the text standard seems to boost the accuracy score a bit
df["text_standard"] = df["excerpt"].apply(lambda x: textstat.text_standard(x))
df["text_standard_category"] = df["text_standard"].astype("category").cat.codes
# counting ratio of difficult words by lexicon count
df["difficult_words_ratio"] = df["excerpt"].apply(
lambda x: textstat.difficult_words(x)
)
df["difficult_words_ratio"] = df.apply(
lambda x: x["difficult_words_ratio"] / textstat.lexicon_count(x["excerpt"]),
axis=1,
)
df["syllable_ratio"] = df["excerpt"].apply(lambda x: textstat.syllable_count(x))
df["syllable_ratio"] = df.apply(
lambda x: x["syllable_ratio"] / textstat.lexicon_count(x["excerpt"]), axis=1
)
### You can add/remove any feature below and it will be used in training and test
df["coleman_liau_index"] = df["excerpt"].apply(
lambda x: textstat.coleman_liau_index(x)
)
df["flesch_reading_ease"] = df["excerpt"].apply(
lambda x: textstat.flesch_reading_ease(x)
)
df["smog_index"] = df["excerpt"].apply(lambda x: textstat.smog_index(x))
df["gunning_fog"] = df["excerpt"].apply(lambda x: textstat.gunning_fog(x))
df["flesch_kincaid_grade"] = df["excerpt"].apply(
lambda x: textstat.flesch_kincaid_grade(x)
)
df["automated_readability_index"] = df["excerpt"].apply(
lambda x: textstat.automated_readability_index(x)
)
df["dale_chall_readability_score"] = df["excerpt"].apply(
lambda x: textstat.dale_chall_readability_score(x)
)
df["linsear_write_formula"] = df["excerpt"].apply(
lambda x: textstat.linsear_write_formula(x)
)
###
df = df.drop(columns=["excerpt", "text_standard"])
return df
def process_hidden_states(df, drop_hidden_states=False):
# for convenience, moving hidden states to the far right of the df
if drop_hidden_states:
df.drop(columns=["hidden_states"], inplace=True)
return df
elif "hidden_states" in df.columns:
df["hidden_state"] = df["hidden_states"]
df.drop(columns=["hidden_states"], inplace=True)
temp = df["hidden_state"].apply(pd.Series)
temp = temp.rename(columns=lambda x: "hidden_state_" + str(x))
df = pd.concat([df, temp], axis=1)
df.drop(columns=["hidden_state"], inplace=True)
return df
else:
print("hidden_states not found in dataframe, skipping process_hidden_states")
return df
datamodule = CommonLitDataModule(
collate_fn=my_collate_fn,
tokenizer=PRETRAINED_ROBERTA_BASE_TOKENIZER,
train_path=KAGGLE_TRAIN_PATH,
test_path=KAGGLE_TEST_PATH,
max_length=TOKENIZER_MAX_LEN,
batch_size=BATCH_SIZE,
train_valid_split=TRAIN_VALID_SPLIT,
)
# manually calling this stage since we need some params to set up model initially
datamodule.setup(stage="fit")
if USE_CHECKPOINT:
# model = LitRobertaLogitRegressor.load_from_checkpoint(TUNED_CHECKPOINT_PATH)
trainer = pl.Trainer(
gpus=ACCELERATOR_TYPE["gpus"], tpu_cores=ACCELERATOR_TYPE["tpu_cores"]
)
model = LitRobertaLogitRegressor(
pre_trained_path=TUNED_BEST_ROBERTA_PATH,
train_size=datamodule.train_size,
batch_size=datamodule.batch_size,
output_hidden_states=USE_HIDDEN_IN_RGR,
n_gpus=ACCELERATOR_TYPE["gpus"],
accumulate_grad_batches=trainer.accumulate_grad_batches,
learning_rate=LEARNING_RATE,
warmup_steps=WARMUP_STEPS,
max_epochs=MAX_EPOCHS,
tokenizer=datamodule.tokenizer,
)
trainer = pl.Trainer(
gpus=ACCELERATOR_TYPE["gpus"], tpu_cores=ACCELERATOR_TYPE["tpu_cores"]
)
else:
checkpoint_filename = f"crp_roberta_trial_main"
checkpoint_save = ModelCheckpoint(
dirpath=CHECKPOINTS_PATH, filename=checkpoint_filename
)
early_stopping_callback = EarlyStopping(monitor="val_loss", patience=2)
trainer = pl.Trainer(
max_epochs=MAX_EPOCHS,
gpus=ACCELERATOR_TYPE["gpus"],
tpu_cores=ACCELERATOR_TYPE["tpu_cores"],
precision=16 if USE_16_BIT_PRECISION else 32,
default_root_dir=CHECKPOINTS_PATH,
gradient_clip_val=GRADIENT_CLIP_VAL,
stochastic_weight_avg=True,
callbacks=[checkpoint_save, early_stopping_callback],
fast_dev_run=FAST_DEV_RUN,
)
model = LitRobertaLogitRegressor(
pre_trained_path=PRETTRAINED_ROBERTA_BASE_MODEL_PATH,
train_size=datamodule.train_size,
batch_size=datamodule.batch_size,
n_gpus=trainer.gpus,
n_tpus=trainer.tpu_cores,
max_epochs=trainer.max_epochs,
accumulate_grad_batches=trainer.accumulate_grad_batches,
learning_rate=LEARNING_RATE,
warmup_steps=WARMUP_STEPS,
tokenizer=datamodule.tokenizer,
)
trainer.fit(model, datamodule=datamodule)
# let's also save the tuned roberta state which our model wraps around
model_file_name = f"tuned_roberta_model"
model_file_path = os.path.join(MODEL_PATH, model_file_name)
model.save_pretrained(model_file_path)
# clean up memory
torch.cuda.empty_cache()
gc.collect()
# freeze the model for prediction
model.eval()
model.freeze()
datamodule.setup(stage="predict")
model.do_decode = True
# run predict on the test data
train_data_stage_two = trainer.predict(
model=model, dataloaders=datamodule.train_dataloader()
)
train_data_stage_two = pd.concat(train_data_stage_two).reset_index(drop=True)
train_data_stage_two = pd.merge(
left=train_data_stage_two,
right=datamodule.train_df.drop(columns=["standard_error", "target"]),
left_on="id",
right_on="id",
)
print(train_data_stage_two)
# TODO: test whether we need to save and upload the fine-tuned state of roberta or if pytorch lightning checkpoints take care of it all
train_data_stage_three = add_textstat_features(train_data_stage_two)
label_data = train_data_stage_three[["id"]].copy(deep=True)
train_data = train_data_stage_three.drop(
columns=["id", "target", "text_standard_category"]
).copy(deep=True)
train_data_cols = list(train_data.columns)
target_data = train_data_stage_three[["target"]].copy(deep=True)
scaler = StandardScaler()
train_data_scaled = scaler.fit_transform(train_data)
train_data_scaled = pd.DataFrame(train_data_scaled, columns=train_data_cols)
TARGET_SCALER = StandardScaler()
target_data_scaled = TARGET_SCALER.fit_transform(target_data)
target_data_scaled = pd.DataFrame(target_data_scaled, columns=["target"])
regr = SVR(kernel="linear")
regr.fit(train_data_scaled, target_data_scaled["target"])
print(" Assessment of Features ")
print("R2 Score: ", regr.score(train_data_scaled, target_data_scaled["target"]))
print(
"RSME Score: ",
math.sqrt(
mean_squared_error(
target_data_scaled["target"], regr.predict(train_data_scaled)
)
),
)
# regr.coef_ is a array of n, 1
feats_coef = list(zip(train_data_cols, regr.coef_[0]))
feature_analysis = pd.DataFrame(feats_coef, columns=["feature_col", "coef_val"])
feature_analysis["coef_val"] = feature_analysis["coef_val"] # .abs()
feature_analysis = feature_analysis.sort_values("coef_val", ascending=False)
feature_analysis.plot.barh(
x="feature_col", y="coef_val", title="Comparison of Features and Importance"
)
# select the top n features for use in final regression approach
best_n_features = feature_analysis.head(N_FEATURES_TO_USE_HEAD)["feature_col"].to_list()
# the opposite
if N_FEATURES_TO_USE_TAIL is not None:
worst_n_features = feature_analysis.tail(N_FEATURES_TO_USE_TAIL)[
"feature_col"
].to_list()
best_n_features.extend(worst_n_features)
# manually adding this categorical feature in
if "text_standard_category" not in best_n_features:
best_n_features.append("text_standard_category")
best_n_features = list(set(best_n_features))
train_data = train_data_stage_three[best_n_features]
DATASET = train_data.copy(deep=True)
DATASET["target"] = target_data_scaled["target"]
DATASET["id"] = label_data["id"]
temp_cols = list(
DATASET.drop(columns=["id", "target", "text_standard_category"]).columns
)
DATASET_scaled = DATASET[temp_cols]
scaler = StandardScaler()
DATASET_scaled = scaler.fit_transform(DATASET_scaled)
DATASET_scaled = pd.DataFrame(DATASET_scaled, columns=temp_cols)
DATASET_scaled[["id", "target", "text_standard_category"]] = DATASET[
["id", "target", "text_standard_category"]
]
print(DATASET_scaled)
Dataset = DATASET_scaled
# https://medium.com/optuna/lightgbm-tuner-new-optuna-integration-for-hyperparameter-optimization-8b7095e99258
# https://www.kaggle.com/corochann/optuna-tutorial-for-hyperparameter-optimization
RGR_MODELS = []
def objective(trial: optuna.trial.Trial, n_folds=5, shuffle=True):
params = {
"metric": "rmse",
"boosting_type": "gbdt",
"verbose": -1,
"num_leaves": trial.suggest_int("num_leaves", 4, 512),
"max_depth": trial.suggest_int("max_depth", 4, 512),
"max_bin": trial.suggest_int("max_bin", 4, 512),
"min_data_in_leaf": trial.suggest_int("min_data_in_leaf", 64, 512),
"bagging_fraction": trial.suggest_uniform("bagging_fraction", 0.1, 1.0),
"bagging_freq": trial.suggest_int("max_bin", 5, 10),
"feature_fraction": trial.suggest_uniform("feature_fraction", 0.4, 1.0),
"learning_rate": trial.suggest_float("bagging_fraction", 0.0005, 0.01),
"n_estimators": trial.suggest_int("num_leaves", 10, 10000),
"lambda_l1": trial.suggest_loguniform("lambda_l1", 1e-8, 10.0),
"lambda_l2": trial.suggest_loguniform("lambda_l2", 1e-8, 10.0),
}
fold = KFold(
n_splits=n_folds, shuffle=shuffle, random_state=SEED_VAL if shuffle else None
)
valid_score = []
best_model_tracker = {}
for fold_idx, (train_idx, valid_idx) in enumerate(fold.split(range(len(DATASET)))):
train_data = (
Dataset.iloc[train_idx].drop(columns=["id", "target"]).copy(deep=True)
)
train_target = Dataset[["target"]].iloc[train_idx].copy(deep=True)
valid_data = (
Dataset.iloc[valid_idx].drop(columns=["id", "target"]).copy(deep=True)
)
valid_target = Dataset[["target"]].iloc[valid_idx].copy(deep=True)
lgbm_train = lgm.Dataset(
train_data,
label=train_target,
categorical_feature=["text_standard_category"],
)
lgbm_valid = lgm.Dataset(
valid_data,
label=valid_target,
categorical_feature=["text_standard_category"],
)
curr_model = lgm.train(
params,
train_set=lgbm_train,
valid_sets=[lgbm_train, lgbm_valid],
verbose_eval=-1,
)
valid_pred = curr_model.predict(
valid_data, num_iteration=curr_model.best_iteration
)
best_score = curr_model.best_score["valid_1"]["rmse"]
best_model_tracker.update({best_score: curr_model})
valid_score.append(best_score)
    best_model_score = min(best_model_tracker)
    best_model = best_model_tracker[best_model_score]
    RGR_MODELS.append(best_model)
score = np.mean(valid_score)
return score
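# Alternative sketch (kept commented out so it does not run alongside the study below):
# the optuna.integration.lightgbm module imported at the top of the notebook ships a
# stepwise LightGBMTunerCV that tunes a similar parameter set automatically. Treat the
# call below as an assumption-laden illustration; the exact signature varies a little
# between optuna releases.
# tuner = lgb.LightGBMTunerCV(
#     {"objective": "regression", "metric": "rmse", "boosting_type": "gbdt", "verbosity": -1},
#     lgm.Dataset(
#         Dataset.drop(columns=["id", "target"]),
#         label=Dataset["target"],
#         categorical_feature=["text_standard_category"],
#     ),
#     folds=KFold(n_splits=5, shuffle=True, random_state=SEED_VAL),
# )
# tuner.run()
# print(tuner.best_params, tuner.best_score)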
study = optuna.create_study(direction="minimize", storage="sqlite:///lgm-study.db")
study.optimize(objective, n_trials=256)
plot_optimization_history(study).show()
print("Best Trial: ", study.best_trial, "\n")
# use the study parameters to create and train a lgbm regressor
lgm_train_data = DATASET_scaled.drop(columns=["id"]).copy(deep=True)
x_features = lgm_train_data.loc[:, lgm_train_data.columns != "target"]
y_train = lgm_train_data[["target"]]
lgm_train_set_full = lgm.Dataset(
data=x_features, categorical_feature=["text_standard_category"], label=y_train
)
# study.best_trial.params only holds the sampled hyperparameters, so re-attach the
# fixed settings used inside the objective before training the final model
best_params = {"metric": "rmse", "boosting_type": "gbdt", "verbose": -1}
best_params.update(study.best_trial.params)
gbm = lgm.train(
    best_params,
    lgm_train_set_full,
)
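# Rough generalization estimate for the chosen parameters (illustrative addition):
# LightGBM's built-in cross-validation is run on a fresh Dataset so it does not clash
# with the one already constructed above. Result keys differ slightly across LightGBM
# versions, so the last value of every returned series is printed generically.
cv_results = lgm.cv(
    best_params,
    lgm.Dataset(
        data=x_features,
        label=y_train,
        categorical_feature=["text_standard_category"],
    ),
    folds=KFold(n_splits=5, shuffle=True, random_state=SEED_VAL),
)
print({key: values[-1] for key, values in cv_results.items()})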
model.do_decode = True
trainer = pl.Trainer(
gpus=ACCELERATOR_TYPE["gpus"], tpu_cores=ACCELERATOR_TYPE["tpu_cores"]
)
# run predict on the test data
submission_stage_1 = trainer.predict(
model=model, dataloaders=datamodule.predict_dataloader()
)
submission_stage_1 = pd.concat(submission_stage_1).reset_index(drop=True)
print(" Submission Stage 1: After RoBERTA\n")
print(submission_stage_1)
submission_stage_2 = pd.merge(
left=submission_stage_1,
right=datamodule.test_df,
left_on="id",
right_on="id",
how="left",
)
submission_stage_2 = add_textstat_features(submission_stage_2)
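# Defensive check (illustrative addition): every feature selected on the training side
# must exist after the same textstat feature engineering has been applied to the test rows.
missing_features = [c for c in best_n_features if c not in submission_stage_2.columns]
assert not missing_features, f"missing engineered features: {missing_features}"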
feature_cols = list(submission_stage_2.drop(columns=["id"]).copy(deep=True).columns)
predict_data = submission_stage_2.drop(columns=["id"]).copy(deep=True)
predict_data = predict_data[best_n_features]
temp_cols = list(predict_data.drop(columns=["text_standard_category"]).columns)
predict_data_scaled = predict_data[temp_cols]
predict_data_scaled = scaler.transform(predict_data_scaled)
predict_data_scaled = pd.DataFrame(predict_data_scaled, columns=temp_cols)
predict_data_scaled["text_standard_category"] = predict_data["text_standard_category"]
submission = submission_stage_2[["id"]].copy(deep=True)
submission["target"] = gbm.predict(predict_data_scaled)
submission["target"] = TARGET_SCALER.inverse_transform(submission["target"])
print(" Final Stage After LGBM\n")
print(submission)
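# Final sanity check before writing the file (illustrative addition): one row per test
# id and no missing predictions.
assert submission["target"].notna().all(), "submission contains NaN predictions"
assert len(submission) == len(datamodule.test_df), "row count does not match the test set"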
submission.to_csv("submission.csv", index=False)