from setuptools import find_packages, setup
# When publishing the Docker image, a script checks for the first line with "version" and an equals sign to get the version.
version='1.0.0'
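# A minimal sketch (an assumption, not the actual publishing script) of such a check:
#   import re
#   with open('setup.py') as f:
#       print(re.search(r"version\s*=\s*'([^']+)'", f.read()).group(1))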
install_requires = [
'bokeh>=0.13',
'expiringdict>=1.1.4',
'injector>=0.16.2',
'joblib>=0.13.2',
'keras>=2.3',
'mmh3~=3.0.0',
'numpy',
# Required for saving plots.
'selenium>=3.141.0',
'scikit-multiflow>=0.3.0',
'spacy>=2.2',
'tqdm>=4.19',
]
test_deps = [
'pytest',
]
setup(
name='decai',
version=version,
packages=find_packages(),
url='https://github.com/microsoft/0xDeCA10B',
license='MIT',
author="Justin D. Harris",
author_email='',
description="Simulate Decentralized & Collaborative AI for Sharing Updatable Models.",
install_requires=install_requires,
tests_require=test_deps,
extras_require=dict(
test=test_deps,
),
)
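# Typical usage (illustrative): `pip install -e .` for a development install,
# or `pip install -e .[test]` to also pull in the test dependencies declared above.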
|
import json
from collections import defaultdict
from dataclasses import dataclass
from itertools import cycle
from logging import Logger
from operator import itemgetter
from pathlib import Path
from typing import List, Dict
from bokeh import colors
from bokeh.io import export_png
from bokeh.models import FuncTickFormatter, Legend, PrintfTickFormatter, AdaptiveTicker
from bokeh.plotting import figure, output_file
from injector import Injector, inject
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent
@inject
@dataclass
class SimulationCombiner(object):
_logger: Logger
def combine(self, runs: List[Dict], img_save_path: str):
"""
Combine runs from several files.
:param runs: Information about each run to combine: a dict with the run's display `name` and the `path` to its saved data.
:param img_save_path: The path at which to save the combined plot image.
"""
output_file('combined_plots.html')
plot = figure(title="Balances & Accuracy on Hidden Test Set", )
plot.width = 800
plot.height = 800
plot.xaxis.axis_label = "Time (days)"
plot.yaxis.axis_label = "Percent"
plot.title.text_font_size = '20pt'
plot.xaxis.major_label_text_font_size = '16pt'
plot.xaxis.axis_label_text_font_size = '16pt'
plot.yaxis.major_label_text_font_size = '16pt'
plot.yaxis.axis_label_text_font_size = '16pt'
plot.xaxis[0].ticker = AdaptiveTicker(base=5 * 24 * 60 * 60)
plot.xgrid[0].ticker = AdaptiveTicker(base=24 * 60 * 60)
# JavaScript code: convert tick values from seconds to days (86400 seconds per day).
plot.xaxis[0].formatter = FuncTickFormatter(code="""
return (tick / 86400).toFixed(0);
""")
plot.yaxis[0].formatter = PrintfTickFormatter(format="%0.1f%%")
# TODO Make plot wider (or maybe it's ok for the paper).
good_colors = cycle([
colors.named.green,
colors.named.lawngreen,
colors.named.darkgreen,
colors.named.limegreen,
])
bad_colors = cycle([
colors.named.red,
colors.named.darkred,
colors.named.orangered,
colors.named.indianred,
])
accuracy_colors = cycle([
colors.named.blue,
colors.named.cadetblue,
colors.named.cornflowerblue,
colors.named.darkblue,
])
baseline_accuracy_colors = cycle([
colors.named.black,
colors.named.darkgrey,
colors.named.slategrey,
colors.named.darkslategrey,
])
line_dashes = cycle([
'solid',
'dashed',
'dotted',
'dotdash',
'dashdot',
])
legend = []
for run in runs:
name = run['name']
path = run['path']
line_dash = next(line_dashes)
self._logger.info("Opening \"%s\".", path)
with open(path) as f:
data = json.load(f)
baseline_accuracy = data['baselineAccuracy']
if baseline_accuracy is not None:
self._logger.debug("Baseline accuracy: %s", baseline_accuracy)
r = plot.ray(x=[0], y=[baseline_accuracy * 100], length=0, angle=0, line_width=2,
line_dash=line_dash,
color=next(baseline_accuracy_colors))
legend.append((f"{name} accuracy when trained with all data: {baseline_accuracy * 100:0.1f}%", [r]))
agents: Dict[str, Agent] = dict()
for agent in data['agents']:
agent = Agent(**agent)
agents[agent.address] = agent
l = plot.line(x=[d['t'] for d in data['accuracies']],
y=[d['accuracy'] * 100 for d in data['accuracies']],
line_dash=line_dash,
line_width=2,
color=next(accuracy_colors),
)
legend.append((f"{name} Accuracy", [l]))
agent_balance_data = defaultdict(list)
for balance_data in data['balances']:
agent = balance_data['a']
agent_balance_data[agent].append(
(balance_data['t'], balance_data['b'] * 100 / agents[agent].start_balance))
for agent_id, balance_data in sorted(agent_balance_data.items(), key=itemgetter(0)):
agent = agents[agent_id]
if agent.good:
color = next(good_colors)
else:
color = next(bad_colors)
l = plot.line(x=list(map(itemgetter(0), balance_data)),
y=list(map(itemgetter(1), balance_data)),
line_dash=line_dash,
line_width=2,
color=color,
)
legend.append((f"{name} {agent.address} Agent Balance", [l]))
self._logger.info("Done going through runs.")
legend = Legend(items=legend, location='center_left')
plot.add_layout(legend, 'above')
plot.legend.label_text_font_size = '12pt'
self._logger.info("Saving image to: %s", img_save_path)
export_png(plot, img_save_path)
if __name__ == '__main__':
inj = Injector([
LoggingModule,
])
s = inj.get(SimulationCombiner)
path = Path(__file__, '../../..').resolve()
paths = dict(
fitness=dict(
nb=path / 'saved_runs/1578937397-fitness-nb.json',
ncc=path / 'saved_runs/1578938741-fitness-ncc.json',
perceptron=path / 'saved_runs/1578934493-fitness-perceptron.json',
),
imdb=dict(
nb=path / 'saved_runs/1580943847-imdb-nb-simulation_data.json',
ncc=path / 'saved_runs/1580945025-imdb-ncc-simulation_data.json',
perceptron=path / 'saved_runs/1580945565-imdb-perceptron-simulation_data.json',
),
news=dict(
nb=path / 'saved_runs/1580941815-news-nb-simulation_data.json',
ncc=path / 'saved_runs/1580941258-news-ncc-simulation_data.json',
perceptron=path / 'saved_runs/1580940494-news-perceptron-simulation_data.json',
),
)
for dataset in paths.keys():
s.combine([
dict(name="NB",
path=paths[dataset]['nb']
),
dict(name="NCC",
path=paths[dataset]['ncc']
),
dict(name="Perceptron",
path=paths[dataset]['perceptron']
),
],
path / f'saved_runs/combined-{dataset}.png')
|
import json
import logging
import os
import random
import time
from dataclasses import asdict, dataclass
from functools import partial
from itertools import cycle
from logging import Logger
from platform import uname
from queue import PriorityQueue
from threading import Thread
from typing import List, Optional
import numpy as np
from bokeh import colors
from bokeh.document import Document
from bokeh.io import export_png
from bokeh.models import AdaptiveTicker, ColumnDataSource, FuncTickFormatter, PrintfTickFormatter
from bokeh.plotting import curdoc, figure
from injector import inject
from tornado import gen
from tqdm import tqdm
from decai.simulation.contract.balances import Balances
from decai.simulation.contract.collab_trainer import CollaborativeTrainer
from decai.simulation.contract.incentive.prediction_market import MarketPhase, PredictionMarket
from decai.simulation.contract.objects import Address, Msg, RejectException, TimeMock
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.featuremapping.feature_index_mapper import FeatureIndexMapper
@dataclass
class Agent:
"""
A user to run in the simulator.
"""
address: Address
start_balance: float
mean_deposit: float
stdev_deposit: float
mean_update_wait_s: float
stdev_update_wait_time: float = 1
pay_to_call: float = 0
good: bool = True
prob_mistake: float = 0
calls_model: bool = False
def __post_init__(self):
assert self.start_balance > self.mean_deposit
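# Agents are compared when (time, agent) entries in the simulator's
# `PriorityQueue` have equal times, so an ordering (by address) is required.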
def __lt__(self, other):
return self.address < other.address
def get_next_deposit(self) -> int:
while True:
result = int(random.normalvariate(self.mean_deposit, self.stdev_deposit))
if result > 0:
return result
def get_next_wait_s(self) -> int:
while True:
result = int(random.normalvariate(self.mean_update_wait_s, self.stdev_update_wait_time))
if result >= 1:
return result
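# Example (illustrative values, matching the runners below): an honest contributor
# that deposits ~5 units with each update, roughly every 10 minutes:
#   Agent(address="Good", start_balance=10_000, mean_deposit=5, stdev_deposit=1,
#         mean_update_wait_s=10 * 60)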
class Simulator(object):
"""
A simulator for Decentralized & Collaborative AI.
"""
@inject
def __init__(self,
balances: Balances,
data_loader: DataLoader,
decai: CollaborativeTrainer,
feature_index_mapper: FeatureIndexMapper,
logger: Logger,
time_method: TimeMock,
):
self._balances = balances
self._data_loader = data_loader
self._decai = decai
self._feature_index_mapper = feature_index_mapper
self._logger = logger
self._time = time_method
self._warned_about_saving_plot = False
def save_plot_image(self, plot, plot_save_path):
try:
export_png(plot, filename=plot_save_path)
except Exception as e:
if self._warned_about_saving_plot:
return
show_error_details = True
message = "Could not save picture of the plot."
try:
# Check if in WSL.
show_error_details = not ('microsoft' in uname().release.lower())
except Exception:
pass
if show_error_details:
self._logger.exception(message, exc_info=e)
else:
self._logger.warning(f"{message} %s", e)
self._warned_about_saving_plot = True
def simulate(self,
agents: List[Agent],
baseline_accuracy: Optional[float] = None,
init_train_data_portion: float = 0.1,
pm_test_sets: Optional[list] = None,
accuracy_plot_wait_s: float = 2E5,
train_size: Optional[int] = None, test_size: Optional[int] = None,
filename_indicator: Optional[str] = None
):
"""
Run a simulation.
:param agents: The agents that will interact with the data.
:param baseline_accuracy: The baseline accuracy of the model.
Usually the accuracy on a hidden test set when the model is trained with all data.
:param init_train_data_portion: The portion of the data to initially use for training. Must be in [0, 1].
:param pm_test_sets: The test sets for the prediction market incentive mechanism.
:param accuracy_plot_wait_s: The amount of time to wait in seconds between plotting the accuracy.
:param train_size: The amount of training data to use.
:param test_size: The amount of test data to use.
:param filename_indicator: A string identifying this run, used in the names of the files saved for the run.
"""
assert 0 <= init_train_data_portion <= 1
# Data to save.
save_data = dict(agents=[asdict(a) for a in agents],
baselineAccuracy=baseline_accuracy,
initTrainDataPortion=init_train_data_portion,
accuracies=[],
balances=[],
)
time_for_filenames = int(time.time())
save_path = f'saved_runs/{time_for_filenames}-{filename_indicator}-simulation_data.json'
model_save_path = f'saved_runs/{time_for_filenames}-{filename_indicator}-model.json'
plot_save_path = f'saved_runs/{time_for_filenames}-{filename_indicator}.png'
self._logger.info("Saving run info to \"%s\".", save_path)
os.makedirs(os.path.dirname(save_path), exist_ok=True)
# Set up plots.
doc: Document = curdoc()
doc.title = "DeCAI Simulation"
plot = figure(title="Balances & Accuracy on Hidden Test Set",
)
plot.width = 800
plot.height = 600
plot.xaxis.axis_label = "Time (days)"
plot.yaxis.axis_label = "Percent"
plot.title.text_font_size = '20pt'
plot.xaxis.major_label_text_font_size = '20pt'
plot.xaxis.axis_label_text_font_size = '20pt'
plot.yaxis.major_label_text_font_size = '20pt'
plot.yaxis.axis_label_text_font_size = '20pt'
plot.xaxis[0].ticker = AdaptiveTicker(base=5 * 24 * 60 * 60)
plot.xgrid[0].ticker = AdaptiveTicker(base=24 * 60 * 60)
balance_plot_sources_per_agent = dict()
good_colors = cycle([
colors.named.green,
colors.named.lawngreen,
colors.named.darkgreen,
colors.named.limegreen,
])
bad_colors = cycle([
colors.named.red,
colors.named.darkred,
])
for agent in agents:
source = ColumnDataSource(dict(t=[], b=[]))
assert agent.address not in balance_plot_sources_per_agent
balance_plot_sources_per_agent[agent.address] = source
if agent.calls_model:
color = 'blue'
line_dash = 'dashdot'
elif agent.good:
color = next(good_colors)
line_dash = 'dotted'
else:
color = next(bad_colors)
line_dash = 'dashed'
plot.line(x='t', y='b',
line_dash=line_dash,
line_width=2,
source=source,
color=color,
legend=f"{agent.address} Balance")
plot.legend.location = 'top_left'
plot.legend.label_text_font_size = '12pt'
# JavaScript code: convert tick values from seconds to days (86400 seconds per day).
plot.xaxis[0].formatter = FuncTickFormatter(code="""
return (tick / 86400).toFixed(0);
""")
plot.yaxis[0].formatter = PrintfTickFormatter(format="%0.1f%%")
acc_source = ColumnDataSource(dict(t=[], a=[]))
if baseline_accuracy is not None:
plot.ray(x=[0], y=[baseline_accuracy * 100], length=0, angle=0, line_width=2,
legend=f"Accuracy when trained with all data: {baseline_accuracy * 100:0.1f}%")
plot.line(x='t', y='a',
line_dash='solid',
line_width=2,
source=acc_source,
color='black',
legend="Current Accuracy")
@gen.coroutine
def plot_cb(agent: Agent, t, b):
source = balance_plot_sources_per_agent[agent.address]
source.stream(dict(t=[t], b=[b * 100 / agent.start_balance]))
save_data['balances'].append(dict(t=t, a=agent.address, b=b))
@gen.coroutine
def plot_accuracy_cb(t, a):
acc_source.stream(dict(t=[t], a=[a * 100]))
save_data['accuracies'].append(dict(t=t, accuracy=a))
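# With a prediction market, rewards are settled once the market ends,
# so refunds and reports are not processed continuously while adding data.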
continuous_evaluation = not isinstance(self._decai.im, PredictionMarket)
def task():
(x_train, y_train), (x_test, y_test) = \
self._data_loader.load_data(train_size=train_size, test_size=test_size)
classifications = self._data_loader.classifications()
x_train, x_test, feature_index_mapping = self._feature_index_mapper.map(x_train, x_test)
x_train_len = x_train.shape[0]
init_idx = int(x_train_len * init_train_data_portion)
self._logger.info("Initializing model with %d out of %d samples.",
init_idx, x_train_len)
x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]
save_model = isinstance(self._decai.im, PredictionMarket) and self._decai.im.reset_model_during_reward_phase
self._decai.model.init_model(x_init_data, y_init_data, save_model)
if self._logger.isEnabledFor(logging.DEBUG):
s = self._decai.model.evaluate(x_init_data, y_init_data)
self._logger.debug("Initial training data evaluation: %s", s)
if len(x_remaining) > 0:
s = self._decai.model.evaluate(x_remaining, y_remaining)
self._logger.debug("Remaining training data evaluation: %s", s)
else:
self._logger.debug("There is no more remaining data to evaluate.")
self._logger.info("Evaluating initial model.")
accuracy = self._decai.model.log_evaluation_details(x_test, y_test)
self._logger.info("Initial test set accuracy: %0.2f%%", accuracy * 100)
t = self._time()
doc.add_next_tick_callback(
partial(plot_accuracy_cb, t=t, a=accuracy))
q = PriorityQueue()
random.shuffle(agents)
for agent in agents:
self._balances.initialize(agent.address, agent.start_balance)
q.put((self._time() + agent.get_next_wait_s(), agent))
doc.add_next_tick_callback(
partial(plot_cb, agent=agent, t=t, b=agent.start_balance))
unclaimed_data = []
next_data_index = 0
next_accuracy_plot_time = 1E4
desc = "Processing agent requests"
current_time = 0
with tqdm(desc=desc,
unit_scale=True, mininterval=2, unit=" requests",
total=len(x_remaining),
) as pbar:
while not q.empty():
# For now assume sending a transaction (editing) is free (no gas)
# since it should be relatively cheaper than the deposit required to add data.
# It may not be cheaper than calling `report`.
if next_data_index >= len(x_remaining):
if not continuous_evaluation or len(unclaimed_data) == 0:
break
current_time, agent = q.get()
update_balance_plot = False
if current_time > next_accuracy_plot_time:
self._logger.debug("Evaluating.")
next_accuracy_plot_time += accuracy_plot_wait_s
accuracy = self._decai.model.evaluate(x_test, y_test)
doc.add_next_tick_callback(
partial(plot_accuracy_cb, t=current_time, a=accuracy))
if continuous_evaluation:
self._logger.debug("Unclaimed data: %d", len(unclaimed_data))
pbar.set_description(f"{desc} ({len(unclaimed_data)} unclaimed)")
with open(save_path, 'w') as f:
json.dump(save_data, f, separators=(',', ':'))
self._decai.model.export(model_save_path, classifications,
feature_index_mapping=feature_index_mapping)
if os.path.exists(plot_save_path):
os.remove(plot_save_path)
self.save_plot_image(plot, plot_save_path)
self._time.set_time(current_time)
balance = self._balances[agent.address]
if balance > 0 and next_data_index < len(x_remaining):
# Pick data.
x, y = x_remaining[next_data_index], y_remaining[next_data_index]
if agent.calls_model:
# Only call the model if it's good.
if random.random() < accuracy:
update_balance_plot = True
self._decai.predict(Msg(agent.address, agent.pay_to_call), x)
else:
if not agent.good:
y = 1 - y
if agent.prob_mistake > 0 and random.random() < agent.prob_mistake:
y = 1 - y
# Bad agents always contribute.
# Good agents only contribute if the model seems to be doing well,
# plus a bit of slack (+0.15) since e.g. 0.85 accuracy is acceptable.
if not agent.good or random.random() < accuracy + 0.15:
value = agent.get_next_deposit()
if value > balance:
value = balance
msg = Msg(agent.address, value)
try:
self._decai.add_data(msg, x, y)
# Don't need to plot every time. Plot less as we get more data.
update_balance_plot = next_data_index / len(x_remaining) + 0.1 < random.random()
balance = self._balances[agent.address]
if continuous_evaluation:
unclaimed_data.append((current_time, agent, x, y))
next_data_index += 1
pbar.update()
except RejectException:
# The transaction probably failed because the agent didn't pay enough, which is okay.
# It can also fail if not enough time has passed since the data was added,
# which is okay too: a real contract would reject it as well
# because the smallest unit of time we can use is 1s.
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.exception("Error adding data.")
if balance > 0:
q.put((current_time + agent.get_next_wait_s(), agent))
claimed_indices = []
for i in range(len(unclaimed_data)):
added_time, adding_agent, x, classification = unclaimed_data[i]
if current_time - added_time < self._decai.im.refund_time_s:
break
if next_data_index >= len(x_remaining) \
and current_time - added_time < self._decai.im.any_address_claim_wait_time_s:
break
balance = self._balances[agent.address]
msg = Msg(agent.address, balance)
if current_time - added_time > self._decai.im.any_address_claim_wait_time_s:
# Attempt to take the entire deposit.
try:
self._decai.report(msg, x, classification, added_time, adding_agent.address)
update_balance_plot = True
except RejectException:
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.exception("Error taking reward.")
elif adding_agent.address == agent.address:
try:
self._decai.refund(msg, x, classification, added_time)
update_balance_plot = True
except RejectException:
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.exception("Error getting refund.")
else:
try:
self._decai.report(msg, x, classification, added_time, adding_agent.address)
update_balance_plot = True
except RejectException:
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.exception("Error taking reward.")
stored_data = self._decai.data_handler.get_data(x, classification,
added_time, adding_agent.address)
if stored_data.claimable_amount <= 0:
claimed_indices.append(i)
for i in claimed_indices[::-1]:
unclaimed_data.pop(i)
if update_balance_plot:
balance = self._balances[agent.address]
doc.add_next_tick_callback(
partial(plot_cb, agent=agent, t=current_time, b=balance))
self._logger.info("Done going through data.")
if continuous_evaluation:
pbar.set_description(f"{desc} ({len(unclaimed_data)} unclaimed)")
if isinstance(self._decai.im, PredictionMarket):
self._time.add_time(agents[0].get_next_wait_s())
self._decai.im.end_market()
for i, test_set_portion in enumerate(pm_test_sets):
if i != self._decai.im.test_reveal_index:
self._decai.im.verify_next_test_set(test_set_portion)
with tqdm(desc="Processing contributions",
unit_scale=True, mininterval=2, unit=" contributions",
total=self._decai.im.get_num_contributions_in_market(),
) as pbar:
finished_first_round_of_rewards = False
while self._decai.im.remaining_bounty_rounds > 0:
self._time.add_time(agents[0].get_next_wait_s())
self._decai.im.process_contribution()
pbar.update()
if not finished_first_round_of_rewards:
accuracy = self._decai.im.prev_acc
# If we plot too often then we end up with a blob instead of a line.
if random.random() < 0.1:
doc.add_next_tick_callback(
partial(plot_accuracy_cb, t=self._time(), a=accuracy))
if self._decai.im.state == MarketPhase.REWARD_RESTART:
finished_first_round_of_rewards = True
if self._decai.im.reset_model_during_reward_phase:
# Update the accuracy after resetting all data.
accuracy = self._decai.im.prev_acc
else:
# Use the accuracy after training with all data.
pass
doc.add_next_tick_callback(
partial(plot_accuracy_cb, t=self._time(), a=accuracy))
pbar.total += self._decai.im.get_num_contributions_in_market()
self._time.add_time(self._time() * 0.001)
for agent in agents:
balance = self._balances[agent.address]
market_bal = self._decai.im._market_balances[agent.address]
self._logger.debug("\"%s\" market balance: %0.2f Balance: %0.2f",
agent.address, market_bal, balance)
doc.add_next_tick_callback(
partial(plot_cb, agent=agent, t=self._time(), b=max(balance + market_bal, 0)))
self._time.add_time(self._time() * 0.02)
for agent in agents:
msg = Msg(agent.address, 0)
# Find data submitted by them.
data = None
for key, stored_data in self._decai.data_handler:
if stored_data.sender == agent.address:
data = key[0]
break
if data is not None:
self._decai.refund(msg, np.array(data), stored_data.classification, stored_data.time)
balance = self._balances[agent.address]
doc.add_next_tick_callback(
partial(plot_cb, agent=agent, t=self._time(), b=balance))
self._logger.info("Balance for \"%s\": %.2f (%+.2f%%)",
agent.address, balance,
(balance - agent.start_balance) / agent.start_balance * 100)
else:
self._logger.warning("No data submitted by \"%s\" was found."
"\nWill not update it's balance.", agent.address)
self._logger.info("Done issuing rewards.")
accuracy = self._decai.model.log_evaluation_details(x_test, y_test)
doc.add_next_tick_callback(
partial(plot_accuracy_cb, t=current_time + 100, a=accuracy))
with open(save_path, 'w') as f:
json.dump(save_data, f, separators=(',', ':'))
self._decai.model.export(model_save_path, classifications, feature_index_mapping=feature_index_mapping)
if os.path.exists(plot_save_path):
os.remove(plot_save_path)
self.save_plot_image(plot, plot_save_path)
doc.add_root(plot)
thread = Thread(target=task)
thread.start()
|
import os
import sys
import math
from injector import inject, Injector
from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.classification.decision_tree import DecisionTreeModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.titanic_data_loader import TitanicDataModule
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator
# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
class Runner(object):
@inject
def __init__(self,
data: DataLoader,
simulator: Simulator,
):
self._data = data
self._s = simulator
def run(self):
init_train_data_portion = 0.10
# Set up the agents that will act in the simulation.
agents = [
# Good
Agent(address="Good",
start_balance=10_000,
mean_deposit=5,
stdev_deposit=1,
mean_update_wait_s=10 * 60,
),
# Malicious: a determined agent with the goal of disrupting others.
Agent(address="Bad",
start_balance=10_000,
mean_deposit=10,
stdev_deposit=3,
mean_update_wait_s=1 * 60 * 60,
good=False,
),
]
# Start the simulation.
self._s.simulate(agents,
baseline_accuracy=0.806,
init_train_data_portion=init_train_data_portion,
accuracy_plot_wait_s=math.inf,
)
# Run with `bokeh serve PATH`.
if __name__.startswith('bk_script_'):
# Set up the data, model, and incentive mechanism.
inj = Injector([
DecisionTreeModule,
DefaultCollaborativeTrainerModule,
LoggingModule,
StakeableImModule,
TitanicDataModule,
])
inj.get(Runner).run()
if __name__ == '__main__':
# Train the model and show some sample predictions.
inj = Injector([
DecisionTreeModule(regression=False),
DefaultCollaborativeTrainerModule,
LoggingModule,
StakeableImModule,
TitanicDataModule
])
d = inj.get(DataLoader)
(x_train, y_train), (x_test, y_test) = d.load_data()
c = inj.get(Classifier)
c.init_model(x_train, y_train)
score = c.evaluate(x_train, y_train)
import random
for _ in range(10):
i = random.randrange(len(x_train))
print(f"{i:04d}: {x_train[i]}: {y_train[i]}")
print(f"Prediction: {c.predict(x_train[i])}")
print(f"Evaluation on training data: {score * 100:0.2f}%")
if len(x_test) > 0:
score = c.evaluate(x_test, y_test)
print(f"Evaluation on test data: {score * 100:0.2f}%")
|
import json
import os
import random
import sys
from collections import Counter
from typing import cast
import math
import numpy as np
from injector import inject, Injector
from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.classification.decision_tree import DecisionTreeModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.ttt_data_loader import TicTacToeDataModule, TicTacToeDataLoader
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator
# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
class Runner(object):
@inject
def __init__(self,
data: DataLoader,
simulator: Simulator,
):
self._data = data
self._s = simulator
def run(self):
init_train_data_portion = 0.10
# Set up the agents that will act in the simulation.
agents = [
# Good
Agent(address="Good",
start_balance=10_000,
mean_deposit=5,
stdev_deposit=1,
mean_update_wait_s=10 * 60,
),
# Malicious: a determined agent with the goal of disrupting others.
Agent(address="Bad",
start_balance=10_000,
mean_deposit=10,
stdev_deposit=3,
mean_update_wait_s=1 * 60 * 60,
good=False,
),
]
# Start the simulation.
self._s.simulate(agents,
baseline_accuracy=0.44,
init_train_data_portion=init_train_data_portion,
accuracy_plot_wait_s=math.inf,
)
# Run with `bokeh serve PATH`.
if __name__.startswith('bk_script_'):
# Set up the data, model, and incentive mechanism.
inj = Injector([
DecisionTreeModule,
DefaultCollaborativeTrainerModule,
LoggingModule,
StakeableImModule,
TicTacToeDataModule,
])
inj.get(Runner).run()
def _map_pos(tic_tac_toe, board, pos):
assert 0 <= pos < board.size
return pos // tic_tac_toe.width, pos % tic_tac_toe.width
def play_game(classifier, tic_tac_toe):
board = np.zeros((tic_tac_toe.width, tic_tac_toe.length), dtype=np.int8)
if random.random() < 0.5:
# Machine is playing.
pos = classifier.predict(board.flatten())
board[_map_pos(tic_tac_toe, board, pos)] = 1
m = {0: '#', 1: 'O', -1: 'X'}
map_symbols = np.vectorize(lambda x: m[x])
def print_board(b):
print(np.array2string(map_symbols(b), formatter={'str_kind': lambda x: x}))
print(f"The machine is O. You are X.\nPositions:\n{np.arange(board.size).reshape(board.shape)}")
while True:
if np.count_nonzero(board) == board.size:
print("TIE")
break
# Person's turn.
print_board(board)
while True:
pos = input("Where would you like to go? ")
pos = _map_pos(tic_tac_toe, board, int(pos.strip()))
if board[pos] == 0:
board[pos] = -1
break
else:
print("There is already a value there.")
winner = tic_tac_toe.get_winner(board)
if winner is not None:
print("You WIN!")
break
# Machine's turn.
original_pos = classifier.predict(board.flatten())
pos = _map_pos(tic_tac_toe, board, original_pos)
if board[pos] != 0:
print(f"Machine picked a spot that already has a marker ({original_pos}). This probably means a draw.")
print_board(board)
break
board[pos] = 1
winner = tic_tac_toe.get_winner(board)
if winner is not None:
print("You lose :(")
break
print_board(board)
def evaluate_on_self(classifier, tic_tac_toe):
print("Evaluating by playing against itself.")
def _run_game(board, next_player):
if next_player == -1:
# Flip the board since the bot always thinks it is 1.
board_for_prediction = -board
else:
board_for_prediction = board
pos = classifier.predict(board_for_prediction.flatten())
pos = _map_pos(tic_tac_toe, board, pos)
if board[pos] != 0:
return "TIE", np.count_nonzero(board == next_player)
board[pos] = next_player
if tic_tac_toe.get_winner(board):
return next_player, np.count_nonzero(board == next_player)
else:
return _run_game(board, -1 if next_player == 1 else 1)
# Start with empty board and let the model pick where to start.
board = np.zeros((tic_tac_toe.width, tic_tac_toe.length), dtype=np.int8)
winner, num_moves = _run_game(board, 1)
if winner == 1:
print(f"When model starts: WINS in {num_moves} moves.")
elif isinstance(winner, str):
print(f"When model starts: {winner} in {num_moves} moves.")
else:
print(f"When model starts: LOSES. Winner has {num_moves} moves.")
winners = Counter()
winner_move_counts = []
for start_pos in range(board.size):
board = np.zeros((tic_tac_toe.width, tic_tac_toe.length), dtype=np.int8)
board[_map_pos(tic_tac_toe, board, start_pos)] = -1
winner, num_moves = _run_game(board, 1)
winners[winner] += 1
winner_move_counts.append(num_moves)
print("Winners when -1 starts in each position:")
print(json.dumps(winners, indent=2))
print(f"Winner move counts:\n{winner_move_counts}")
print(f"Avg # winner moves: {np.average(winner_move_counts)}")
if __name__ == '__main__':
# Play the game.
inj = Injector([
DecisionTreeModule,
DefaultCollaborativeTrainerModule,
LoggingModule,
StakeableImModule,
TicTacToeDataModule,
])
ttt = inj.get(DataLoader)
assert isinstance(ttt, TicTacToeDataLoader)
ttt = cast(TicTacToeDataLoader, ttt)
# To train on all data.
# ttt._train_split = 1
(x_train, y_train), (x_test, y_test) = ttt.load_data()
c = inj.get(Classifier)
c.init_model(x_train, y_train)
score = c.evaluate(x_train, y_train)
print(f"Evaluation on training data: {score}")
if len(x_test) > 0:
score = c.evaluate(x_test, y_test)
print(f"Evaluation on test data: {score}")
evaluate_on_self(c, ttt)
while True:
play_game(c, ttt)
|
import os
import sys
import math
from injector import inject, Injector
from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.classification.decision_tree import DecisionTreeModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.data.bhp_data_loader import BhpDataModule
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator
# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
class Runner(object):
@inject
def __init__(self,
data: DataLoader,
simulator: Simulator,
):
self._data = data
self._s = simulator
def run(self):
init_train_data_portion = 0.10
# Set up the agents that will act in the simulation.
agents = [
# Good
Agent(address="Good",
start_balance=10_000,
mean_deposit=5,
stdev_deposit=1,
mean_update_wait_s=10 * 60,
),
# Malicious: a determined agent with the goal of disrupting others.
Agent(address="Bad",
start_balance=10_000,
mean_deposit=10,
stdev_deposit=3,
mean_update_wait_s=1 * 60 * 60,
good=False,
),
]
# Start the simulation.
self._s.simulate(agents,
baseline_accuracy=0.44,
init_train_data_portion=init_train_data_portion,
accuracy_plot_wait_s=math.inf,
)
# Run with `bokeh serve PATH`.
if __name__.startswith('bk_script_'):
# Set up the data, model, and incentive mechanism.
inj = Injector([
DecisionTreeModule,
DefaultCollaborativeTrainerModule,
LoggingModule,
StakeableImModule,
BhpDataModule,
])
inj.get(Runner).run()
if __name__ == '__main__':
# Train the model and show some sample predictions.
inj = Injector([
DecisionTreeModule(regression=True),
DefaultCollaborativeTrainerModule,
LoggingModule,
StakeableImModule,
BhpDataModule,
])
d = inj.get(DataLoader)
(x_train, y_train), (x_test, y_test) = d.load_data()
c = inj.get(Classifier)
c.init_model(x_train, y_train)
score = c.evaluate(x_train, y_train)
import random
for _ in range(10):
i = random.randrange(len(x_train))
print(f"{i:04d}: {x_train[i]}: {y_train[i]}")
print(f"Prediction: {c.predict(x_train[i])}")
print(f"Evaluation on training data: {score}")
if len(x_test) > 0:
score = c.evaluate(x_test, y_test)
print(f"Evaluation on test data: {score}")
|
import logging
from dataclasses import dataclass, field
from logging import Logger
from injector import Module, provider, singleton
@dataclass
class LoggingModule(Module):
_log_level: int = field(default=logging.INFO)
@provider
@singleton
def provide_logger(self) -> Logger:
result = logging.Logger('decai')
result.setLevel(self._log_level)
f = logging.Formatter('%(asctime)s [%(levelname)s] - %(name)s:%(filename)s:%(funcName)s\n%(message)s')
h = logging.StreamHandler()
h.setFormatter(f)
result.addHandler(h)
return result
|
import os
import sys
from typing import Optional
from injector import Injector
from decai.simulation.contract.classification.perceptron import PerceptronModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.data.imdb_data_loader import ImdbDataModule
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator
# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
num_words = 1000
train_size: Optional[int] = None
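# When the training set size is known, initialize the model with roughly 100 samples
# (100 / train_size of the data); otherwise fall back to a fixed portion.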
if train_size is None:
init_train_data_portion = 0.08
else:
init_train_data_portion = 100 / train_size
def main():
# Set up the agents that will act in the simulation.
agents = [
# Good
Agent(address="Good",
start_balance=10_000,
mean_deposit=50,
stdev_deposit=10,
mean_update_wait_s=10 * 60,
prob_mistake=0.0001,
),
# Malicious: A determined agent with the goal of disrupting others.
Agent(address="Bad",
start_balance=10_000,
mean_deposit=100,
stdev_deposit=3,
mean_update_wait_s=1 * 60 * 60,
good=False,
),
# An agent that just pays to call the model.
Agent(address="Caller",
start_balance=30_000,
mean_deposit=0,
stdev_deposit=0,
mean_update_wait_s=2 * 60 * 60,
calls_model=True,
pay_to_call=50
),
]
# No caller (assume free to call).
agents = agents[:-1]
# Set up the data, model, and incentive mechanism.
inj = Injector([
DefaultCollaborativeTrainerModule,
ImdbDataModule(num_words=num_words),
LoggingModule,
PerceptronModule,
StakeableImModule,
])
s = inj.get(Simulator)
# Accuracy on hidden test set after training with all training data:
baseline_accuracies = {
100: 0.6210,
200: 0.6173,
1000: 0.7945,
10000: 0.84692,
20000: 0.8484,
}
# Start the simulation.
s.simulate(agents,
baseline_accuracy=baseline_accuracies[num_words],
init_train_data_portion=init_train_data_portion,
train_size=train_size,
)
# Run with `bokeh serve PATH`.
if __name__.startswith('bk_script_'):
main()
|
import os
import sys
import math
from injector import inject, Injector
from decai.simulation.contract.balances import Balances
from decai.simulation.contract.classification.perceptron import PerceptronModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism
from decai.simulation.contract.incentive.prediction_market import PredictionMarket, PredictionMarketImModule
from decai.simulation.contract.objects import Msg
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.imdb_data_loader import ImdbDataModule
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator
# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
num_words = 1000
class Runner(object):
@inject
def __init__(self,
balances: Balances,
data: DataLoader,
im: IncentiveMechanism,
simulator: Simulator,
):
assert isinstance(im, PredictionMarket)
self._balances = balances
self._data = data
self._im = im
self._s = simulator
def run(self):
initializer_address = 'initializer'
total_bounty = 100_000
train_size = 10_000
test_size = 1000
init_train_data_portion = 10 / train_size
# Set up the agents that will act in the simulation.
agents = [
# Good
Agent(address="Good 1",
start_balance=10_000,
mean_deposit=5,
stdev_deposit=1,
mean_update_wait_s=10 * 60,
),
Agent(address="Good 2",
start_balance=10_000,
mean_deposit=5,
stdev_deposit=1,
mean_update_wait_s=20 * 60,
),
Agent(address="Good 3",
start_balance=10_000,
mean_deposit=5,
stdev_deposit=1,
mean_update_wait_s=30 * 60,
),
# Malicious: a determined agent with the goal of disrupting others.
Agent(address="Bad 1",
start_balance=10_000,
mean_deposit=10,
stdev_deposit=3,
mean_update_wait_s=1 * 60 * 60,
good=False,
),
Agent(address="Bad 2",
start_balance=10_000,
mean_deposit=10,
stdev_deposit=3,
mean_update_wait_s=1 * 60 * 60,
good=False,
),
]
self._balances.initialize(initializer_address, total_bounty)
(x_train, y_train), (x_test, y_test) = self._data.load_data(train_size=train_size, test_size=test_size)
init_idx = int(len(x_train) * init_train_data_portion)
assert init_idx > 0
x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]
# Split test set into pieces.
num_pieces = 10
test_dataset_hashes, test_sets = self._im.get_test_set_hashes(num_pieces, x_test, y_test)
# Ending criteria:
min_length_s = 1_000
min_num_contributions = len(x_remaining)
save_model = isinstance(self._im, PredictionMarket) and self._im.reset_model_during_reward_phase
self._im.model.init_model(x_init_data, y_init_data, save_model)
test_reveal_index = self._im.initialize_market(Msg(initializer_address, total_bounty),
test_dataset_hashes,
min_length_s, min_num_contributions)
assert 0 <= test_reveal_index < len(test_dataset_hashes)
self._im.reveal_init_test_set(test_sets[test_reveal_index])
# Accuracy on hidden test set after training with all training data:
baseline_accuracies = {
100: 0.6210,
200: 0.6173,
1000: 0.7945,
10000: 0.84692,
20000: 0.8484,
}
# Start the simulation.
self._s.simulate(agents,
baseline_accuracy=baseline_accuracies[num_words],
init_train_data_portion=init_train_data_portion,
pm_test_sets=test_sets,
accuracy_plot_wait_s=math.inf,
train_size=train_size,
)
# Run with `bokeh serve PATH`.
if __name__.startswith('bk_script_'):
# Set up the data, model, and incentive mechanism.
inj = Injector([
DefaultCollaborativeTrainerModule,
ImdbDataModule(num_words=num_words),
LoggingModule,
PerceptronModule,
PredictionMarketImModule,
])
inj.get(Runner).run()
|
import os
import re
import sys
from injector import Injector
from sklearn.naive_bayes import MultinomialNB
from decai.simulation.contract.classification.ncc_module import NearestCentroidClassifierModule
from decai.simulation.contract.classification.perceptron import PerceptronModule
from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.data.featuremapping.hashing.murmurhash3 import MurmurHash3Module
from decai.simulation.data.fitness_data_loader import FitnessDataModule
from decai.simulation.data.imdb_data_loader import ImdbDataModule
from decai.simulation.data.news_data_loader import NewsDataModule
from decai.simulation.data.offensive_data_loader import OffensiveDataModule
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator
# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
datasets = dict(
fitness=dict(module=FitnessDataModule,
train_size=3500, test_size=1500,
),
imdb=dict(module=ImdbDataModule(num_words=1000),
train_size=None, test_size=None,
),
news=dict(module=NewsDataModule,
train_size=None, test_size=None,
),
offensive=dict(module=OffensiveDataModule,
train_size=None, test_size=None,
),
)
models = dict(
nb=dict(module=SciKitClassifierModule(MultinomialNB),
baseline_accuracy=dict(
# train_size, test_size = 3500, 1500
fitness=0.97,
# train_size, test_size = None, None
imdb=0.8323,
# train_size, test_size = None, None
news=0.8181,
)),
ncc=dict(module=NearestCentroidClassifierModule,
baseline_accuracy=dict(
# train_size, test_size = 3500, 1500
fitness=0.9513,
# train_size, test_size = None, None
imdb=0.7445,
# train_size, test_size = None, None
news=0.6727,
)),
perceptron=dict(module=PerceptronModule,
baseline_accuracy=dict(
# train_size, test_size = 3500, 1500
fitness=0.9507,
# train_size, test_size = None, None
imdb=0.73,
# train_size, test_size = None, None
news=0.9003,
)),
)
# Set up the agents that will act in the simulation.
agents = [
# Good
Agent(address="Good",
start_balance=10_000,
mean_deposit=50,
stdev_deposit=10,
mean_update_wait_s=10 * 60,
prob_mistake=0.0001,
),
# Malicious: A determined agent with the goal of disrupting others.
Agent(address="Bad",
start_balance=10_000,
mean_deposit=100,
stdev_deposit=3,
mean_update_wait_s=1 * 60 * 60,
good=False,
),
# An agent that just pays to call the model.
Agent(address="Caller",
start_balance=30_000,
mean_deposit=0,
stdev_deposit=0,
mean_update_wait_s=2 * 60 * 60,
calls_model=True,
pay_to_call=50
),
]
def main():
global agents
# This file is set up to use different models and datasets.
dataset = 'offensive'
model_type = 'nb'
assert dataset in datasets
assert model_type in models
train_size = datasets[dataset]['train_size']
test_size = datasets[dataset]['test_size']
if train_size is None:
init_train_data_portion = 0.08
else:
init_train_data_portion = 100 / train_size
# No caller (assume free to call).
agents = agents[:-1]
# Set up the data, model, and incentive mechanism.
inj = Injector([
DefaultCollaborativeTrainerModule,
datasets[dataset]['module'],
MurmurHash3Module,
LoggingModule,
models[model_type]['module'],
StakeableImModule,
])
s = inj.get(Simulator)
# Start the simulation.
s.simulate(agents,
baseline_accuracy=models[model_type]['baseline_accuracy'].get(dataset),
init_train_data_portion=init_train_data_portion,
train_size=train_size,
test_size=test_size,
filename_indicator=f"{dataset}-{model_type}"
)
# Run with `bokeh serve PATH`.
if re.match('bk_script_|bokeh_app_', __name__):
main()
else:
print("`__name__` didn't match the pattern. Bokeh app will not run.")
|
import os
import sys
import math
from injector import inject, Injector
from sklearn.naive_bayes import MultinomialNB
from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule
from decai.simulation.contract.collab_trainer import DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.titanic_data_loader import TitanicDataModule
from decai.simulation.logging_module import LoggingModule
from decai.simulation.simulate import Agent, Simulator
# For `bokeh serve`.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
# FIXME Using MultinomialNB might not work well with the Titanic dataset because it requires discrete features.
class Runner(object):
@inject
def __init__(self,
data: DataLoader,
simulator: Simulator,
):
self._data = data
self._s = simulator
def run(self):
init_train_data_portion = 0.10
# Set up the agents that will act in the simulation.
agents = [
# Good
Agent(address="Good",
start_balance=1_000,
mean_deposit=5,
stdev_deposit=1,
mean_update_wait_s=10 * 60,
),
# Malicious: a determined agent with the goal of disrupting others.
Agent(address="Bad",
start_balance=1_000,
mean_deposit=10,
stdev_deposit=3,
mean_update_wait_s=1 * 60 * 60,
good=False,
),
]
# Start the simulation.
self._s.simulate(agents,
baseline_accuracy=0.791,
init_train_data_portion=init_train_data_portion,
accuracy_plot_wait_s=math.inf,
)
# Run with `bokeh serve PATH`.
if __name__.startswith('bk_script_'):
# Set up the data, model, and incentive mechanism.
inj = Injector([
SciKitClassifierModule(MultinomialNB),
DefaultCollaborativeTrainerModule,
LoggingModule,
StakeableImModule,
TitanicDataModule,
])
inj.get(Runner).run()
if __name__ == '__main__':
# Train and evaluate the model.
inj = Injector([
SciKitClassifierModule(MultinomialNB),
DefaultCollaborativeTrainerModule,
LoggingModule,
StakeableImModule,
TitanicDataModule
])
d = inj.get(DataLoader)
(x_train, y_train), (x_test, y_test) = d.load_data()
c = inj.get(Classifier)
c.init_model(x_train, y_train)
score = c.evaluate(x_train, y_train)
print(f"Evaluation on training data: {score * 100:0.2f}%")
if len(x_test) > 0:
score = c.evaluate(x_test, y_test)
print(f"Evaluation on test data: {score * 100:0.2f}%")
|
from abc import ABC, abstractmethod
from injector import Module, inject, singleton
from decai.simulation.contract.balances import Balances
from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.data.data_handler import DataHandler
from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism
from decai.simulation.contract.objects import Msg, SmartContract
class CollaborativeTrainer(ABC, SmartContract):
"""
Base class for the main interface to create simulations of a training model in a smart contract.
"""
def __init__(self,
balances: Balances,
data_handler: DataHandler,
incentive_mechanism: IncentiveMechanism,
model: Classifier,
):
super().__init__()
self.data_handler = data_handler
self.im = incentive_mechanism
self.model = model
self._balances = balances
@abstractmethod
def add_data(self, msg: Msg, data, label):
"""
Update the model with one data sample.
:param msg: Standard message to pass to any method of a smart contract.
:param data: A single sample of training data for the model.
:param label: The label for `data`.
"""
pass
@abstractmethod
def predict(self, msg: Msg, data):
"""
:param msg: Standard message to pass to any method of a smart contract.
:param data:
:return: The predicted classification/label for `data`.
"""
pass
@abstractmethod
def refund(self, msg: Msg, data, classification, added_time: int):
"""
Attempt a refund for the deposit given with submitted data.
Must be called by the address that originally submitted the data.
:param msg: Standard message to pass to any method of a smart contract.
:param data: The data for which to attempt a refund.
:param classification: The label originally submitted with `data`.
:param added_time: The time when the data was added.
"""
pass
@abstractmethod
def report(self, msg: Msg, data, classification, added_time: int, original_author: str):
"""
Report bad or old data and attempt to get a reward.
:param msg: Standard message to pass to any method of a smart contract.
:param data: The data to report.
:param classification: The label originally submitted with `data`.
:param added_time: The time when the data was added.
:param original_author: The address that originally added the data.
"""
pass
@singleton
class DefaultCollaborativeTrainer(CollaborativeTrainer):
"""
Default implementation of the main interface.
"""
@inject
def __init__(self,
balances: Balances,
data_handler: DataHandler,
incentive_mechanism: IncentiveMechanism,
model: Classifier,
):
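# Forward the injected dependencies to the base class without repeating them.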
kwargs = dict(locals())
del kwargs['self']
del kwargs['__class__']
super().__init__(**kwargs)
self.data_handler.owner = self.address
self.im.owner = self.address
self.model.owner = self.address
def predict(self, msg: Msg, data):
self.im.distribute_payment_for_prediction(msg.sender, msg.value)
return self.model.predict(data)
# FUNCTIONS FOR HANDLING DATA
def add_data(self, msg: Msg, data, classification):
# Consider making sure duplicate data isn't added until it's been claimed.
cost, update_model = self.im.handle_add_data(msg.sender, msg.value, data, classification)
self.data_handler.handle_add_data(msg.sender, cost, data, classification)
if update_model:
self.model.update(data, classification)
# In Solidity the message's value gets taken automatically.
# Here we do this at the end in case something failed while trying to add data.
self._balances.send(msg.sender, self.address, cost)
def refund(self, msg: Msg, data, classification, added_time: int):
(claimable_amount, claimed_by_submitter, stored_data) = \
self.data_handler.handle_refund(msg.sender, data, classification, added_time)
prediction = self.model.predict(data)
refund_amount = self.im.handle_refund(msg.sender, stored_data,
claimable_amount, claimed_by_submitter, prediction)
self._balances.send(self.address, msg.sender, refund_amount)
# The Solidity version doesn't need this extra function call because if there is an error earlier,
# then the changes automatically get reverted.
self.data_handler.update_claimable_amount(msg.sender, stored_data, refund_amount)
def report(self, msg: Msg, data, classification, added_time: int, original_author: str):
claimed_by_reporter, stored_data = \
self.data_handler.handle_report(msg.sender, data, classification, added_time, original_author)
prediction = lambda: self.model.predict(data)
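# Passed as a lambda so the model prediction is only computed if the
# incentive mechanism actually needs it.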
reward_amount = self.im.handle_report(msg.sender, stored_data, claimed_by_reporter, prediction)
self.data_handler.update_claimable_amount(msg.sender, stored_data, reward_amount)
self._balances.send(self.address, msg.sender, reward_amount)
class DefaultCollaborativeTrainerModule(Module):
def configure(self, binder):
binder.bind(CollaborativeTrainer, to=DefaultCollaborativeTrainer)
|
from dataclasses import dataclass, field
from logging import Logger
from typing import Dict
from injector import inject, singleton
from decai.simulation.contract.objects import Address
@inject
@singleton
@dataclass
class Balances(object):
"""
Tracks balances in the simulation.
"""
_logger: Logger
_balances: Dict[Address, float] = field(default_factory=dict, init=False)
def __contains__(self, address: Address):
"""
:param address: A participant's address.
:return: `True` if the address is in the simulation, `False` otherwise.
"""
return address in self._balances
def __getitem__(self, address: Address) -> float:
"""
:param address: A participant's address.
:return: The balance for `address`.
"""
return self._balances[address]
def get_all(self) -> Dict[Address, float]:
"""
:return: A copy of the balances.
"""
return dict(self._balances)
def initialize(self, address: Address, start_balance: float):
""" Initialize a participant's balance. """
assert address not in self._balances, f"'{address}' already has a balance."
self._balances[address] = start_balance
def send(self, sending_address: Address, receiving_address: Address, amount):
""" Send funds from one participant to another. """
assert amount >= 0
if amount > 0:
sender_balance = self._balances[sending_address]
if sender_balance < amount:
self._logger.warning(f"'{sending_address} has {sender_balance} < {amount}.\n"
f"Will only send {sender_balance}.")
amount = sender_balance
self._balances[sending_address] -= amount
if receiving_address not in self._balances:
self.initialize(receiving_address, amount)
else:
self._balances[receiving_address] += amount
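# Example (illustrative): transfers are capped at the sender's balance.
#   balances.initialize('a', 10)
#   balances.send('a', 'b', 15)  # Only 10 is moved; 'b' is auto-initialized with 10.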
|
# Objects for all smart contracts.
from dataclasses import dataclass, field
from typing import Optional
from injector import singleton
Address = str
""" An address that can receive funds and participate in training models. """
@dataclass
class Msg:
"""
A message sent to a smart contract.
:param sender: The sender's address.
:param value: Amount sent with the message.
"""
sender: Address
# Need to use float since the numbers might be large. They should still actually be integers.
value: float
class RejectException(Exception):
"""
The smart contract rejected the transaction.
"""
pass
class SmartContract(object):
"""
A fake smart contract.
"""
def __init__(self):
self.address: Address = f'{type(self).__name__}-{id(self)}'
""" The address of this contract. """
self.owner: Optional[Address] = None
""" The owner of this contract. """
@singleton
@dataclass
class TimeMock(object):
"""
Helps fake the current time (in seconds).
Ideally the value returned is an integer (like `now` in Solidity) but this is not guaranteed.
Normally in an Ethereum smart contract `now` can be called.
To speed up simulations, use this class to get the current time.
"""
_time: float = field(default=0, init=False)
def __call__(self, *args, **kwargs):
""" Get the currently set time (in seconds). """
return self._time
def add_time(self, amount):
""" Add `amount` (in seconds) to the current time. """
self._time += amount
def set_time(self, time_value):
""" Set the time to return when `time()` is called. """
self._time = time_value
def time(self):
""" Get the currently set time (in seconds). """
return self._time
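# Example (illustrative): injected consumers share this singleton clock.
#   clock = TimeMock()
#   clock.add_time(60)  # Advance one minute.
#   assert clock() == 60 and clock.time() == 60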
|
from collections import Counter
from injector import inject
from sklearn.neighbors import NearestCentroid
# Purposely not a singleton so that it is easy to get a model that has not been initialized.
@inject
class NearestCentroidClassifier(NearestCentroid):
def fit(self, X, y):
self._num_samples_per_centroid = Counter(y)
super().fit(X, y)
def partial_fit(self, training_data, labels):
# Assume len(training_data) == len(labels) == 1
# Assume centroids are indexed by class 0-N.
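# Running-mean update: with n samples already averaged into the centroid,
# the new centroid is (centroid * n + sample) / (n + 1).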
sample = training_data[0]
label = labels[0]
n = self._num_samples_per_centroid[label]
self.centroids_[label] = (self.centroids_[label] * n + sample) / (n + 1)
self._num_samples_per_centroid[label] = n + 1
|
import os
from sklearn.linear_model import SGDClassifier
from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule
class PerceptronModule(SciKitClassifierModule):
def __init__(self, class_weight=None):
super().__init__(
_model_initializer=lambda: SGDClassifier(
loss='perceptron',
n_jobs=max(1, os.cpu_count() - 2),
random_state=0xDeCA10B,
learning_rate='optimal',
class_weight=class_weight,
# Don't really care about tol, just setting it to remove a warning.
tol=1e-3,
penalty=None))
|
import logging
from abc import ABC, abstractmethod
from typing import List
from decai.simulation.contract.objects import SmartContract
from decai.simulation.data.featuremapping.feature_index_mapper import FeatureIndexMapping
class Classifier(ABC, SmartContract):
"""
A classifier that takes a data sample as input and returns a predicted classification/label for the data.
"""
@abstractmethod
def evaluate(self, data, labels) -> float:
"""
Evaluate the model.
:param data: Data samples.
:param labels: The ground truth labels for `data`.
:return: The accuracy for the given test set.
"""
pass
@abstractmethod
def log_evaluation_details(self, data, labels, level=logging.INFO) -> float:
"""
Log some evaluation details.
:param data: Data samples.
:param labels: The ground truth labels for `data`.
:param level: The level at which to log.
:return: The accuracy for the given test set.
"""
pass
@abstractmethod
def init_model(self, training_data, labels, save_model=False):
"""
Fit the model to a specific dataset.
:param training_data: The data to use to train the model.
:param labels: The ground truth labels for `training_data`.
:param save_model: `True` if the model should be saved, `False` otherwise.
"""
pass
@abstractmethod
def predict(self, data):
"""
:param data: The data or features for one sample.
:return: The predicted classification or label for `data`.
"""
pass
@abstractmethod
def update(self, data, classification):
"""
Update the classifier with one data sample.
:param data: The training data or features for one sample.
:param classification: The label for `data`.
"""
pass
@abstractmethod
def reset_model(self):
"""
Re-initialize the model to the same state it was in after `init_model` was called.
"""
pass
@abstractmethod
def export(self,
path: str,
classifications: List[str] = None,
model_type: str = None,
feature_index_mapping: FeatureIndexMapping = None):
"""
Export the model in a format for the demo Node.js code to load.
:param path: The path to save the exported model to.
:param classifications: The classifications output by the model.
:param model_type: The type of the model.
:param feature_index_mapping: Mapping of the feature indices. Mainly for sparse models that were converted to dense ones.
"""
pass
|
import json
import logging
import os
import time
from dataclasses import dataclass
from logging import Logger
from pathlib import Path
from typing import Any, Callable, List
import joblib
import numpy as np
import scipy.sparse
from injector import ClassAssistedBuilder, Module, inject, provider
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.naive_bayes import MultinomialNB
from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.classification.ncc import NearestCentroidClassifier
from decai.simulation.data.featuremapping.feature_index_mapper import FeatureIndexMapping
# Purposely not a singleton so that it is easy to get a model that has not been initialized.
@inject
@dataclass
class SciKitClassifier(Classifier):
"""
Classifier for a scikit-learn like model.
"""
_logger: Logger
_model_initializer: Callable[[], Any]
_model = None
def __post_init__(self):
self._original_model_path = Path('saved_models') / f'{time.time()}-{id(self)}.joblib'
def evaluate(self, data, labels) -> float:
assert self._model is not None, "The model has not been initialized yet."
assert isinstance(data, np.ndarray) or scipy.sparse.isspmatrix(data), \
f"The data must be a matrix. Got: {type(data)}"
assert isinstance(labels, np.ndarray), "The labels must be an array."
self._logger.debug("Evaluating.")
return self._model.score(data, labels)
def log_evaluation_details(self, data, labels, level=logging.INFO) -> float:
assert self._model is not None, "The model has not been initialized yet."
assert isinstance(data, np.ndarray), "The data must be an array."
assert isinstance(labels, np.ndarray), "The labels must be an array."
self._logger.debug("Evaluating.")
predicted_labels = self._model.predict(data)
result = accuracy_score(labels, predicted_labels)
if self._logger.isEnabledFor(level):
m = confusion_matrix(labels, predicted_labels)
report = classification_report(labels, predicted_labels)
self._logger.log(level,
"Confusion matrix:\n%s"
"\nReport:\n%s"
"\nAccuracy: %0.2f%%",
m, report, result * 100)
return result
def init_model(self, training_data, labels, save_model=False):
assert self._model is None, "The model has already been initialized."
self._logger.debug("Initializing model.")
self._model = self._model_initializer()
self._logger.debug("training_data.shape: %s. dtype: %s", training_data.shape, training_data.dtype)
self._model.fit(training_data, labels)
if save_model:
self._logger.debug("Saving model to \"%s\".", self._original_model_path)
os.makedirs(os.path.dirname(self._original_model_path), exist_ok=True)
joblib.dump(self._model, self._original_model_path)
def predict(self, data):
assert self._model is not None, "The model has not been initialized yet."
assert isinstance(data, np.ndarray), "The data must be an array."
return self._model.predict([data])[0]
def update(self, data, classification):
assert self._model is not None, "The model has not been initialized yet."
self._model.partial_fit([data], [classification])
def reset_model(self):
assert self._model is not None, "The model has not been initialized yet."
assert self._original_model_path.exists(), "The model has not been saved. Perhaps saving was disabled."
self._logger.debug("Loading model from \"%s\".", self._original_model_path)
self._model = joblib.load(self._original_model_path)
def export(self,
path: str,
classifications: List[str] = None,
model_type: str = None,
feature_index_mapping: FeatureIndexMapping = None):
assert self._model is not None, "The model has not been initialized yet."
if isinstance(self._model, SGDClassifier) and self._model.loss == 'perceptron':
if classifications is None:
classifications = ["0", "1"]
model = {
# Dense by default; switched to 'sparse perceptron' below when a feature index mapping is given.
'type': model_type or 'perceptron',
'classifications': classifications,
'weights': self._model.coef_[0].tolist(),
'intercept': self._model.intercept_[0],
}
if feature_index_mapping is not None:
if model_type is None:
model['type'] = 'sparse perceptron'
weights = model['weights']
del model['weights']
weights = {str(i): v for (i, v) in zip(feature_index_mapping, weights) if v != 0}
model['sparseWeights'] = weights
elif isinstance(self._model, MultinomialNB):
if classifications is None:
# One label per class (`class_count_` has an entry per class).
classifications = list(map(str, range(len(self._model.class_count_))))
feature_counts = []
for class_features in self._model.feature_count_:
class_feature_counts = []
for index, count in enumerate(class_features):
if count != 0:
# Counts should already be integers.
if feature_index_mapping is not None:
index = feature_index_mapping[index]
class_feature_counts.append((index, int(count)))
feature_counts.append(class_feature_counts)
model = {
'type': model_type or 'naive bayes',
'classifications': classifications,
'classCounts': self._model.class_count_.astype(dtype=np.int64).tolist(),
'featureCounts': feature_counts,
'totalNumFeatures': self._model.feature_count_.shape[1],
'smoothingFactor': self._model.alpha,
}
elif isinstance(self._model, NearestCentroidClassifier):
if feature_index_mapping is not None:
if model_type is None:
model_type = 'sparse nearest centroid classifier'
centroids = dict()
if classifications is None:
classifications = list(map(str, range(len(self._model.centroids_))))
for i, classification in enumerate(classifications):
centroid = self._model.centroids_[i].tolist()
if feature_index_mapping is not None:
centroid = {str(i): v for (i, v) in zip(feature_index_mapping, centroid) if v != 0}
centroids[classification] = dict(
centroid=centroid,
dataCount=self._model._num_samples_per_centroid[i])
model = {
'type': model_type or 'nearest centroid classifier',
'centroids': centroids,
}
else:
raise Exception("Unrecognized model type.")
with open(path, 'w') as f:
json.dump(model, f, separators=(',', ':'))
@dataclass
class SciKitClassifierModule(Module):
"""
Module to provide scikit-learn-like classifier models.
"""
_model_initializer: Any
# Purposely not a singleton so that it is easy to get a model that has not been initialized.
@provider
def provide_classifier(self, builder: ClassAssistedBuilder[SciKitClassifier]) -> Classifier:
return builder.build(
_model_initializer=self._model_initializer,
)
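# A minimal usage sketch (assumptions: `LoggingModule`, as used by the tests in
# this repo, provides the `Logger` that `SciKitClassifier` requires; the data
# here is made up).
if __name__ == '__main__':
    from injector import Injector
    from decai.simulation.logging_module import LoggingModule

    inj = Injector([
        LoggingModule,
        SciKitClassifierModule(_model_initializer=MultinomialNB),
    ])
    model = inj.get(Classifier)  # A fresh, uninitialized classifier (not a singleton).
    X = np.array([[1, 0], [0, 2], [3, 0], [0, 1]])
    y = np.array([0, 1, 0, 1])
    model.init_model(X, y)
    print(model.evaluate(X, y))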
|
from decai.simulation.contract.classification.ncc import NearestCentroidClassifier
from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule
class NearestCentroidClassifierModule(SciKitClassifierModule):
def __init__(self):
super().__init__(
_model_initializer=NearestCentroidClassifier)
|
from skmultiflow.trees import HAT, RegressionHAT
from decai.simulation.contract.classification.scikit_classifier import SciKitClassifierModule
class DecisionTreeModule(SciKitClassifierModule):
def __init__(self, regression=False):
if regression:
model_initializer = lambda: RegressionHAT(
# leaf_prediction='mc'
)
else:
model_initializer = lambda: HAT(
# leaf_prediction='mc',
# nominal_attributes=[ 4],
)
super().__init__(_model_initializer=model_initializer)
|
import unittest
import numpy as np
from injector import Injector
from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.classification.ncc_module import NearestCentroidClassifierModule
from decai.simulation.logging_module import LoggingModule
class TestNearestCentroidClassifier(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.inj = Injector([
LoggingModule,
NearestCentroidClassifierModule,
])
def test_partial_fit(self):
model = self.inj.get(Classifier)
data = [
[-1.0, -1.0, ],
[-0.5, -0.5, ],
[+1.0, +1.0],
[+0.5, +0.5],
]
labels = [0, 0, 1, 1, ]
data = np.array(data)
labels = np.array(labels)
model.init_model(data, labels)
self.assertEqual(1, model.evaluate(data, labels))
sample = np.array([0.1, 0.1, ])
self.assertEqual(1, model.predict(sample))
# Update a point beyond `sample` so that `sample` gets a new label.
model.update(np.array([0.3, 0.3, ]), 0)
self.assertEqual(0, model.predict(sample))
self.assertEqual(1, model.evaluate(data, labels))
def test_partial_fit_2(self):
model = self.inj.get(Classifier)
data = [
[0, -1.0, ],
[0, -0.5, ],
[0, +1.0],
[0, +0.5],
]
labels = [0, 0, 1, 1, ]
data = np.array(data)
labels = np.array(labels)
model.init_model(data, labels)
self.assertEqual(1, model.evaluate(data, labels))
sample = np.array([0, +0.1, ])
self.assertEqual(1, model.predict(sample))
# Update a point beyond `sample` so that `sample` gets a new label.
model.update(np.array([0, 0, ]), 0)
self.assertEqual(0, model.predict(sample))
self.assertEqual(1, model.evaluate(data, labels))
|
import random
import unittest
import numpy as np
from injector import Injector
from decai.simulation.contract.balances import Balances
from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.classification.perceptron import PerceptronModule
from decai.simulation.contract.collab_trainer import CollaborativeTrainer, DefaultCollaborativeTrainerModule
from decai.simulation.contract.incentive.stakeable import StakeableImModule
from decai.simulation.contract.objects import Msg, RejectException, TimeMock
from decai.simulation.logging_module import LoggingModule
def _ground_truth(data):
return data[0] * data[2]
class TestCollaborativeTrainer(unittest.TestCase):
@classmethod
def setUpClass(cls):
inj = Injector([
DefaultCollaborativeTrainerModule,
LoggingModule,
PerceptronModule,
StakeableImModule,
])
cls.balances = inj.get(Balances)
cls.decai = inj.get(CollaborativeTrainer)
cls.time_method = inj.get(TimeMock)
cls.good_address = 'sender'
initial_balance = 1E6
cls.balances.initialize(cls.good_address, initial_balance)
msg = Msg(cls.good_address, cls.balances[cls.good_address])
X = np.array([
# Initialization Data
[0, 0, 0],
[1, 1, 1],
# Data to Add
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
])
y = np.array([_ground_truth(x) for x in X])
cls.decai.model.init_model(np.array([X[0, :], X[1, :]]),
np.array([y[0], y[1]]))
score = cls.decai.model.evaluate(X, y)
assert score != 1, "Model shouldn't fit the data yet."
# Add all data.
first_added_time = None
for i in range(X.shape[0]):
x = X[i]
cls.time_method.set_time(cls.time_method() + 1)
if first_added_time is None:
first_added_time = cls.time_method()
cls.decai.add_data(msg, x, y[i])
for _ in range(1000):
score = cls.decai.model.evaluate(X, y)
if score >= 1:
break
i = random.randint(0, X.shape[0] - 1)
x = X[i]
cls.time_method.set_time(cls.time_method() + 1)
cls.decai.add_data(msg, x, y[i])
assert score == 1, "Model didn't fit the data."
bal = cls.balances[msg.sender]
assert bal < initial_balance, "Adding data should have a cost."
# Make sure sender has some good data refunded so that they can report data later.
cls.time_method.set_time(cls.time_method() + cls.decai.im.refund_time_s + 1)
cls.decai.refund(msg, X[0], y[0], first_added_time)
assert cls.balances[msg.sender] > bal, "Refunding should return value."
def test_predict(self):
data = np.array([0, 1, 0])
correct_class = _ground_truth(data)
prediction = self.decai.model.predict(data)
self.assertEqual(prediction, correct_class)
def test_refund(self):
data = np.array([0, 2, 0])
correct_class = _ground_truth(data)
orig_address = "Orig"
bal = 1E5
self.balances.initialize(orig_address, bal)
msg = Msg(orig_address, 1E3)
self.time_method.set_time(self.time_method() + 1)
added_time = self.time_method()
self.decai.add_data(msg, data, correct_class)
self.assertLess(self.balances[orig_address], bal)
# Add same data from another address.
msg = Msg(self.good_address, 1E3)
self.time_method.set_time(self.time_method() + 1)
bal = self.balances[self.good_address]
self.decai.add_data(msg, data, correct_class)
self.assertLess(self.balances[self.good_address], bal)
# Original address refunds.
msg = Msg(orig_address, 1E3)
bal = self.balances[orig_address]
self.time_method.set_time(self.time_method() + self.decai.im.refund_time_s + 1)
self.decai.refund(msg, data, correct_class, added_time)
self.assertGreater(self.balances[orig_address], bal)
def test_report(self):
data = np.array([0, 0, 0])
correct_class = _ground_truth(data)
submitted_classification = 1 - correct_class
# Add bad data.
malicious_address = 'malicious'
self.balances.initialize(malicious_address, 1E6)
bal = self.balances[malicious_address]
msg = Msg(malicious_address, bal)
self.time_method.set_time(self.time_method() + 1)
added_time = self.time_method()
self.decai.add_data(msg, data, submitted_classification)
self.assertLess(self.balances[malicious_address], bal,
"Adding data should have a cost.")
self.time_method.set_time(self.time_method() + self.decai.im.refund_time_s + 1)
# Can't refund.
msg = Msg(malicious_address, self.balances[malicious_address])
try:
self.decai.refund(msg, data, submitted_classification, added_time)
self.fail("Should have failed.")
except RejectException as e:
self.assertEqual("The model doesn't agree with your contribution.", e.args[0])
bal = self.balances[self.good_address]
msg = Msg(self.good_address, bal)
self.decai.report(msg, data, submitted_classification, added_time, malicious_address)
self.assertGreater(self.balances[self.good_address], bal)
def test_report_take_all(self):
data = np.array([0, 0, 0])
correct_class = _ground_truth(data)
submitted_classification = 1 - correct_class
# Add bad data.
malicious_address = 'malicious_take_backer'
self.balances.initialize(malicious_address, 1E6)
bal = self.balances[malicious_address]
msg = Msg(malicious_address, bal)
self.time_method.set_time(self.time_method() + 1)
added_time = self.time_method()
self.decai.add_data(msg, data, submitted_classification)
self.assertLess(self.balances[malicious_address], bal,
"Adding data should have a cost.")
self.time_method.set_time(self.time_method() + self.decai.im.any_address_claim_wait_time_s + 1)
# Can't refund.
msg = Msg(malicious_address, self.balances[malicious_address])
try:
self.decai.refund(msg, data, submitted_classification, added_time)
self.fail("Should have failed.")
except RejectException as e:
self.assertEqual("The model doesn't agree with your contribution.", e.args[0])
bal = self.balances[malicious_address]
msg = Msg(malicious_address, bal)
self.decai.report(msg, data, submitted_classification, added_time, malicious_address)
self.assertGreater(self.balances[malicious_address], bal)
def test_reset(self):
inj = Injector([
LoggingModule,
PerceptronModule,
])
m = inj.get(Classifier)
X = np.array([
# Initialization Data
[0, 0, 0],
[1, 1, 1],
])
y = np.array([_ground_truth(x) for x in X])
m.init_model(X, y, save_model=True)
data = np.array([
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
])
original_predictions = [m.predict(x) for x in data]
labels = np.array([_ground_truth(x) for x in data])
for x, y in zip(data, labels):
m.update(x, y)
predictions_after_training = [m.predict(x) for x in data]
self.assertNotEqual(original_predictions, predictions_after_training)
m.reset_model()
new_predictions = [m.predict(x) for x in data]
self.assertEqual(original_predictions, new_predictions)
|
from collections import Counter
from logging import Logger
import math
from injector import inject, Module, singleton
from decai.simulation.contract.balances import Balances
from decai.simulation.contract.data.data_handler import StoredData
from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism
from decai.simulation.contract.objects import Address, RejectException, TimeMock
@singleton
class Stakeable(IncentiveMechanism):
"""
The Deposit, Take, Reward IM.
A deposit is required to add data.
Later that deposit can be reclaimed if the model still agrees with the contribution.
"""
@inject
def __init__(self,
# Injected
balances: Balances,
logger: Logger,
time_method: TimeMock,
# Parameters
refund_time_s=60 * 60 * 24 * 1,
any_address_claim_wait_time_s=60 * 60 * 24 * 9,
cost_weight=1,
):
super().__init__(refund_time_s=refund_time_s, any_address_claim_wait_time_s=any_address_claim_wait_time_s)
self._balances = balances
self._logger = logger
self._time = time_method
# Make sure there is at least a week for the refund.
min_refund_window_s = 60 * 60 * 24 * 7
assert self.any_address_claim_wait_time_s > self.refund_time_s + min_refund_window_s, "Claim time is not enough."
self.cost_weight = cost_weight
self.num_good_data_per_user = Counter()
self.total_num_good_data = 0
self._last_update_time_s = int(self._time())
def distribute_payment_for_prediction(self, sender, value):
if value > 0:
for agent_address, num_good in self.num_good_data_per_user.items():
# Round down like Solidity would.
# Also helps avoid errors for possible rounding so
# total value distributed < value.
self._balances.send(sender, agent_address, int(value * num_good / self.total_num_good_data))
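# Example of the integer division: with value=7 and good-data counts
# {A: 1, B: 2} (total 3), A receives int(7 * 1 / 3) = 2 and B receives
# int(7 * 2 / 3) = 4; the remaining 1 simply stays with `sender`.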
def get_next_add_data_cost(self, data, classification) -> float:
"""
:param data: A single sample of training data for the model.
:param classification: The label for `data`.
:return: The current cost to update a model with a specific sample of training data.
"""
current_time_s = int(self._time())
# TODO Limit how many times a data point can be added if the model already classifies right for it?
# TODO Add cost to flip all data?
# TODO Add discount if already submitted good data?
# Convert to integers like in Solidity.
time_since_last_update_s = int((current_time_s - self._last_update_time_s))
if time_since_last_update_s <= 0:
raise RejectException("Not enough time has passed since the last update.")
# We really want to think about the time in hours
# (divide by 3600, but since that is under the square root in the denominator, we multiply by sqrt(3600) = 60).
# Equivalent to: cost = self.cost_weight / math.sqrt(time_since_last_update_s / 3600)
result = self.cost_weight * 60 / int(math.sqrt(time_since_last_update_s))
result = int(result)
# Make sure there is a minimum cost to adding data.
if result < 1:
result = 1
return result
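# Worked example: with cost_weight=1 and one hour since the last update
# (time_since_last_update_s = 3600), cost = 1 * 60 / int(sqrt(3600)) = 1,
# so the cost decays to the minimum of 1 within an hour; after one minute
# (60 s), cost = int(60 / int(sqrt(60))) = int(60 / 7) = 8.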
def handle_add_data(self, contributor_address: Address, msg_value: float, data, classification) -> (float, bool):
cost = self.get_next_add_data_cost(data, classification)
update_model = True
if cost > msg_value:
raise RejectException(f"Did not pay enough. Sent {msg_value} < {cost}")
self._last_update_time_s = self._time()
return (cost, update_model)
def handle_refund(self, submitter: str, stored_data: StoredData,
claimable_amount: float, claimed_by_submitter: bool,
prediction) -> float:
result = claimable_amount
# Do not need to check submitter == stored_data.sender because DataHandler already did it.
if claimed_by_submitter:
raise RejectException("Deposit already claimed by submitter.")
if result <= 0:
raise RejectException("There is no reward left to claim.")
current_time_s = int(self._time())
if current_time_s - stored_data.time <= self.refund_time_s:
raise RejectException("Not enough time has passed.")
if callable(prediction):
prediction = prediction()
if prediction != stored_data.classification:
raise RejectException("The model doesn't agree with your contribution.")
self.num_good_data_per_user[submitter] += 1
self.total_num_good_data += 1
return result
def handle_report(self, reporter: str, stored_data: StoredData, claimed_by_reporter: bool, prediction) -> float:
if stored_data.claimable_amount <= 0:
raise RejectException("There is no reward left to claim.")
current_time_s = int(self._time())
if current_time_s - stored_data.time >= self.any_address_claim_wait_time_s:
# Enough time has passed, give the entire remaining deposit to the reporter.
self._logger.debug("Giving all remaining deposit to \"%s\".", reporter)
result = stored_data.claimable_amount
return result
# Don't allow someone to claim back their own deposit if their data was wrong.
# They can still claim it from another address but they will have had to have sent good data from that address.
if reporter == stored_data.sender:
raise RejectException("Cannot take your own deposit. Ask for a refund instead.")
if claimed_by_reporter:
raise RejectException("Deposit already claimed by reporter.")
if current_time_s - stored_data.time <= self.refund_time_s:
raise RejectException("Not enough time has passed.")
if callable(prediction):
prediction = prediction()
if prediction == stored_data.classification:
raise RejectException("The model should not agree with the contribution.")
num_good = self.num_good_data_per_user[reporter]
if num_good <= 0:
raise RejectException(f"No good data was verified by reporter '{reporter}'.")
result = stored_data.initial_deposit * num_good / self.total_num_good_data
# Handle possible rounding errors or if there is too little to divide to reporters.
if result <= 0 or result > stored_data.claimable_amount:
result = stored_data.claimable_amount
return result
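# Worked example: a reporter with 2 verified good contributions out of 10 total
# takes initial_deposit * 2 / 10 of the reported data's deposit per report, so
# reporters who have contributed more verified data take a larger share.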
class StakeableImModule(Module):
def configure(self, binder):
binder.bind(IncentiveMechanism, to=Stakeable)
|
import random
from collections import Counter, defaultdict
from dataclasses import dataclass, field
from enum import Enum
from hashlib import sha256
from logging import Logger
from typing import Dict, List, Optional, Tuple
import math
import numpy as np
from injector import ClassAssistedBuilder, inject, Module, provider, singleton
from decai.simulation.contract.balances import Balances
from decai.simulation.contract.classification.classifier import Classifier
from decai.simulation.contract.data.data_handler import StoredData
from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism
from decai.simulation.contract.objects import Address, Msg, RejectException, TimeMock
class MarketPhase(Enum):
""" Phases for the current market. """
# Phases are in chronological order.
INITIALIZATION = 0
""" The market is being initialized and awaiting for the requested test set index to be revealed. """
PARTICIPATION = 1
""" The market is open to data contributions. """
REVEAL_TEST_SET = 2
""" The market will no longer accept data and the test set must be revealed before rewards can be calculated. """
REWARD = 3
""" No more data contributions are being accepted but rewards still need to be calculated. """
REWARD_RESTART = 4
"""
Same as `REWARD` but contributions have just been filtered out
and the iteration needs to restart with the remaining contributions.
"""
REWARD_COLLECT = 5
""" The reward values have been computed and are ready to be collected. """
@dataclass
class _Contribution:
"""
A training data contribution.
This is stored for convenience, but for some applications storing the data could be very expensive;
instead, hashes could be stored and, during the reward phase,
used to verify the data as it is re-submitted.
Note: that would not be in the spirit of the prediction market (the current state should be public)
since the model would not actually be updated and the submitted data would be private,
so new data contributors would have very limited information.
"""
contributor_address: Address
data: np.ndarray
classification: int
balance: int
"""
Initially this is the amount deposited with this contribution.
If contributions are not grouped by contributor, then while calculating rewards this gets updated to be the balance
for this particular contribution, to know if it should get kicked out of the reward phase.
"""
score: Optional[int] = field(default=None, init=False)
"""
The score for this contribution.
Mainly used when contributions are not grouped.
"""
accuracy: Optional[float] = field(default=None, init=False)
""" The accuracy of the model on the test set after adding this contribution. """
class PredictionMarket(IncentiveMechanism):
"""
An IM where rewards are computed based on how the model's performance changes with respect to a test set.
For now, for the purposes of the simulation, the market is only intended to be run once.
Eventually this class and the actual smart contract implementation of it
should support restarting the market with a new bounty once a market has ended.
"""
@inject
def __init__(self,
# Injected
balances: Balances,
logger: Logger,
model: Classifier,
time_method: TimeMock,
# Parameters
any_address_claim_wait_time_s=60 * 60 * 24 * 7,
# Configuration Options
allow_greater_deposit=False,
group_contributions=False,
reset_model_during_reward_phase=False,
):
super().__init__(any_address_claim_wait_time_s=any_address_claim_wait_time_s)
self._balances = balances
self._logger = logger
self.model = model
self._time = time_method
# Configuration Options
self._allow_greater_deposit = allow_greater_deposit
self._group_contributions = group_contributions
self._reset_model_during_reward_phase = reset_model_during_reward_phase
self._market_earliest_end_time_s = None
self._market_balances: Dict[Address, float] = defaultdict(float)
""" Keeps track of balances in the market. """
self._next_data_index = None
self.min_stake = 1
"""
The minimum required amount to deposit.
Should be at least 1 to handle the worst case where the contribution takes the accuracy from 1 to 0.
"""
self.state = None
@property
def reset_model_during_reward_phase(self):
return self._reset_model_during_reward_phase
def distribute_payment_for_prediction(self, sender, value):
pass
def get_num_contributions_in_market(self):
"""
:return: The total number of contributions currently in the market.
This can decrease as "bad" contributors are removed during the reward phase.
"""
return len(self._market_data)
# Methods in chronological order of the PM.
@staticmethod
def hash_test_set(test_set):
"""
:param test_set: A test set.
:return: The hash of `test_set`.
"""
return sha256(str(test_set).encode()).hexdigest()
@staticmethod
def get_test_set_hashes(num_pieces, x_test, y_test) -> Tuple[list, list]:
"""
Helper to break the test set into `num_pieces` to initialize the market.
:param num_pieces: The number of pieces to break the test set into.
:param x_test: The features for the test set.
:param y_test: The labels for `x_test`.
:return: tuple
A list of `num_pieces` hashes, one per portion of the test set.
The test set divided into `num_pieces` portions.
"""
test_sets = []
test_dataset_hashes = []
assert len(x_test) == len(y_test) >= num_pieces
for i in range(num_pieces):
start = int(i / num_pieces * len(x_test))
end = int((i + 1) / num_pieces * len(x_test))
test_set = list(zip(x_test[start:end], y_test[start:end]))
test_sets.append(test_set)
test_dataset_hashes.append(PredictionMarket.hash_test_set(test_set))
assert sum(len(t) for t in test_sets) == len(x_test)
return test_dataset_hashes, test_sets
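# Example: with num_pieces=2 and 4 test samples, piece 0 holds samples [0, 2)
# and piece 1 holds samples [2, 4); the hash at index i commits to piece i.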
def initialize_market(self, msg: Msg,
test_dataset_hashes: List[str],
# Ending criteria:
min_length_s: int, min_num_contributions: int) -> int:
"""
Initialize the prediction market.
:param msg: Indicates the one posting the bounty and the amount being committed for the bounty.
The total bounty should be an integer since it also represents the number of "rounds" in the PM.
:param test_dataset_hashes: The committed hashes for the portions of the test set.
:param min_length_s: The minimum length in seconds of the market.
:param min_num_contributions: The minimum number of contributions before ending the market.
:return: The index of the test set that must be revealed.
"""
assert self._market_earliest_end_time_s is None
assert self._next_data_index is None, "The market end has already been triggered."
assert self.state is None
self.bounty_provider = msg.sender
self.total_bounty = msg.value
self.remaining_bounty_rounds = self.total_bounty
self.test_set_hashes = test_dataset_hashes
assert len(self.test_set_hashes) > 1
self.test_reveal_index = random.randrange(len(self.test_set_hashes))
self.next_test_set_index_to_verify = 0
if self.next_test_set_index_to_verify == self.test_reveal_index:
self.next_test_set_index_to_verify += 1
self._market_data: List[_Contribution] = []
self.min_num_contributions = min_num_contributions
self._market_earliest_end_time_s = self._time() + min_length_s
self.reward_phase_end_time_s = None
self.prev_acc = None
self.original_acc = None
# Pay the owner since it will be the owner distributing funds using `handle_refund` and `handle_report` later.
self._balances.send(self.bounty_provider, self.owner, self.total_bounty)
self.state = MarketPhase.INITIALIZATION
return self.test_reveal_index
def add_test_set_hashes(self, msg: Msg, more_test_set_hashes: List[str]) -> int:
"""
(Optional)
Add more hashes for portions of the test set to reveal.
This helps in case not all hashes can be sent in one transaction.
:param msg: The message for this transaction.
The sender must be the bounty provider.
:param more_test_set_hashes: More committed hashes for the portions of the test set.
:return: The index of the test set that must be revealed.
"""
assert self.state == MarketPhase.INITIALIZATION
assert msg.sender == self.bounty_provider
# Ensure that a new test set is given and the sender isn't just trying to get a new random index.
assert len(more_test_set_hashes) > 0, "You must give at least one hash."
self.test_set_hashes += more_test_set_hashes
self.test_reveal_index = random.randrange(len(self.test_set_hashes))
self.next_test_set_index_to_verify = 0
if self.next_test_set_index_to_verify == self.test_reveal_index:
self.next_test_set_index_to_verify += 1
return self.test_reveal_index
def verify_test_set(self, index: int, test_set_portion):
"""
Verify that a portion of the test set matches the committed to hash.
:param index: The index of the test set in the originally committed list of hashes.
:param test_set_portion: The portion of the test set to reveal.
"""
assert 0 <= index < len(self.test_set_hashes)
assert len(test_set_portion) > 0
test_set_hash = self.hash_test_set(test_set_portion)
assert test_set_hash == self.test_set_hashes[index]
def reveal_init_test_set(self, test_set_portion):
"""
Reveal the required portion of the full test set.
:param test_set_portion: The portion of the test set that must be revealed before starting the Participation Phase.
"""
assert self.state == MarketPhase.INITIALIZATION
self.verify_test_set(self.test_reveal_index, test_set_portion)
self.state = MarketPhase.PARTICIPATION
def handle_add_data(self, contributor_address: Address, msg_value: float, data, classification) -> (float, bool):
# Allow them to stake as much as they want to ensure they get included in future rounds.
assert self.state == MarketPhase.PARTICIPATION, f'Current state is: {self.state}.'
if msg_value < self.min_stake:
raise RejectException(f"Did not pay enough. Sent {msg_value} < {self.min_stake}")
if self._allow_greater_deposit:
cost = msg_value
else:
cost = self.min_stake
update_model = False
self._market_data.append(_Contribution(contributor_address, data, classification, cost))
self._market_balances[contributor_address] += cost
return (cost, update_model)
def end_market(self):
"""
Signal the end of the prediction market.
"""
assert self.state == MarketPhase.PARTICIPATION, f'Current state is: {self.state}.'
if self.get_num_contributions_in_market() < self.min_num_contributions \
and self._time() < self._market_earliest_end_time_s:
raise RejectException("Can't end the market yet.")
self._logger.info("Ending market.")
self.state = MarketPhase.REVEAL_TEST_SET
self._next_data_index = 0
self.test_data, self.test_labels = [], []
def verify_next_test_set(self, test_set_portion):
assert self.state == MarketPhase.REVEAL_TEST_SET
self.verify_test_set(self.next_test_set_index_to_verify, test_set_portion)
test_data, test_labels = zip(*test_set_portion)
self.test_data += test_data
self.test_labels += test_labels
self.next_test_set_index_to_verify += 1
if self.next_test_set_index_to_verify == self.test_reveal_index:
self.next_test_set_index_to_verify += 1
if self.next_test_set_index_to_verify == len(self.test_set_hashes):
self.state = MarketPhase.REWARD_RESTART
self.test_data = np.array(self.test_data)
self.test_labels = np.array(self.test_labels)
def process_contribution(self):
"""
Reward Phase:
Process the next data contribution.
"""
assert self.remaining_bounty_rounds > 0, "The market has ended."
if self.state == MarketPhase.REWARD_RESTART:
self._next_data_index = 0
self._logger.debug("Remaining bounty rounds: %s", self.remaining_bounty_rounds)
self._scores = defaultdict(float)
if self._reset_model_during_reward_phase:
# The paper implies that we should not retrain the model and instead only train once.
# The problem with that is a contributor's score is affected by bad contributions
# made between theirs and the last counted contribution, even after those bad contributions are filtered out.
self.model.reset_model()
if self.prev_acc is None:
# XXX This evaluation can be expensive and likely won't work in Ethereum.
# We need to find a more efficient way to do this or let a contributor prove they did it.
self.prev_acc = self.model.evaluate(self.test_data, self.test_labels)
self.original_acc = self.prev_acc
self._logger.debug("Accuracy: %0.2f%%", self.prev_acc * 100)
elif not self._reset_model_during_reward_phase:
# When calculating the reward scores, the same accuracy of the initial model should be used.
self.prev_acc = self.original_acc
self._num_market_contributions: Dict[Address, int] = Counter()
self._worst_contribution: Optional[_Contribution] = None
self._worst_contributor: Optional[Address] = None
self._min_score = math.inf
self.state = MarketPhase.REWARD
else:
assert self.state == MarketPhase.REWARD
contribution = self._market_data[self._next_data_index]
self._num_market_contributions[contribution.contributor_address] += 1
self.model.update(contribution.data, contribution.classification)
if not self._reset_model_during_reward_phase and contribution.accuracy is None:
# XXX Potentially expensive gas cost.
contribution.accuracy = self.model.evaluate(self.test_data, self.test_labels)
self._next_data_index += 1
iterated_through_all_contributions = self._next_data_index >= self.get_num_contributions_in_market()
if iterated_through_all_contributions \
or not self._group_contributions \
or self._market_data[self._next_data_index].contributor_address != contribution.contributor_address:
# Need to compute score.
if self._reset_model_during_reward_phase:
# XXX Potentially expensive gas cost.
acc = self.model.evaluate(self.test_data, self.test_labels)
else:
acc = contribution.accuracy
score_change = acc - self.prev_acc
if self._group_contributions:
new_score = self._scores[contribution.contributor_address] = \
self._scores[contribution.contributor_address] + score_change
else:
new_score = contribution.score = score_change
if new_score < self._min_score:
self._min_score = new_score
if self._group_contributions:
self._worst_contributor = contribution.contributor_address
else:
self._worst_contribution = contribution
elif self._group_contributions and self._worst_contributor == contribution.contributor_address:
# Their score increased, they might not be the worst anymore.
# Optimize: use a heap.
self._worst_contributor, self._min_score = min(self._scores.items(), key=lambda x: x[1])
self.prev_acc = acc
if iterated_through_all_contributions:
# Find min score and remove that address from the list.
self._logger.debug("Minimum score: %.2f", self._min_score)
if self._min_score < 0:
if self._group_contributions:
num_rounds = self._market_balances[self._worst_contributor] / -self._min_score
else:
num_rounds = self._worst_contribution.balance / -self._min_score
if num_rounds > self.remaining_bounty_rounds:
num_rounds = self.remaining_bounty_rounds
self._logger.debug("Will simulate %.2f rounds.", num_rounds)
self.remaining_bounty_rounds -= num_rounds
if self.remaining_bounty_rounds == 0:
self._end_reward_phase(num_rounds)
else:
if self._group_contributions:
participants_to_remove = set()
for participant, score in self._scores.items():
self._logger.debug("Score for \"%s\": %.2f", participant, score)
self._market_balances[participant] += score * num_rounds
if self._market_balances[participant] < self._num_market_contributions[participant]:
# They don't have enough left to stake next time.
participants_to_remove.add(participant)
self._market_data: List[_Contribution] = list(
filter(lambda c: c.contributor_address not in participants_to_remove,
self._market_data))
else:
for contribution in self._market_data:
contribution.balance += contribution.score * num_rounds
if contribution.balance < 1:
# Contribution is going to get kicked out.
self._market_balances[contribution.contributor_address] += contribution.balance
self._market_data: List[_Contribution] = \
list(filter(lambda c: c.balance >= 1, self._market_data))
if self.get_num_contributions_in_market() == 0:
self.state = MarketPhase.REWARD_COLLECT
self.remaining_bounty_rounds = 0
self.reward_phase_end_time_s = self._time()
else:
self.state = MarketPhase.REWARD_RESTART
else:
num_rounds = self.remaining_bounty_rounds
self.remaining_bounty_rounds = 0
self._end_reward_phase(num_rounds)
def _end_reward_phase(self, num_rounds):
"""
Distribute rewards.
:param num_rounds: The number of rounds remaining.
"""
self._logger.debug("Dividing remaining bounty amongst all remaining contributors to simulate %.2f rounds.",
num_rounds)
self.reward_phase_end_time_s = self._time()
self.state = MarketPhase.REWARD_COLLECT
if self._group_contributions:
for participant, score in self._scores.items():
self._logger.debug("Score for \"%s\": %.2f", participant, score)
self._market_balances[participant] += score * num_rounds
else:
for contribution in self._market_data:
self._market_balances[contribution.contributor_address] += \
contribution.score * num_rounds
self._market_data = []
def handle_refund(self, submitter: Address, stored_data: StoredData,
claimable_amount: float, claimed_by_submitter: bool,
prediction) -> float:
assert self.remaining_bounty_rounds == 0, "The reward phase has not finished processing contributions."
assert self.state == MarketPhase.REWARD_COLLECT
result = self._market_balances[submitter]
self._logger.debug("Reward for \"%s\": %.2f", submitter, result)
if result > 0:
del self._market_balances[submitter]
else:
result = 0
return result
def handle_report(self, reporter: Address, stored_data: StoredData, claimed_by_reporter: bool, prediction) -> float:
assert self.state == MarketPhase.REWARD_COLLECT, "The reward phase has not finished processing contributions."
assert self.remaining_bounty_rounds == 0
assert self.reward_phase_end_time_s > 0
if self._time() - self.reward_phase_end_time_s >= self.any_address_claim_wait_time_s:
submitter = stored_data.sender
result = self._market_balances[submitter]
if result > 0:
self._logger.debug("Giving reward for \"%s\" to \"%s\". Reward: %s", submitter, reporter, result)
# Remove the submitter's balance since it is being given away and must not be claimable again.
del self._market_balances[submitter]
else:
result = 0
return result
@dataclass
class PredictionMarketImModule(Module):
allow_greater_deposit: bool = field(default=False)
group_contributions: bool = field(default=False)
reset_model_during_reward_phase: bool = field(default=False)
@provider
@singleton
def provide_incentive_mechanism(self, builder: ClassAssistedBuilder[PredictionMarket]) -> IncentiveMechanism:
return builder.build(
allow_greater_deposit=self.allow_greater_deposit,
group_contributions=self.group_contributions,
reset_model_during_reward_phase=self.reset_model_during_reward_phase,
)
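# A small sketch (made-up data) of the commit-reveal scheme used above: hashes
# are committed when the market is initialized, one randomly chosen piece is
# revealed to start participation, and the rest are revealed and re-hashed for
# verification at the end.
if __name__ == '__main__':
    x_test = [[0], [1], [2], [3]]
    y_test = [0, 1, 0, 1]
    hashes, pieces = PredictionMarket.get_test_set_hashes(2, x_test, y_test)
    # Re-hashing a revealed piece reproduces the committed hash.
    assert PredictionMarket.hash_test_set(pieces[0]) == hashes[0]
    assert PredictionMarket.hash_test_set(pieces[1]) == hashes[1]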
|
from abc import ABC, abstractmethod
import math
from decai.simulation.contract.data.data_handler import StoredData
from decai.simulation.contract.objects import Address, SmartContract
class IncentiveMechanism(ABC, SmartContract):
"""
Defines incentives for others to contribute "good" quality data.
"""
def __init__(self, refund_time_s=math.inf, any_address_claim_wait_time_s=math.inf):
super().__init__()
self.refund_time_s = refund_time_s
"""
Amount of time to wait to get a refund back.
Once this amount of time has passed, the entire deposit can be reclaimed.
Also once this amount of time has passed, the deposit (in full or in part) can be taken by others.
Default to not allowing refunds.
"""
self.any_address_claim_wait_time_s = any_address_claim_wait_time_s
"""
Amount of time after which anyone can take someone's entire remaining deposit.
The purpose of this is to help ensure that value does not get "stuck" in a contract.
This must be greater than the required amount of time to wait for attempting a refund.
Contracts may want to enforce that this is much greater than the amount of time to wait for attempting a refund
to give even more time to get the deposit back and not let others take too much.
"""
@abstractmethod
def distribute_payment_for_prediction(self, sender: str, value: float):
"""
Share `value` with those that submit data.
:param sender: The address of the one calling prediction.
:param value: The amount sent with the request to call prediction.
"""
pass
@abstractmethod
def handle_add_data(self, contributor_address: Address, msg_value: float, data, classification) \
-> (float, bool):
"""
Determine if the request to add data is acceptable.
:param contributor_address: The address of the one attempting to add data.
:param msg_value: The value sent with the initial transaction to add data.
:param data: A single sample of training data for the model.
:param classification: The label for `data`.
:return: tuple
The cost required to add new data.
`True` if the model should be updated, `False` otherwise.
"""
pass
@abstractmethod
def handle_refund(self, submitter: str, stored_data: StoredData,
claimable_amount: float, claimed_by_submitter: bool,
prediction) -> float:
"""
Notify that a refund is being attempted.
:param submitter: The address of the one attempting a refund.
:param stored_data: The data for which a refund is being attempted.
:param claimable_amount: The amount that can be claimed for the refund.
:param claimed_by_submitter: True if the data has already been claimed by `submitter`, otherwise false.
:param prediction: The current prediction of the model for data
or a callable with no parameters to lazily get the prediction of the model on the data.
:return: The amount to refund to `submitter`.
"""
pass
@abstractmethod
def handle_report(self, reporter: str, stored_data: StoredData, claimed_by_reporter: bool, prediction) \
-> float:
"""
Notify that data is being reported as bad or old.
:param reporter: The address of the one reporting about the data.
:param stored_data: The data being reported.
:param claimed_by_reporter: True if the data has already been claimed by `reporter`, otherwise false.
:param prediction: The current prediction of the model for data
or a callable with no parameters to lazily get the prediction of the model on the data.
:return: The amount to reward to `reporter`.
"""
pass
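# A minimal sketch of a concrete IncentiveMechanism (hypothetical, illustration
# only): a flat-fee mechanism that never refunds or rewards. The real mechanisms
# in this simulation are `Stakeable` and `PredictionMarket`.
from decai.simulation.contract.objects import RejectException

class FlatFeeIm(IncentiveMechanism):
    def __init__(self, fee=1):
        # The defaults leave refund_time_s and any_address_claim_wait_time_s at
        # math.inf, i.e. refunds and take-overs are never allowed.
        super().__init__()
        self.fee = fee

    def distribute_payment_for_prediction(self, sender: str, value: float):
        # Keep the whole payment; nothing is shared with contributors.
        pass

    def handle_add_data(self, contributor_address: Address, msg_value: float, data, classification) -> (float, bool):
        if msg_value < self.fee:
            raise RejectException(f"Did not pay enough. Sent {msg_value} < {self.fee}")
        return self.fee, True

    def handle_refund(self, submitter: str, stored_data: StoredData,
                      claimable_amount: float, claimed_by_submitter: bool,
                      prediction) -> float:
        raise RejectException("Refunds are not supported.")

    def handle_report(self, reporter: str, stored_data: StoredData, claimed_by_reporter: bool, prediction) -> float:
        raise RejectException("Reports are not supported.")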
|
import unittest
from collections import defaultdict
from typing import cast
from injector import Injector
from decai.simulation.contract.balances import Balances
from decai.simulation.contract.classification.perceptron import PerceptronModule
from decai.simulation.contract.data.data_handler import StoredData
from decai.simulation.contract.incentive.incentive_mechanism import IncentiveMechanism
from decai.simulation.contract.incentive.prediction_market import MarketPhase, \
PredictionMarket, PredictionMarketImModule
from decai.simulation.contract.objects import Msg, TimeMock
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.simple_data_loader import SimpleDataModule
from decai.simulation.logging_module import LoggingModule
class TestPredictionMarket(unittest.TestCase):
def test_market_like_original_paper(self):
inj = Injector([
SimpleDataModule,
LoggingModule,
PerceptronModule,
PredictionMarketImModule(
allow_greater_deposit=False,
group_contributions=False,
reset_model_during_reward_phase=False,
),
])
balances = inj.get(Balances)
data = inj.get(DataLoader)
im = cast(PredictionMarket, inj.get(IncentiveMechanism))
im.owner = 'owner'
assert isinstance(im, PredictionMarket)
init_train_data_portion = 0.2
initializer_address = 'initializer'
total_bounty = 100_000
balances.initialize(initializer_address, total_bounty)
good_contributor_address = 'good_contributor'
initial_good_balance = 10_000
balances.initialize(good_contributor_address, initial_good_balance)
bad_contributor_address = 'bad_contributor'
initial_bad_balance = 10_000
balances.initialize(bad_contributor_address, initial_bad_balance)
(x_train, y_train), (x_test, y_test) = data.load_data()
init_idx = int(len(x_train) * init_train_data_portion)
assert init_idx > 0
x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]
# Split test set into pieces.
num_pieces = 10
test_dataset_hashes, test_sets = im.get_test_set_hashes(num_pieces, x_test, y_test)
# Ending criteria:
min_length_s = 100
min_num_contributions = min(len(x_remaining), 100)
# Commitment Phase
self.assertIsNone(im.state)
im.model.init_model(x_init_data, y_init_data, save_model=True)
hashes_split = 3
test_reveal_index = im.initialize_market(Msg(initializer_address, total_bounty),
test_dataset_hashes[:hashes_split],
min_length_s, min_num_contributions)
assert 0 <= test_reveal_index < len(test_dataset_hashes)
self.assertEqual(MarketPhase.INITIALIZATION, im.state)
test_reveal_index = im.add_test_set_hashes(Msg(initializer_address, 0), test_dataset_hashes[hashes_split:])
assert 0 <= test_reveal_index < len(test_dataset_hashes)
self.assertEqual(MarketPhase.INITIALIZATION, im.state)
im.reveal_init_test_set(test_sets[test_reveal_index])
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
# Participation Phase
value = 100
total_deposits = defaultdict(float)
for i in range(min_num_contributions):
data = x_remaining[i]
classification = y_remaining[i]
if i % 2 == 0:
contributor = good_contributor_address
else:
contributor = bad_contributor_address
classification = 1 - classification
cost, _ = im.handle_add_data(contributor, value, data, classification)
self.assertEqual(im.min_stake, cost, "Cost should be the minimum stake because of the options passed in.")
balances.send(contributor, im.owner, cost)
total_deposits[contributor] += cost
# Reward Phase
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
im.end_market()
self.assertEqual(MarketPhase.REVEAL_TEST_SET, im.state)
for i, test_set_portion in enumerate(test_sets):
if i != test_reveal_index:
im.verify_next_test_set(test_set_portion)
self.assertEqual(MarketPhase.REWARD_RESTART, im.state)
while im.remaining_bounty_rounds > 0:
im.process_contribution()
# Collect rewards.
self.assertEqual(MarketPhase.REWARD_COLLECT, im.state)
for contributor in [good_contributor_address, bad_contributor_address]:
# Don't need to pass the right StoredData.
# noinspection PyTypeChecker
reward = im.handle_refund(contributor, None, 0, False, None)
balances.send(im.owner, contributor, reward)
self.assertGreater(total_deposits[good_contributor_address], 0)
self.assertGreater(total_deposits[bad_contributor_address], 0)
# General checks that should be true for a market with a reasonably sensitive model.
self.assertLess(balances[im.owner], total_bounty,
f"Some of the bounty should be distributed.\n"
f"Balances: {balances.get_all()}")
self.assertLess(0, balances[im.owner])
# Sometimes the bad contributor happens to get some value but not much.
self.assertAlmostEqual(balances[bad_contributor_address], initial_bad_balance, delta=2,
msg=f"The bad contributor should lose funds.\n"
f"Balances: {balances.get_all()}")
self.assertGreater(balances[good_contributor_address], initial_good_balance)
self.assertLess(balances[bad_contributor_address], balances[good_contributor_address])
self.assertLessEqual(balances[good_contributor_address] - balances[bad_contributor_address],
total_bounty)
self.assertEqual(initial_good_balance + initial_bad_balance + total_bounty,
balances[good_contributor_address] + balances[bad_contributor_address] +
balances[im.owner],
"Should be a zero-sum.")
def test_market(self):
inj = Injector([
SimpleDataModule,
LoggingModule,
PerceptronModule,
PredictionMarketImModule(
allow_greater_deposit=True,
group_contributions=True,
reset_model_during_reward_phase=True,
),
])
balances = inj.get(Balances)
data = inj.get(DataLoader)
im = cast(PredictionMarket, inj.get(IncentiveMechanism))
im.owner = 'owner'
assert isinstance(im, PredictionMarket)
init_train_data_portion = 0.2
initializer_address = 'initializer'
total_bounty = 100_000
balances.initialize(initializer_address, total_bounty)
good_contributor_address = 'good_contributor'
initial_good_balance = 10_000
balances.initialize(good_contributor_address, initial_good_balance)
bad_contributor_address = 'bad_contributor'
initial_bad_balance = 10_000
balances.initialize(bad_contributor_address, initial_bad_balance)
(x_train, y_train), (x_test, y_test) = data.load_data()
init_idx = int(len(x_train) * init_train_data_portion)
assert init_idx > 0
x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]
# Split test set into pieces.
num_pieces = 10
test_dataset_hashes, test_sets = im.get_test_set_hashes(num_pieces, x_test, y_test)
# Ending criteria:
min_length_s = 100
min_num_contributions = min(len(x_remaining), 100)
# Commitment Phase
self.assertIsNone(im.state)
im.model.init_model(x_init_data, y_init_data, save_model=True)
hashes_split = 3
test_reveal_index = im.initialize_market(Msg(initializer_address, total_bounty),
test_dataset_hashes[:hashes_split],
min_length_s, min_num_contributions)
assert 0 <= test_reveal_index < len(test_dataset_hashes)
self.assertEqual(MarketPhase.INITIALIZATION, im.state)
test_reveal_index = im.add_test_set_hashes(Msg(initializer_address, 0), test_dataset_hashes[hashes_split:])
assert 0 <= test_reveal_index < len(test_dataset_hashes)
self.assertEqual(MarketPhase.INITIALIZATION, im.state)
im.reveal_init_test_set(test_sets[test_reveal_index])
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
# Participation Phase
value = 100
total_deposits = defaultdict(float)
for i in range(min_num_contributions):
data = x_remaining[i]
classification = y_remaining[i]
if i % 2 == 0:
contributor = good_contributor_address
else:
contributor = bad_contributor_address
classification = 1 - classification
cost, _ = im.handle_add_data(contributor, value, data, classification)
balances.send(contributor, im.owner, cost)
total_deposits[contributor] += cost
# Reward Phase
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
im.end_market()
self.assertEqual(MarketPhase.REVEAL_TEST_SET, im.state)
for i, test_set_portion in enumerate(test_sets):
if i != test_reveal_index:
im.verify_next_test_set(test_set_portion)
self.assertEqual(MarketPhase.REWARD_RESTART, im.state)
while im.remaining_bounty_rounds > 0:
im.process_contribution()
# Collect rewards.
self.assertEqual(MarketPhase.REWARD_COLLECT, im.state)
for contributor in [good_contributor_address, bad_contributor_address]:
# Don't need to pass the right StoredData.
# noinspection PyTypeChecker
reward = im.handle_refund(contributor, None, 0, False, None)
balances.send(im.owner, contributor, reward)
self.assertGreater(total_deposits[good_contributor_address], 0)
self.assertGreater(total_deposits[bad_contributor_address], 0)
# General checks that should be true for a market with a reasonably sensitive model.
self.assertLess(balances[im.owner], total_bounty,
f"Some of the bounty should be distributed.\n"
f"Balances: {balances.get_all()}")
self.assertLess(0, balances[im.owner])
self.assertLess(balances[bad_contributor_address], initial_bad_balance)
self.assertGreater(balances[good_contributor_address], initial_good_balance)
self.assertLess(balances[bad_contributor_address], balances[good_contributor_address])
self.assertLessEqual(balances[good_contributor_address] - balances[bad_contributor_address],
total_bounty)
self.assertEqual(initial_good_balance + initial_bad_balance + total_bounty,
balances[good_contributor_address] + balances[bad_contributor_address] +
balances[im.owner],
"Should be a zero-sum.")
self.assertEqual(initial_bad_balance - total_deposits[bad_contributor_address],
balances[bad_contributor_address],
"The bad contributor should lose all of their deposits.")
def test_report(self):
inj = Injector([
SimpleDataModule,
LoggingModule,
PerceptronModule,
PredictionMarketImModule(
allow_greater_deposit=True,
group_contributions=True,
reset_model_during_reward_phase=True,
),
])
balances = inj.get(Balances)
data = inj.get(DataLoader)
im = cast(PredictionMarket, inj.get(IncentiveMechanism))
im.owner = 'owner'
time_method = inj.get(TimeMock)
assert isinstance(im, PredictionMarket)
init_train_data_portion = 0.2
initializer_address = 'initializer'
total_bounty = 100_000
balances.initialize(initializer_address, total_bounty)
good_contributor_address = 'good_contributor'
initial_good_balance = 10_000
balances.initialize(good_contributor_address, initial_good_balance)
bad_contributor_address = 'bad_contributor'
initial_bad_balance = 10_000
balances.initialize(bad_contributor_address, initial_bad_balance)
(x_train, y_train), (x_test, y_test) = data.load_data()
init_idx = int(len(x_train) * init_train_data_portion)
assert init_idx > 0
x_init_data, y_init_data = x_train[:init_idx], y_train[:init_idx]
x_remaining, y_remaining = x_train[init_idx:], y_train[init_idx:]
# Split test set into pieces.
num_pieces = 10
test_dataset_hashes, test_sets = im.get_test_set_hashes(num_pieces, x_test, y_test)
# Ending criteria:
min_length_s = 100
min_num_contributions = min(len(x_remaining), 100)
# Commitment Phase
self.assertIsNone(im.state)
im.model.init_model(x_init_data, y_init_data, save_model=True)
test_reveal_index = im.initialize_market(Msg(initializer_address, total_bounty),
test_dataset_hashes,
min_length_s, min_num_contributions)
self.assertEqual(MarketPhase.INITIALIZATION, im.state)
assert 0 <= test_reveal_index < len(test_dataset_hashes)
im.reveal_init_test_set(test_sets[test_reveal_index])
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
# Participation Phase
value = 100
total_deposits = defaultdict(float)
stored_data = None
for i in range(min_num_contributions):
time_method.add_time(60)
data = x_remaining[i]
classification = y_remaining[i]
if i % 2 == 0:
contributor = good_contributor_address
else:
contributor = bad_contributor_address
classification = 1 - classification
cost, _ = im.handle_add_data(contributor, value, data, classification)
if stored_data is None:
stored_data = StoredData(classification, time_method(), contributor, cost, cost)
balances.send(contributor, im.owner, cost)
total_deposits[contributor] += cost
# Reward Phase
self.assertEqual(MarketPhase.PARTICIPATION, im.state)
im.end_market()
time_method.add_time(60)
self.assertEqual(MarketPhase.REVEAL_TEST_SET, im.state)
for i, test_set_portion in enumerate(test_sets):
if i != test_reveal_index:
im.verify_next_test_set(test_set_portion)
self.assertEqual(MarketPhase.REWARD_RESTART, im.state)
while im.remaining_bounty_rounds > 0:
time_method.add_time(60)
im.process_contribution()
# Collect rewards.
self.assertEqual(MarketPhase.REWARD_COLLECT, im.state)
# Get some stored data.
# Make sure reporting doesn't work yet.
reward = im.handle_report(bad_contributor_address, stored_data, False, None)
self.assertEqual(0, reward, "There should be no reward yet.")
time_method.add_time(im.any_address_claim_wait_time_s)
reward = im.handle_report(bad_contributor_address, stored_data, False, None)
balances.send(im.owner, bad_contributor_address, reward)
# Don't need to pass the right StoredData.
# noinspection PyTypeChecker
reward = im.handle_refund(bad_contributor_address, None, 0, False, None)
balances.send(im.owner, bad_contributor_address, reward)
# General checks that should be true for a market with a reasonably sensitive model.
self.assertLess(balances[im.owner], total_bounty,
f"Some of the bounty should be distributed.\n"
f"Balances: {balances.get_all()}")
self.assertLess(0, balances[im.owner])
self.assertGreater(total_deposits[good_contributor_address], 0)
self.assertGreater(total_deposits[bad_contributor_address], 0)
# The bad contributor profited because they reported the good contributor.
self.assertGreater(balances[bad_contributor_address], initial_bad_balance)
self.assertLess(balances[good_contributor_address], initial_good_balance)
self.assertLess(balances[good_contributor_address], balances[bad_contributor_address])
self.assertLessEqual(balances[bad_contributor_address] - balances[good_contributor_address],
total_bounty)
self.assertEqual(initial_good_balance + initial_bad_balance + total_bounty,
balances[good_contributor_address] + balances[bad_contributor_address] +
balances[im.owner],
"Should be a zero-sum.")
self.assertEqual(initial_good_balance - total_deposits[good_contributor_address],
balances[good_contributor_address],
"The good contributor should lose all of their deposits.")
|
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Dict
import numpy as np
from injector import inject, singleton
from decai.simulation.contract.objects import Address, RejectException, SmartContract, TimeMock
@dataclass
class StoredData:
# Storing the data itself is not necessary.
# data: object
classification: object
time: int
sender: Address
# Need to use float since the numbers might be large. They should still actually be integers.
initial_deposit: float
"""
The amount that was initially given to deposit this data.
"""
claimable_amount: float
"""
The amount of the deposit that can still be claimed.
"""
claimed_by: Dict[Address, bool] = field(default_factory=lambda: defaultdict(bool))
@inject
@singleton
@dataclass
class DataHandler(SmartContract):
"""
Stores added training data and corresponding meta-data.
"""
_time: TimeMock
_added_data: Dict[tuple, StoredData] = field(default_factory=dict, init=False)
def __iter__(self):
return iter(self._added_data.items())
def _get_key(self, data, classification, added_time: int, original_author: Address):
if isinstance(data, np.ndarray):
# The `.tolist()` isn't necessary but is faster.
data = tuple(data.tolist())
else:
data = tuple(data)
return (data, classification, added_time, original_author)
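# Example key: ((0.0, 1.0), 1, 1600000000, 'contributor') - a plain tuple,
# so it is hashable and can index the `_added_data` dict.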
def get_data(self, data, classification, added_time: int, original_author: Address) -> StoredData:
"""
:param data: The originally submitted features.
:param classification: The label originally submitted for `data`.
        :param added_time: The time in seconds at which the data was added.
:param original_author: The address that originally added the data.
:return: The stored information for the data.
"""
key = self._get_key(data, classification, added_time, original_author)
result = self._added_data.get(key)
return result
def handle_add_data(self, contributor_address: Address, cost, data, classification):
"""
        Log an attempt to add data.
        :param contributor_address: The address of the one attempting to add data.
:param cost: The cost required to add new data.
:param data: A single sample of training data for the model.
:param classification: The label for `data`.
"""
current_time_s = self._time()
key = self._get_key(data, classification, current_time_s, contributor_address)
if key in self._added_data:
raise RejectException("Data has already been added.")
d = StoredData(classification, current_time_s, contributor_address, cost, cost)
self._added_data[key] = d
def handle_refund(self, submitter: Address, data, classification, added_time: int) -> (float, bool, StoredData):
"""
Log a refund attempt.
:param submitter: The address of the one attempting a refund.
:param data: The data for which to attempt a refund.
:param classification: The label originally submitted for `data`.
        :param added_time: The time in seconds at which the data was added.
:return:
The amount that can be claimed for the refund.
True if the data has already been claimed by `submitter`, otherwise false.
The stored data.
"""
stored_data = self.get_data(data, classification, added_time, submitter)
assert stored_data is not None, "Data not found."
assert stored_data.sender == submitter, "Data isn't from the sender."
claimable_amount = stored_data.claimable_amount
claimed_by_submitter = stored_data.claimed_by[submitter]
return (claimable_amount, claimed_by_submitter, stored_data)
def handle_report(self, reporter: Address, data, classification, added_time: int, original_author: Address) \
-> (bool, StoredData):
"""
Retrieve information about the data to report.
:param reporter: The address of the one reporting the data.
:param data: The data to report.
:param classification: The label originally submitted for `data`.
        :param added_time: The time in seconds at which the data was added.
:param original_author: The address that originally added the data.
:return:
        True if the data has already been claimed by `reporter`, otherwise false.
The stored data.
"""
stored_data = self.get_data(data, classification, added_time, original_author)
assert stored_data is not None, "Data not found."
claimed_by_reporter = stored_data.claimed_by[reporter]
# The Solidity implementation updates `stored_data.claimed_by` here which is fine.
# We do not update it here because if an error occurs while attempting a refund,
# then the change would have to be undone.
# Instead, `stored_data.claimed_by` is updated in `update_claimable_amount`.
return (claimed_by_reporter, stored_data)
def update_claimable_amount(self, receiver: Address, stored_data: StoredData, reward_amount: float):
# The Solidity implementation does the update in another place which is fine for it.
# Here we only update it once we're sure the refund can be completed successfully.
if reward_amount > 0:
stored_data.claimed_by[receiver] = True
stored_data.claimable_amount -= reward_amount
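# A minimal usage sketch (illustrative only, not part of the original module;
# the address and cost below are made up):
#
#   handler = DataHandler(_time=TimeMock())
#   handler.handle_add_data('0xAlice', cost=10, data=[0, 1], classification=1)
#   amount, claimed, stored = handler.handle_refund(
#       '0xAlice', [0, 1], 1, handler._time())
#   handler.update_claimable_amount('0xAlice', stored, amount)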
|
import unittest
from queue import PriorityQueue
from decai.simulation.simulate import Agent
class TestAgent(unittest.TestCase):
def test_queue(self):
q = PriorityQueue()
agents = [
Agent('a1', 10, 1, 1, 1),
Agent('a2', 10, 1, 1, 1),
Agent('a0', 10, 1, 1, 1),
]
        for a in agents:
            q.put((0, a))
results = [q.get()[1].address for _ in agents]
self.assertEqual(['a0', 'a1', 'a2'], results)
|
from dataclasses import dataclass, field
from logging import Logger
from typing import List
import numpy as np
from injector import inject, Module
from sklearn.utils import shuffle
from tqdm import trange
from .data_loader import DataLoader
@inject
@dataclass
class TicTacToeDataLoader(DataLoader):
"""
Load data for Tic-Tac-Toe games.
    Each sample is a flattened `width` x `length` board.
    The players are 1 and -1. The data is from the perspective of player 1; the opponent is -1.
0 means no one has played in that position.
"""
_logger: Logger
_seed: int = field(default=2, init=False)
_train_split: float = field(default=0.7, init=False)
width: int = field(default=3, init=False)
length: int = field(default=3, init=False)
def classifications(self) -> List[str]:
return list(map(str, map(self.map_pos, range(self.width * self.length))))
def get_winner(self, board):
def get_single_winner(line: set):
if len(line) == 1:
val = next(iter(line))
if val != 0:
return val
return None
for row in range(self.width):
result = get_single_winner(set(board[row]))
if result is not None:
return result
for col in range(self.length):
result = get_single_winner(set(board[:, col]))
if result is not None:
return result
result = get_single_winner(set(board.diagonal()))
if result is not None:
return result
diag_vals = set(board[i, self.length - 1 - i] for i in range(self.width))
result = get_single_winner(diag_vals)
if result is not None:
return result
return None
def map_pos(self, pos):
return pos // self.width, pos % self.width
def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
X, y = [], []
bad_moves = set()
players = (1, -1)
assert self.width == self.length, "The following code assumes that the board is square."
def fill(board, start_pos, next_player, path):
# See if there is a winning move.
winner = None
for pos in range(start_pos, self.width * self.length):
i, j = self.map_pos(pos)
if board[i, j] != 0:
continue
_board = board.copy()
_board[i, j] = next_player
winner = self.get_winner(_board)
if winner is not None:
path.append((board, pos, next_player))
break
if winner is not None:
# Only count wins for one of the players to make setting up games simpler.
if winner == players[0]:
for history_board, history_position, history_player in path:
history_board = history_board.flatten()
if history_player == winner:
X.append(history_board)
y.append(history_position)
else:
                            bad_moves.add((tuple(-history_board), history_position))
else:
# Recurse.
for pos in range(start_pos, self.width * self.length):
i, j = self.map_pos(pos)
if board[i, j] != 0:
continue
_path = list(path)
_path.append((board, pos, next_player))
_board = board.copy()
_board[i, j] = next_player
fill(_board, start_pos, next_player=-1 if next_player == 1 else 1, path=_path)
self._logger.info("Loading Tic Tac Toe data.")
for init_pos in trange(self.width * self.length,
desc="Making boards",
unit_scale=True, mininterval=2, unit=" start positions"
):
pos = self.map_pos(init_pos)
for player in players:
board = np.zeros((self.width, self.length), dtype=np.int8)
path = [(board.copy(), init_pos, player)]
board[pos] = player
fill(board, init_pos + 1, next_player=-1 if player == 1 else 1, path=path)
# Remove bad moves.
# Note this might not help much depending on the model.
X, y = zip(*[(X[i], y[i]) for i in range(len(X)) if (tuple(X[i]), y[i]) not in bad_moves])
X, y = shuffle(X, y, random_state=self._seed)
split = int(self._train_split * len(X))
x_train, y_train = np.array(X[:split]), np.array(y[:split])
x_test, y_test = np.array(X[split:]), np.array(y[split:])
if train_size is not None:
x_train, y_train = x_train[:train_size], y_train[:train_size]
if test_size is not None:
x_test, y_test = x_test[:test_size], y_test[:test_size]
# Show some data.
# import random
# for _ in range(10):
# i = random.randrange(len(X))
# print(X[i].reshape((self.width, self.length)), y[i])
self._logger.info("Done loading data.\nCreated %d boards.", len(X))
return (x_train, y_train), (x_test, y_test)
class TicTacToeDataModule(Module):
def configure(self, binder):
binder.bind(DataLoader, TicTacToeDataLoader)
|
import os
from dataclasses import dataclass, field
from logging import Logger
from typing import List
import numpy as np
import pandas as pd
from injector import inject, Module
from sklearn.utils import shuffle
from decai.simulation.data.data_loader import DataLoader
@inject
@dataclass
class TitanicDataLoader(DataLoader):
"""
Load data for Titanic survivors.
https://www.kaggle.com/c/titanic/data
"""
_logger: Logger
_seed: int = field(default=231, init=False)
_train_split: float = field(default=0.7, init=False)
def classifications(self) -> List[str]:
return ["DIED", "SURVIVED"]
def _get_features(self, data: pd.DataFrame):
"""
Map the data to numbers.
Also uses some ideas from https://triangleinequality.wordpress.com/2013/09/08/basic-feature-engineering-with-the-titanic-data/
:param data: The data without labels.
:return: The data mapped to numbers.
"""
data.drop(columns=['PassengerId', 'Ticket'], inplace=True)
        # Other columns considered for dropping: 'Name', 'Cabin', 'Embarked'.
title_tuples = (
(' Mr. ', ' Sir. ', ' Don. ', ' Major. ', ' Capt. ', ' Jonkheer. ', ' Rev. ', ' Col. '),
(' Mrs. ', ' Countess. ', ' Mme. ', ' Lady. '),
(' Miss. ', ' Mlle. ', ' Ms. '),
(' Master. ',),
(' Dr. ',),
)
title_to_num = {
' Mr. ': 0,
' Mrs. ': 1,
' Miss. ': 2,
' Master. ': 3,
}
def _get_title(row):
result = None
name = row['Name']
for index, titles in enumerate(title_tuples):
for t in titles:
if t in name:
result = titles[0]
if result == ' Dr. ':
if row['Sex'] == 'male':
result = ' Mr. '
else:
result = ' Mrs. '
assert result is not None, f"No title found in {row}."
result = title_to_num[result]
return result
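        # For example (illustrative names): "Braund, Mr. Owen Harris" maps to 0
        # via ' Mr. ', and "Futrelle, Mme. Jacques Heath" maps to 1 because
        # ' Mme. ' falls in the ' Mrs. ' group.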
def _get_cabin(row):
result = -1
cabin = row['Cabin']
if isinstance(cabin, str):
for c in 'ABCDEFGT':
if c in cabin:
result = ord(c) - ord('A')
break
return result
result = []
for index, row in data.iterrows():
if row['Sex'] == 'male':
sex = 0
else:
sex = 1
family_size = row['SibSp'] + row['Parch']
datum = [
row['Pclass'],
sex,
_get_title(row),
family_size,
# These features did not help:
# _get_cabin(row),
# row['Age'],
# row['Parch'],
# row['SibSp'],
# row['Fare'],
# row['Fare'] / (family_size + 1),
]
result.append(datum)
return result
def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
self._logger.info("Loading data.")
data_folder_path = os.path.join(__file__, '../../../../training_data/titanic')
if not os.path.exists(data_folder_path):
# TODO Attempt to download the data.
raise Exception(f"Could not find Titanic dataset at \"{data_folder_path}\"."
"\nYou must download it from https://www.kaggle.com/c/titanic/data.")
x_train = pd.read_csv(os.path.join(data_folder_path, 'train.csv'))
y_train = np.array(x_train['Survived'], np.int8)
x_train.drop(columns=['Survived'], inplace=True)
x_train = self._get_features(x_train)
x_train = np.array(x_train)
x_train, y_train = shuffle(x_train, y_train, random_state=self._seed)
train_split = int(len(x_train) * self._train_split)
x_test, y_test = x_train[train_split:], y_train[train_split:]
x_train, y_train = x_train[:train_split], y_train[:train_split]
if train_size is not None:
x_train, y_train = x_train[:train_size], y_train[:train_size]
if test_size is not None:
x_test, y_test = x_test[:test_size], y_test[:test_size]
self._logger.info("Done loading data.")
return (x_train, y_train), (x_test, y_test)
@dataclass
class TitanicDataModule(Module):
def configure(self, binder):
binder.bind(DataLoader, to=TitanicDataLoader)
|
from abc import ABC, abstractmethod
from typing import List
class DataLoader(ABC):
"""
Base class for providing simulation data.
"""
@abstractmethod
def classifications(self) -> List[str]:
"""
:return: The classifications for this dataset.
"""
pass
@abstractmethod
def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
"""
:return: Training Data, Test Data: (x_train, y_train), (x_test, y_test)
"""
pass
|
from dataclasses import dataclass
from logging import Logger
from typing import List
from injector import inject, Module
from keras.datasets import boston_housing
from decai.simulation.data.data_loader import DataLoader
@inject
@dataclass
class BhpDataLoader(DataLoader):
"""
Load data from Boston Housing Prices.
https://keras.io/datasets/#boston-housing-price-regression-dataset
"""
_logger: Logger
def classifications(self) -> List[str]:
raise NotImplementedError
def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
self._logger.info("Loading Boston housing prices data.")
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
if train_size is not None:
x_train, y_train = x_train[:train_size], y_train[:train_size]
if test_size is not None:
x_test, y_test = x_test[:test_size], y_test[:test_size]
self._logger.info("Done loading data.")
return (x_train, y_train), (x_test, y_test)
@dataclass
class BhpDataModule(Module):
def configure(self, binder):
binder.bind(DataLoader, to=BhpDataLoader)
|
import itertools
import json
import os
import random
import time
from collections import Counter
from dataclasses import dataclass
from enum import Enum
from logging import Logger
from operator import itemgetter
from pathlib import Path
from typing import Collection, List, Optional, Tuple
import numpy as np
import pandas as pd
import spacy
from injector import ClassAssistedBuilder, inject, Module, provider, singleton
from sklearn.feature_extraction.text import TfidfVectorizer
from spacy.cli import download
from tqdm import tqdm
from .data_loader import DataLoader
class Label(Enum):
RELIABLE = 0
UNRELIABLE = 1
@dataclass
class News:
text: Optional[str]
label: Label
@inject
@dataclass
class _SignalMediaDataLoader(DataLoader):
"""
INCOMPLETE BECAUSE MAPPING THE SOURCE NAMES TO DOMAIN NAMES IS TRICKY.
See https://github.com/aldengolab/fake-news-detection/issues/4
Following logic of https://github.com/aldengolab/fake-news-detection.
Requires the Signal Media dataset from http://research.signalmedia.co/newsir16/signal-dataset.html to be at
simulation/training_data/news/sample-1M.jsonl
and https://github.com/OpenSourcesGroup/opensources with sources.json in simulation/training_data/news/
"""
_logger: Logger
_media_types = {'News'}
def classifications(self) -> List[str]:
raise NotImplementedError
def find_source_site(self, source_name: str, sources: Collection[str]) -> Optional[str]:
"""
:param source_name: The name of the source.
:param sources: Source domain names.
:return: The source domain name from `sources` or `None` if no mapping can be found.
"""
# TODO
result = None
return result
def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
data_folder_path = os.path.join(__file__, '../../../../training_data/news')
signal_data_path = os.path.join(data_folder_path, 'sample-1M.jsonl')
if not os.path.exists(signal_data_path):
raise Exception(f"Could not find the Signal Media dataset at \"{signal_data_path}\"."
"\nYou must obtain it from http://research.signalmedia.co/newsir16/signal-dataset.html"
f" and follow the instructions to obtain it. Then extract it to \"{signal_data_path}\".")
sources_path = os.path.join(data_folder_path, 'sources.json')
if not os.path.exists(sources_path):
raise Exception(f"Could not find the sources dataset at \"{sources_path}\"."
"\nYou must obtain it from https://github.com/OpenSourcesGroup/opensources and put"
f" sources.json in \"{data_folder_path}\".")
with open(sources_path) as f:
loaded_sources = json.load(f)
sources = dict()
for source, info in loaded_sources.items():
problem_types = (info['type'], info['2nd type'], info['3rd type'])
sources[source] = set(filter(None, problem_types))
self._logger.info("Found %d sources with labels.", len(sources))
# Name: website name in `sources`.
source_mapping = {}
not_found_flag = -1
with open(signal_data_path) as f:
for index, line in tqdm(enumerate(f),
desc="Filtering news articles",
unit_scale=True, mininterval=2, unit=" articles"
):
news = json.loads(line)
news_id = news['id']
title = news['title']
text = news['content']
source = news['source']
# media-type is either "News" or "Blog"
media_type = news['media-type']
published_date = news['published']
if media_type not in self._media_types:
continue
source_site = source_mapping.get(source)
if source_site is None:
source_site = self.find_source_site(source, sources)
if source_site is not None:
source_mapping[source] = source_site
else:
source_mapping[source] = not_found_flag
continue
elif source_site == not_found_flag:
continue
# TODO Use article and set label.
        with open(os.path.join(data_folder_path, 'source_mapping.json'), 'w') as f:
            json.dump(dict(sorted(source_mapping.items(), key=itemgetter(0))), f)
self._logger.info("Found %d sources in the articles.", len(source_mapping))
# TODO Set up output.
(x_train, y_train), (x_test, y_test) = (None, None), (None, None)
if train_size is not None:
x_train, y_train = x_train[:train_size], y_train[:train_size]
if test_size is not None:
x_test, y_test = x_test[:test_size], y_test[:test_size]
self._logger.info("Done loading news data.")
return (x_train, y_train), (x_test, y_test)
@inject
@dataclass
class NewsDataLoader(DataLoader):
"""
Load data from news sources.
    Requires data from https://www.kaggle.com/c/fake-news/data to be saved to "simulation/training_data/news/fake-news/train.csv".
"""
_logger: Logger
_train_split = 0.7
_replace_entities_enabled = False
"""
If True, entities will be replaced in text with the entity's label surrounded by angle brackets: "<LABEL>".
Accuracy with replacement: 0.9172
Accuracy without replacement: 0.9173
Disabled because using spaCy is slow, it will be tricky to use spaCy in JavaScript,
and it didn't change the evaluation metrics much.
"""
_entity_types_to_replace = {'PERSON', 'GPE', 'ORG', 'DATE', 'TIME', 'PERCENT',
'MONEY', 'QUANTITY', 'ORDINAL', 'CARDINAL'}
def classifications(self) -> List[str]:
return ["RELIABLE", "UNRELIABLE"]
def __post_init__(self):
spacy_model = 'en_core_web_lg'
download(spacy_model)
self._nlp = spacy.load(spacy_model, disable={'tagger', 'parser', 'textcat'})
def _load_kaggle_data(self, data_folder_path: str) -> Collection[News]:
"""
Load data from https://www.kaggle.com/c/fake-news/data.
"""
# Don't use the test data because it has no labels.
fake_news_data_path = os.path.join(data_folder_path, 'fake-news', 'train.csv')
if not os.path.exists(fake_news_data_path):
raise Exception(f"Could not find the Fake News dataset at \"{fake_news_data_path}\"."
"\nYou must obtain it from https://www.kaggle.com/c/fake-news/data.")
data = pd.read_csv(fake_news_data_path, na_values=dict(text=[]), keep_default_na=False)
result = []
for row in data.itertuples():
label = Label.RELIABLE if row.label == 0 else Label.UNRELIABLE
if len(row.text) > 0:
result.append(News(row.text, label))
# Consistent shuffle to aim for a mostly even distribution of labels.
random.shuffle(result, lambda: 0.618)
return result
def _replace_entities(self, doc) -> str:
# Remove names in text using spaCy.
result = doc.text
for ent in doc.ents[::-1]:
if ent.label_ in self._entity_types_to_replace:
result = result[:ent.start_char] + "<" + ent.label_ + ">" + result[ent.end_char:]
return result
def _pre_process_text(self, doc) -> str:
# TODO Remove name of news sources.
if self._replace_entities_enabled:
result = self._replace_entities(doc)
else:
assert isinstance(doc, str)
result = doc
return result
def _pre_process(self, news_articles: Collection[News], train_size: int, test_size: int) -> \
Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:
self._logger.info("Getting features for %d articles.", len(news_articles))
        # Only use bigram features.
ngram_range = (2, 2)
# Don't use IDF because we need integer features.
t = TfidfVectorizer(max_features=1000, ngram_range=ngram_range, norm=None, use_idf=False)
test_start = len(news_articles) - test_size
x_train = map(lambda news: news.text, itertools.islice(news_articles, train_size))
x_test = map(lambda news: news.text, itertools.islice(news_articles, test_start, len(news_articles)))
if self._replace_entities_enabled:
self._logger.debug("Will replace entities.")
x_train = self._nlp.pipe(x_train, batch_size=128)
x_test = self._nlp.pipe(x_test, batch_size=128)
else:
self._logger.debug("Replacing entities is disabled.")
x_train = map(self._pre_process_text, x_train)
x_test = map(self._pre_process_text, x_test)
x_train = t.fit_transform(tqdm(x_train,
desc="Processing training data",
total=train_size,
unit_scale=True, mininterval=2,
unit=" articles"
)).toarray()
x_test = t.transform(tqdm(x_test,
desc="Processing testing data",
total=test_size,
unit_scale=True, mininterval=2,
unit=" articles"
)).toarray()
y_train = np.array([news.label.value for news in itertools.islice(news_articles, train_size)], np.int8)
y_test = np.array([news.label.value for news in itertools.islice(news_articles,
test_start, len(news_articles))], np.int8)
self._logger.debug("Training labels: %s", Counter(y_train))
self._logger.debug("Test labels: %s", Counter(y_test))
self._logger.info("Done getting features.")
return (x_train, y_train), (x_test, y_test)
def load_data(self, train_size: int = None, test_size: int = None) -> \
Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:
self._logger.info("Loading news data.")
data_folder_path = os.path.join(__file__, '../../../../training_data/news')
# Look for cached data.
file_identifier = f'news-data-{train_size}-{test_size}-replace_ents_{self._replace_entities_enabled}.npy'
base_path = Path(os.path.dirname(__file__)) / 'cached_data'
os.makedirs(base_path, exist_ok=True)
cache_paths = {
'x_train': base_path / f'x_train-{file_identifier}',
'y_train': base_path / f'y_train-{file_identifier}',
'x_test': base_path / f'x_test-{file_identifier}',
'y_test': base_path / f'y_test-{file_identifier}'
}
# Use if modified in the last day.
if all([p.exists() for p in cache_paths.values()]) and \
all([time.time() - p.stat().st_mtime < 60 * 60 * 24 for p in cache_paths.values()]):
self._logger.info("Loaded cached News data from %s.", cache_paths)
return (np.load(cache_paths['x_train']), np.load(cache_paths['y_train'])), \
(np.load(cache_paths['x_test']), np.load(cache_paths['y_test']))
data = self._load_kaggle_data(data_folder_path)
# Separate train and test data.
if train_size is None:
if test_size is None:
train_size = int(self._train_split * len(data))
else:
train_size = len(data) - test_size
if test_size is None:
test_size = len(data) - train_size
if train_size + test_size > len(data):
raise Exception("There is not enough data for the requested sizes."
f"\n data size: {len(data)}"
f"\n train size: {train_size}"
f"\n test size: {test_size}")
(x_train, y_train), (x_test, y_test) = self._pre_process(data, train_size, test_size)
np.save(cache_paths['x_train'], x_train, allow_pickle=False)
np.save(cache_paths['y_train'], y_train, allow_pickle=False)
np.save(cache_paths['x_test'], x_test, allow_pickle=False)
np.save(cache_paths['y_test'], y_test, allow_pickle=False)
self._logger.info("Done loading news data.")
return (x_train, y_train), (x_test, y_test)
@dataclass
class NewsDataModule(Module):
@provider
@singleton
def provide_data_loader(self, builder: ClassAssistedBuilder[NewsDataLoader]) -> DataLoader:
return builder.build()
|
from dataclasses import dataclass
from logging import Logger
from typing import List
import numpy as np
from injector import Binder, inject, Module
from decai.simulation.data.data_loader import DataLoader
@inject
@dataclass
class SimpleDataLoader(DataLoader):
"""
Load simple data for testing.
"""
_logger: Logger
def classifications(self) -> List[str]:
return ["0", "1"]
def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
def _ground_truth(data):
if data[0] * data[2] > 0:
return 1
else:
return 0
x_train = np.array([
[0, 0, 0],
[1, 1, 1],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[0, 0, 2],
[0, 2, 0],
[2, 0, 0],
[2, 0, 2],
[0, 0, -3],
[0, 3, 0],
[0, 3, -3],
[0, -3, 3],
[0, 0, 4],
[0, 4, 4],
[4, 0, 0],
[-6, 0, 0],
])
x_test = np.array([
[0, 2, 2],
[0, 1, -1],
[-1, 0, 0],
[0, -1, 0],
[1, -1, 2],
[0, 0, 3],
[0, -2, 0],
[0, 2, -2],
[3, 0, 0],
[-2, 0, 2],
[2, -2, 0],
])
if train_size is not None:
x_train = x_train[:train_size]
if test_size is not None:
x_test = x_test[:test_size]
y_train = [_ground_truth(x) for x in x_train]
y_test = [_ground_truth(x) for x in x_test]
return (x_train, y_train), (x_test, y_test)
class SimpleDataModule(Module):
"""
Set up a `DataLoader` mainly for testing.
"""
def configure(self, binder: Binder):
binder.bind(DataLoader, to=SimpleDataLoader)
|
import ast
import logging
import os
import re
import time
from collections import Counter
from dataclasses import dataclass, field
from logging import Logger
from pathlib import Path
from typing import List, Set, Tuple
import numpy as np
from injector import ClassAssistedBuilder, inject, Module, provider, singleton
from sklearn.utils import shuffle
from tqdm import tqdm
from .data_loader import DataLoader
@inject
@dataclass
class FitnessDataLoader(DataLoader):
"""
Load sport activity data from Endomondo.
Requires endomondoHR_proper.json from https://sites.google.com/eng.ucsd.edu/fitrec-project/home to be stored at simulation/training_data/fitness/endomondoHR_proper.json.
    From the first 5K samples, there are 2842 'bike' and 2158 'run' occurrences.
    Some info from the first 10K samples:
genders: 'male', 'unknown', 'female'
sports: 'bike', 'bike (transport)', 'run', 'kayaking', 'indoor cycling', 'mountain bike', 'orienteering',
'core stability training', 'walk', 'cross-country skiing', 'fitness walking', 'roller skiing'
"""
_logger: Logger
_seed: int = field(default=2, init=False)
_train_split: float = field(default=0.7, init=False)
_classes: Set[str] = field(default_factory=lambda: {'bike', 'run'}, init=False)
def classifications(self) -> List[str]:
return ["BIKING", "RUNNING"]
def load_data(self, train_size: int = None, test_size: int = None) -> (Tuple, Tuple):
self._logger.info("Loading Endomondo fitness data.")
# Look for cached data.
file_identifier = f'fitness-data-{train_size}-{test_size}.npy'
base_path = Path(os.path.dirname(__file__)) / 'cached_data'
os.makedirs(base_path, exist_ok=True)
cache_paths = {
'x_train': base_path / f'x_train-{file_identifier}',
'y_train': base_path / f'y_train-{file_identifier}',
'x_test': base_path / f'x_test-{file_identifier}',
'y_test': base_path / f'y_test-{file_identifier}'
}
# Use if modified in the last day.
if all([p.exists() for p in cache_paths.values()]) and \
all([time.time() - p.stat().st_mtime < 60 * 60 * 24 for p in cache_paths.values()]):
self._logger.info("Loaded cached Endomondo fitness data from %s.", cache_paths)
return (np.load(cache_paths['x_train']), np.load(cache_paths['y_train'])), \
(np.load(cache_paths['x_test']), np.load(cache_paths['y_test']))
data = []
labels = []
data_folder_path = Path(__file__, '../../../../training_data/fitness').resolve()
user_id_to_set = {}
sport_to_label = {
'bike': 0,
'run': 1
}
gender_to_index = {}
if train_size is not None and test_size is not None:
max_num_samples = train_size + test_size
else:
max_num_samples = 10_000
classes = '|'.join(self._classes)
classes_pattern = re.compile(f' \'sport\': \'({classes})\', ')
data_path = data_folder_path / 'endomondoHR_proper.json'
assert data_path.exists(), f"See the documentation for how to download the dataset. It must be stored at {data_path}"
with open(data_path) as f, \
tqdm(f,
desc="Loading data",
unit_scale=True, mininterval=2, unit=" samples",
total=max_num_samples,
) as pbar:
for line in f:
# TODO Keep users in train set mutually exclusive from users in test set.
# Check line before more expensive parsing.
if not classes_pattern.search(line):
continue
record = ast.literal_eval(line)
sport = record['sport']
if sport not in self._classes:
continue
if 'speed' not in record:
continue
label = sport_to_label[sport]
labels.append(label)
heart_rates = record['heart_rate']
gender = gender_to_index.setdefault(record['gender'], len(gender_to_index))
speeds = record['speed']
# Other fields:
# record['longitude']
# record['altitude']
# record['latitude']
# record['id']
# record['timestamp']
# record['userId']
data.append({
# Values to keep as they are:
'rawValues':
[
np.mean(heart_rates) / np.min(heart_rates),
np.median(heart_rates) / np.min(heart_rates),
np.max(speeds),
np.min(speeds),
np.mean(speeds),
np.median(speeds),
],
# Values that need to be converted:
'gender': gender,
})
pbar.update()
if len(data) >= max_num_samples:
break
if train_size is None:
if test_size is None:
train_size = int(self._train_split * len(data))
else:
train_size = len(data) - test_size
if test_size is None:
test_size = len(data) - train_size
# Thresholds for making sure features can be discretized for Naive Bayes.
# Just use training data to make thresholds.
thresholds = np.empty(len(data[0]['rawValues']), dtype=np.int32)
for i in range(len(data[0]['rawValues'])):
thresholds[i] = np.median([d['rawValues'][i] for d in data[:train_size]])
def _featurize(datum):
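            # Binarize each raw value against its training-set median threshold
            # and append a one-hot encoding of the gender index.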
raw_values = np.array(thresholds < datum['rawValues'], dtype=np.int8)
gender_one_hot = np.zeros(len(gender_to_index), dtype=np.int8)
gender_one_hot[datum['gender']] = 1
return np.concatenate([raw_values, gender_one_hot])
if self._logger.isEnabledFor(logging.DEBUG):
self._logger.debug("Labels: %s", Counter(labels))
data, labels = shuffle(data, labels, random_state=self._seed)
x_train = np.array([_featurize(d) for d in data[:train_size]])
y_train = np.array(labels[:train_size])
x_test = np.array([_featurize(d) for d in data[-test_size:]])
y_test = np.array(labels[-test_size:])
np.save(cache_paths['x_train'], x_train, allow_pickle=False)
np.save(cache_paths['y_train'], y_train, allow_pickle=False)
np.save(cache_paths['x_test'], x_test, allow_pickle=False)
np.save(cache_paths['y_test'], y_test, allow_pickle=False)
self._logger.info("Done loading Endomondo fitness data.")
return (x_train, y_train), (x_test, y_test)
@dataclass
class FitnessDataModule(Module):
@provider
@singleton
def provide_data_loader(self, builder: ClassAssistedBuilder[FitnessDataLoader]) -> DataLoader:
return builder.build()
|
import html
import itertools
import os
from collections import Counter
from dataclasses import dataclass, field
from logging import Logger
from pathlib import Path
from typing import Dict, Iterator, List, Tuple
import numpy as np
import pandas as pd
import requests
from injector import ClassAssistedBuilder, Module, inject, provider, singleton
from scipy.sparse import csr_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils import shuffle
from tqdm import tqdm
from .data_loader import DataLoader
from .featuremapping.hashing.token_hash import TokenHash
@inject
@dataclass
class OffensiveDataLoader(DataLoader):
"""
Load offensive data from https://github.com/t-davidson/hate-speech-and-offensive-language.
"""
_logger: Logger
_token_hash: TokenHash
max_num_features: int
_seed: int = field(default=2, init=False)
_train_split: float = field(default=0.7, init=False)
_class_mapping = [
# Hate
0,
# Offensive
0,
# Neither (Safe)
1,
]
def classifications(self) -> List[str]:
return ["OFFENSIVE", "SAFE"]
def load_data(self, train_size: int = None, test_size: int = None) -> (Tuple, Tuple):
self._logger.info("Loading data.")
data_folder_path = Path(__file__,
'../../../../training_data/offensive/hate-speech-and-offensive-language').resolve()
if train_size is not None and test_size is not None:
max_num_samples = train_size + test_size
else:
max_num_samples = None
data_path = data_folder_path / 'labeled_data.csv'
if not data_path.exists():
data_url = 'https://github.com/t-davidson/hate-speech-and-offensive-language/raw/master/data/labeled_data.csv'
self._logger.info("Downloading data from \"%s\" to \"%s\".", data_url, data_path)
r = requests.get(data_url, allow_redirects=True)
r.raise_for_status()
os.makedirs(data_folder_path, exist_ok=True)
with open(data_path, 'wb') as f:
f.write(r.content)
loaded_data = pd.read_csv(data_path)
data = []
labels = []
class_index = list(loaded_data.columns).index('class') + 1
assert class_index > 0
for row in tqdm(loaded_data.itertuples(),
desc="Loading data",
unit_scale=True, mininterval=2, unit=" samples",
total=max_num_samples or len(loaded_data),
):
if max_num_samples is not None and len(data) > max_num_samples:
break
text = row.tweet
text = self._pre_process(text)
data.append(text)
labels.append(self._class_mapping[row[class_index]])
if train_size is None:
if test_size is None:
train_size = int(self._train_split * len(data))
else:
train_size = len(data) - test_size
if test_size is None:
test_size = len(data) - train_size
data, labels = shuffle(data, labels, random_state=self._seed)
x_train = itertools.islice(data, train_size)
# Compute the top features.
t = TfidfVectorizer(max_features=self.max_num_features, norm=None)
t.fit(tqdm(x_train,
desc="Computing top token features",
total=train_size,
unit_scale=True, mininterval=2,
unit=" texts"
))
top_tokens = t.get_feature_names()
self._logger.debug("Some top feature names: %s", top_tokens[:30])
tokenize = t.build_analyzer()
feature_tokens = set(t.get_feature_names())
def _featurize(text: str) -> Dict[int, int]:
result = Counter(tokenize(text))
return {self._token_hash.hash(token): count
for token, count in result.items()
if token in feature_tokens}
x_train = map(_featurize, itertools.islice(data, train_size))
x_train = self._build_sparse_matrix(x_train)
y_train = np.array(labels[:train_size])
x_test = map(_featurize, itertools.islice(data, len(data) - test_size, len(data)))
        # TODO Might have to make sure it has the same number of columns as x_train.
x_test = self._build_sparse_matrix(x_test)
y_test = np.array(labels[-test_size:])
self._logger.info("Done loading data.")
return (x_train, y_train), (x_test, y_test)
def _pre_process(self, text: str) -> str:
""" Handle some simple pre-processing specific to this dataset. """
return html.unescape(text)
def _build_sparse_matrix(self, feature_mapped_data: Iterator[Dict[int, int]]):
# Make a sparse matrix following the term-document example from:
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
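        # Worked example (illustrative): inputs {5: 2, 9: 1} then {5: 3} yield
        # data=[2, 1, 3], indices=[5, 9, 5], indptr=[0, 2, 3]; row i spans
        # data[indptr[i]:indptr[i + 1]].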
data = []
indptr = [0]
indices = []
for feature_indices in feature_mapped_data:
if len(feature_indices) > 0:
i, d = zip(*feature_indices.items())
indices.extend(i)
data.extend(d)
indptr.append(len(indices))
return csr_matrix((data, indices, indptr), dtype=np.uint8)
@dataclass
class OffensiveDataModule(Module):
max_num_features: int = field(default=1000)
@provider
@singleton
def provide_data_loader(self, builder: ClassAssistedBuilder[OffensiveDataLoader]) -> DataLoader:
return builder.build(max_num_features=self.max_num_features)
|
from dataclasses import dataclass, field
from logging import Logger
from typing import List
import numpy as np
from injector import ClassAssistedBuilder, Module, inject, provider, singleton
from keras.datasets import imdb
from .data_loader import DataLoader
@inject
@dataclass
class ImdbDataLoader(DataLoader):
"""
Load data for sentiment analysis of IMDB reviews.
https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification
"""
_logger: Logger
num_words: int = field(default=1000)
def classifications(self) -> List[str]:
return ["NEGATIVE", "POSITIVE"]
def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
self._logger.info("Loading IMDB review data using %d words.", self.num_words)
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=self.num_words)
if train_size is not None:
x_train, y_train = x_train[:train_size], y_train[:train_size]
if test_size is not None:
x_test, y_test = x_test[:test_size], y_test[:test_size]
def get_features(data):
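            # Multi-hot encode each review: set a 1 for every word index that
            # appears, e.g. [3, 5, 3] with num_words=8 becomes
            # [0, 0, 0, 1, 0, 1, 0, 0].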
result = np.zeros((len(data), self.num_words), dtype='int')
for i, x in enumerate(data):
for v in x:
result[i, v] = 1
return result
x_train = get_features(x_train)
x_test = get_features(x_test)
self._logger.info("Done loading IMDB review data.")
return (x_train, y_train), (x_test, y_test)
@dataclass
class ImdbDataModule(Module):
num_words: int = field(default=1000)
@provider
@singleton
def provide_data_loader(self, builder: ClassAssistedBuilder[ImdbDataLoader]) -> DataLoader:
return builder.build(num_words=self.num_words)
|
import unittest
from typing import cast
from injector import Injector
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.featuremapping.hashing.murmurhash3 import MurmurHash3Module
from decai.simulation.data.featuremapping.hashing.token_hash import TokenHash
from decai.simulation.data.offensive_data_loader import OffensiveDataLoader, OffensiveDataModule
from decai.simulation.logging_module import LoggingModule
class TestOffensiveDataLoader(unittest.TestCase):
@classmethod
def setUpClass(cls):
inj = Injector([
LoggingModule,
MurmurHash3Module,
OffensiveDataModule,
])
cls.data_loader = inj.get(DataLoader)
assert isinstance(cls.data_loader, OffensiveDataLoader)
cls.data_loader = cast(OffensiveDataLoader, cls.data_loader)
cls.hash = inj.get(TokenHash)
def test_load(self):
train_size = 20
test_size = 10
(x_train, y_train), (x_test, y_test) = self.data_loader.load_data(train_size=train_size, test_size=test_size)
assert x_train.shape[0] == train_size
assert x_train.shape[0] == y_train.shape[0]
assert x_test.shape[0] == test_size
assert x_test.shape[0] == y_test.shape[0]
assert y_train.shape == (train_size,)
assert y_test.shape == (test_size,)
# Test some values to help avoid regressions.
x_train_values_x, x_train_values_y = x_train[0].nonzero()
self.assertEqual(0, x_train_values_x[0])
self.assertEqual(495653056, x_train_values_y[0])
self.assertEqual(1, x_train[x_train_values_x[0], x_train_values_y[0]])
self.assertEqual(0, x_train_values_x[1])
self.assertEqual(443377497, x_train_values_y[1])
        self.assertEqual(1, x_train[x_train_values_x[1], x_train_values_y[1]])
col = self.hash.hash("you")
self.assertEqual(814527388, col)
self.assertEqual(2, x_train[1, col])
|
import unittest
from typing import cast
from injector import Injector
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.news_data_loader import NewsDataLoader, NewsDataModule
from decai.simulation.logging_module import LoggingModule
class TestNewsDataLoader(unittest.TestCase):
@classmethod
def setUpClass(cls):
inj = Injector([
LoggingModule,
NewsDataModule,
])
cls.data_loader = inj.get(DataLoader)
assert isinstance(cls.data_loader, NewsDataLoader)
cls.data_loader = cast(NewsDataLoader, cls.data_loader)
@unittest.skip("The dataset does not exist on CI test machine.")
def test_load_data(self):
(x_train, y_train), (x_test, y_test) = self.data_loader.load_data(train_size=70, test_size=30)
def test_entities(self):
doc = self.data_loader._nlp("December 25, 2019, John Smith walked to a store and bought an apple.")
actual = self.data_loader._replace_entities(doc)
self.assertEqual("<DATE>, <PERSON> walked to a store and bought an apple.", actual)
|
import unittest
from typing import cast
from injector import Injector
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.ttt_data_loader import TicTacToeDataLoader, TicTacToeDataModule
from decai.simulation.logging_module import LoggingModule
class TestTicTacToeDataLoader(unittest.TestCase):
@classmethod
def setUpClass(cls):
inj = Injector([
LoggingModule,
TicTacToeDataModule,
])
cls.ttt = inj.get(DataLoader)
assert isinstance(cls.ttt, TicTacToeDataLoader)
cls.ttt = cast(TicTacToeDataLoader, cls.ttt)
def test_classifications(self):
classifications = self.ttt.classifications()
assert classifications == ["(0, 0)", "(0, 1)", "(0, 2)",
"(1, 0)", "(1, 1)", "(1, 2)",
"(2, 0)", "(2, 1)", "(2, 2)"]
def test_boards(self):
(x_train, y_train), (x_test, y_test) = self.ttt.load_data()
assert x_train.shape[1] == self.ttt.width * self.ttt.length
assert set(x_train[x_train != 0]) == {1, -1}
assert x_test.shape[1] == self.ttt.width * self.ttt.length
assert set(x_test[x_test != 0]) == {1, -1}
assert set(y_train) <= set(range(9))
assert set(y_test) <= set(range(9))
|
import unittest
from typing import cast
from injector import Injector
from decai.simulation.data.data_loader import DataLoader
from decai.simulation.data.fitness_data_loader import FitnessDataLoader, FitnessDataModule
from decai.simulation.logging_module import LoggingModule
class TestFitnessDataLoader(unittest.TestCase):
@classmethod
def setUpClass(cls):
inj = Injector([
LoggingModule,
FitnessDataModule,
])
cls.loader = inj.get(DataLoader)
assert isinstance(cls.loader, FitnessDataLoader)
cls.loader = cast(FitnessDataLoader, cls.loader)
@unittest.skip("The dataset does not exist on CI test machine.")
def test_load(self):
train_size = 70
test_size = 30
(x_train, y_train), (x_test, y_test) = self.loader.load_data(train_size, test_size)
self.assertEqual(train_size, x_train.shape[0])
self.assertEqual(train_size, y_train.shape[0])
self.assertEqual(test_size, x_test.shape[0])
self.assertEqual(test_size, y_test.shape[0])
|
from typing import List, Optional, Tuple
import numpy as np
from injector import singleton
FeatureIndexMapping = List[int]
@singleton
class FeatureIndexMapper:
"""
Helps with mapping sparse data matrices to compact dense ones
since some classifiers don't work well with sparse data:
* SGDClassifier training needs 32-bit integer indices.
* MultinomialNB training makes the data dense.
This is mostly made to work with 2D data.
"""
def map(self, training_data, testing_data) -> Tuple[np.ndarray, np.ndarray, Optional[FeatureIndexMapping]]:
if isinstance(training_data, np.ndarray):
assert isinstance(testing_data, np.ndarray), \
f"Testing data must also be an ndarray if the training data is an ndarray. Got: {type(testing_data)}."
return training_data, testing_data, None
mapping = sorted(map(int, set(training_data.nonzero()[-1])))
feature_index_to_index_mapping = {v: index for (index, v) in enumerate(mapping)}
# We want: `result_train = training_data[:, mapping].todense()` but this was allocating a large matrix even before calling `todense()`.
# Also tried making a mapping matrix and multiplying by it but that also allocated memory.
result_train = np.zeros(training_data.shape[:-1] + (len(mapping),), dtype=training_data.dtype)
*row_indices, col_indices = training_data.nonzero()
col_indices = tuple(feature_index_to_index_mapping[i] for i in col_indices)
result_train[row_indices, col_indices] = training_data[training_data.nonzero()]
result_test = np.zeros(testing_data.shape[:-1] + (len(mapping),), dtype=testing_data.dtype)
*row_indices, col_indices = testing_data.nonzero()
original_col_indices_used = []
row_indices_used = []
col_indices_mapped = []
for row_index, col_index in zip(*row_indices, col_indices):
index = feature_index_to_index_mapping.get(col_index)
if index is not None:
original_col_indices_used.append(col_index)
row_indices_used.append(row_index)
col_indices_mapped.append(index)
result_test[row_indices_used, col_indices_mapped] = testing_data[row_indices_used, original_col_indices_used]
return result_train, result_test, mapping
|
import unittest
import numpy as np
import scipy.sparse
from injector import Injector
from decai.simulation.data.featuremapping.feature_index_mapper import FeatureIndexMapper
from decai.simulation.logging_module import LoggingModule
class TestFeatureIndexMapper(unittest.TestCase):
@classmethod
def setUpClass(cls):
inj = Injector([
LoggingModule,
])
cls.f = inj.get(FeatureIndexMapper)
def test_map_dense(self):
x_train = np.random.random_sample((10, 3))
x_test = np.random.random_sample((4, x_train.shape[1]))
train, test, feature_index_mapping = self.f.map(x_train, x_test)
self.assertIs(train, x_train)
self.assertIs(test, x_test)
self.assertIsNone(feature_index_mapping)
def test_map_sparse(self):
x_train = np.array([[0, 0, 1, 1, 0], [0, 2, 0, 0, 0]])
x_test = np.array([[1, 0, 1, 0, 1], [0, 0, 3, 0, 0]])
x_train_sparse = scipy.sparse.csr_matrix((17348, 4288315073), dtype=np.uint8)
x_train_sparse[x_train.nonzero()] = x_train[x_train.nonzero()]
x_test_sparse = scipy.sparse.csr_matrix((3333, 21312344), dtype=np.uint8)
x_test_sparse[x_test.nonzero()] = x_test[x_test.nonzero()]
mapped_train, mapped_test, feature_index_mapping = self.f.map(x_train_sparse, x_test_sparse)
self.assertEqual(int, type(feature_index_mapping[0]))
self.assertEqual([1, 2, 3], feature_index_mapping)
self.assertTrue(mapped_train.sum(axis=0).all(),
"Every column should have at least one non-zero value.")
x_train_expected = np.zeros((x_train_sparse.shape[0], len(feature_index_mapping)), dtype=np.uint8)
x_train_expected[0, 1] = 1
x_train_expected[0, 2] = 1
x_train_expected[1, 0] = 2
self.assertTrue(np.array_equal(x_train_expected, mapped_train), mapped_train)
x_test_expected = np.zeros((x_test_sparse.shape[0], len(feature_index_mapping)), dtype=np.uint8)
x_test_expected[0, 1] = 1
x_test_expected[1, 1] = 3
self.assertTrue(np.array_equal(x_test_expected, mapped_test), mapped_test)
|
import mmh3
from injector import Module
from decai.simulation.data.featuremapping.hashing.token_hash import TokenHash
class MurmurHash3(TokenHash):
def hash(self, text: str) -> int:
# Made to be equivalent to the JavaScript demo code.
return mmh3.hash(text, signed=False)
class MurmurHash3Module(Module):
def configure(self, binder):
binder.bind(TokenHash, to=MurmurHash3)
|
from abc import ABC, abstractmethod
class TokenHash(ABC):
"""
    Hashes tokens to unsigned integers.
Useful for sparse representation.
"""
@abstractmethod
def hash(self, text: str) -> int:
raise NotImplementedError
|
import unittest
from decai.simulation.data.featuremapping.hashing.murmurhash3 import MurmurHash3
class TestMurmurHash3(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.h = MurmurHash3()
def test_classifications(self):
h = self.h.hash("hey")
assert type(h) == int
assert h == 318325784
assert self.h.hash("blockchain") == 3905957473
|
from setuptools import setup, find_packages
setup(
name='accbpg',
version='0.2',
packages=find_packages(exclude=['tests*']),
license='MIT',
description='Accelerated Bregman proximal gradient (ABPG) methods',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
install_requires=['numpy', 'scipy'],
url='https://github.com/Microsoft/accbpg',
author='Lin Xiao',
author_email='lin.xiao@gmail.com'
)
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
class RSmoothFunction:
"""
Relatively-Smooth Function, can query f(x) and gradient
"""
def __call__(self, x):
assert 0, "RSmoothFunction: __call__(x) is not defined"
def gradient(self, x):
assert 0, "RSmoothFunction: gradient(x) is not defined"
def func_grad(self, x, flag):
"""
flag=0: function, flag=1: gradient, flag=2: function & gradient
"""
assert 0, "RSmoothFunction: func_grad(x, flag) is not defined"
class DOptimalObj(RSmoothFunction):
"""
f(x) = - log(det(H*diag(x)*H')) where H is an m by n matrix, m < n
"""
def __init__(self, H):
self.H = H
self.m = H.shape[0]
self.n = H.shape[1]
assert self.m < self.n, "DOptimalObj: need m < n"
def __call__(self, x):
return self.func_grad(x, flag=0)
def gradient(self, x):
return self.func_grad(x, flag=1)
def func_grad(self, x, flag=2):
assert x.size == self.n, "DOptimalObj: x.size not equal to n"
assert x.min() >= 0, "DOptimalObj: x needs to be nonnegative"
HXHT = np.dot(self.H*x, self.H.T)
if flag == 0: # only return function value
f = -np.log(np.linalg.det(HXHT))
return f
HXHTinvH = np.dot(np.linalg.inv(HXHT), self.H)
g = - np.sum(self.H * HXHTinvH, axis=0)
if flag == 1: # only return gradient
return g
# return both function value and gradient
f = -np.log(np.linalg.det(HXHT))
return f, g
def func_grad_slow(self, x, flag=2):
assert x.size == self.n, "DOptimalObj: x.size not equal to n"
assert x.min() >= 0, "DOptimalObj: x needs to be nonnegative"
sx = np.sqrt(x)
        Hsx = self.H * sx  # using numpy array broadcasting
HXHT = np.dot(Hsx,Hsx.T)
if flag == 0: # only return function value
f = -np.log(np.linalg.det(HXHT))
return f
Hsx = np.linalg.solve(HXHT, self.H)
g = np.empty(self.n)
for i in range(self.n):
g[i] = - np.dot(self.H[:,i], Hsx[:,i])
if flag == 1: # only return gradient
return g
# return both function value and gradient
f = -np.log(np.linalg.det(HXHT))
return f, g
class PoissonRegression(RSmoothFunction):
"""
f(x) = D_KL(b, Ax) for linear inverse problem A * x = b
"""
def __init__(self, A, b):
assert A.shape[0] == b.shape[0], "A and b sizes not matching"
self.A = A
self.b = b
self.m = A.shape[0]
self.n = A.shape[1]
def __call__(self, x):
return self.func_grad(x, flag=0)
def gradient(self, x):
return self.func_grad(x, flag=1)
def func_grad(self, x, flag=2):
assert x.size == self.n, "PoissonRegression: x.size not equal to n."
Ax = np.dot(self.A, x)
if flag == 0:
fx = sum( self.b * np.log(self.b / Ax) + Ax - self.b )
return fx
# use array broadcasting
g = ((1-self.b/Ax).reshape(self.m, 1) * self.A).sum(axis=0)
# line above is the same as the following code
#g = np.zeros(x.shape)
#for i in range(self.m):
# g += (1 - self.b[i]/np.dot(self.A[i,:], x)) * self.A[i,:]
if flag == 1:
return g
# return both function value and gradient
fx = sum( self.b * np.log(self.b / Ax) + Ax - self.b )
return fx, g
class KLdivRegression(RSmoothFunction):
"""
f(x) = D_KL(Ax, b) for linear inverse problem A * x = b
"""
def __init__(self, A, b):
assert A.shape[0] == b.shape[0], "A and b size not matching"
self.A = A
self.b = b
self.m = A.shape[0]
self.n = A.shape[1]
def __call__(self, x):
return self.func_grad(x, flag=0)
def gradient(self, x):
return self.func_grad(x, flag=1)
def func_grad(self, x, flag=2):
assert x.size == self.n, "NonnegRegression: x.size not equal to n."
Ax = np.dot(self.A, x)
if flag == 0:
fx = sum( Ax * np.log(Ax / self.b) - Ax + self.b )
return fx
# use array broadcasting
g = (np.log(Ax/self.b).reshape(self.m, 1) * self.A).sum(axis=0)
# line above is the same as the following code
#g = np.zeros(x.shape)
#for i in range(self.m):
# g += np.log(Ax[i]/self.b[i]) * self.A[i,:]
if flag == 1:
return g
# return both function value and gradient
fx = sum( Ax * np.log(Ax / self.b) - Ax + self.b )
return fx, g
#######################################################################
class LegendreFunction:
"""
Function of Legendre type, used as the kernel of Bregman divergence for
composite optimization
minimize_{x in C} f(x) + Psi(x)
where f is L-smooth relative to a Legendre function h(x),
Psi(x) is an additional simple convex function.
"""
def __call__(self, x):
assert 0, "LegendreFunction: __call__(x) is not defined."
def extra_Psi(self, x):
return 0
def gradient(self, x):
assert 0, "LegendreFunction: gradient(x) is not defined."
def divergence(self, x, y):
"""
Return D(x,y) = h(x) - h(y) - <h'(y), x-y>
"""
assert 0, "LegendreFunction: divergence(x,y) is not defined."
def prox_map(self, g, L):
"""
Return argmin_{x in C} { Psi(x) + <g, x> + L * h(x) }
"""
assert 0, "LegendreFunction: prox_map(x, L) is not defined."
def div_prox_map(self, y, g, L):
"""
Return argmin_{x in C} { Psi(x) + <g, x> + L * D(x,y) }
default implementation by calling prox_map(g - L*g(y), L)
"""
assert y.shape == g.shape, "Vectors y and g should have same size."
assert L > 0, "Relative smoothness constant L should be positive."
return self.prox_map(g - L*self.gradient(y), L)
class BurgEntropy(LegendreFunction):
"""
h(x) = - sum_{i=1}^n log(x[i]) for x > 0
"""
def __call__(self, x):
assert x.min()>0, "BurgEntropy only takes positive arguments."
return -sum(np.log(x))
def gradient(self, x):
assert x.min()>0, "BurgEntropy only takes positive arguments."
return -1/x
def divergence(self, x, y):
assert x.shape == y.shape, "Vectors x and y are of different sizes."
assert x.min() > 0 and y.min() > 0, "Entries of x or y not positive."
return sum(x/y - np.log(x/y) - 1)
def prox_map(self, g, L):
"""
Return argmin_{x > 0} { <g, x> + L * h(x) }
        Derived classes should override this method when needed.
"""
assert L > 0, "BurgEntropy prox_map only takes positive L value."
assert g.min() > 0, "BurgEntropy prox_map only takes positive value."
return L / g
def div_prox_map(self, y, g, L):
"""
        Return argmin_{x > 0} { <g, x> + L * D(x,y) }
This is a general function that works for all derived classes
"""
assert y.shape == g.shape, "Vectors y and g are of different sizes."
assert y.min() > 0 and L > 0, "Either y or L is not positive."
return self.prox_map(g - L*self.gradient(y), L)
class BurgEntropyL1(BurgEntropy):
"""
h(x) = - sum_{i=1}^n log(x[i]) used in context of solving the problem
min_{x > 0} f(x) + lamda * ||x||_1
"""
def __init__(self, lamda=0, x_max=1e4):
assert lamda >= 0, "BurgEntropyL1: lambda should be nonnegative."
self.lamda = lamda
self.x_max = x_max
def extra_Psi(self, x):
"""
return lamda * ||x||_1
"""
return self.lamda * x.sum()
def prox_map(self, g, L):
"""
Return argmin_{x > 0} { lambda * ||x||_1 + <g, x> + L h(x) }
!!! This proximal mapping may have unbounded solution x->infty
"""
assert L > 0, "BurgEntropyL1: prox_map only takes positive L."
assert g.min() > -self.lamda, "Not getting positive solution."
#g = np.maximum(g, -self.lamda + 1.0 / self.x_max)
return L / (self.lamda + g)
class BurgEntropyL2(BurgEntropy):
"""
h(x) = - sum_{i=1}^n log(x[i]) used in context of solving the problem
min_{x > 0} f(x) + (lambda/2) ||x||_2^2
"""
def __init__(self, lamda=0):
assert lamda >= 0, "BurgEntropyL2: lamda should be nonnegative."
self.lamda = lamda
def extra_Psi(self, x):
"""
return (lamda/2) * ||x||_2^2
"""
return (self.lamda / 2) * np.dot(x, x)
def prox_map(self, g, L):
"""
Return argmin_{x > 0} { (lamda/2) * ||x||_2^2 + <g, x> + L * h(x) }
"""
assert L > 0, "BurgEntropyL2: prox_map only takes positive L value."
gg = g / L
lamda_L = self.lamda / L
return (np.sqrt(gg*gg + 4*lamda_L) - gg) / (2 * lamda_L)
class BurgEntropySimplex(BurgEntropy):
"""
h(x) = - sum_{i=1}^n log(x[i]) used in the context of solving
min_{x \in C} f(x) where C is the standard simplex, with Psi(x) = 0
"""
def __init__(self, eps=1e-8):
# eps is precision for solving prox_map using Newton's method
assert eps > 0, "BurgEntropySimplex: eps should be positive."
self.eps = eps
def prox_map(self, g, L):
"""
Return argmin_{x in C} { <g, x> + L h(x) } where C is unit simplex
"""
assert L > 0, "BergEntropySimplex prox_map only takes positive L."
gg = g / L
cmin = -gg.min() # choose cmin to ensure min(gg+c) >= 0
        # first use bisection to find c such that sum(1/(gg+c)) >= 1
c = cmin + 1
while sum(1/(gg+c))-1 < 0:
c = (cmin + c) / 2.0
# then use Newton's method to find optimal c
fc = sum(1/(gg+c))-1
while abs(fc) > self.eps:
fpc = sum(-1.0/(gg+c)**2)
c = c - fc / fpc
fc = sum(1/(gg+c))-1
x = 1.0/(gg+c)
return x
class ShannonEntropy(LegendreFunction):
"""
h(x) = sum_{i=1}^n x[i]*log(x[i]) for x >= 0, note h(0) = 0
"""
def __init__(self, delta=1e-20):
self.delta = delta
def __call__(self, x):
assert x.min() >= 0, "ShannonEntropy takes nonnegative arguments."
xx = np.maximum(x, self.delta)
return sum( xx * np.log(xx) )
def gradient(self, x):
assert x.min() >= 0, "ShannonEntropy takes nonnegative arguments."
xx = np.maximum(x, self.delta)
return 1.0 + np.log(xx)
def divergence(self, x, y):
assert x.shape == y.shape, "Vectors x and y are of different shapes."
assert x.min() >= 0 and y.min() >= 0, "Some entries are negative."
#for i in range(x.size):
# if x[i] > 0 and y[i] == 0:
# return np.inf
return sum(x*np.log((x+self.delta)/(y+self.delta))) + (sum(y)-sum(x))
def prox_map(self, g, L):
"""
Return argmin_{x >= 0} { <g, x> + L * h(x) }
"""
assert L > 0, "ShannonEntropy prox_map require L > 0."
return np.exp(-g/L - 1)
def div_prox_map(self, y, g, L):
"""
Return argmin_{x >= 0} { <g, x> + L * D(x,y) }
"""
assert y.shape == g.shape, "Vectors y and g are of different sizes."
        assert y.min() >= 0 and L > 0, "Some entries of y are negative."
#gg = g/L - self.gradient(y)
#return self.prox_map(gg, 1)
return y * np.exp(-g/L)
class ShannonEntropyL1(ShannonEntropy):
"""
h(x) = sum_{i=1}^n x[i]*log(x[i]) for x >= 0, note h(0) = 0
used in the context of min_{x >=0 } f(x) + lamda * ||x||_1
"""
def __init__(self, lamda=0, delta=1e-20):
ShannonEntropy.__init__(self, delta)
self.lamda = lamda
def extra_Psi(self, x):
"""
return lamda * ||x||_1
"""
return self.lamda * x.sum()
def prox_map(self, g, L):
"""
Return argmin_{x >= 0} { lamda * ||x||_1 + <g, x> + L * h(x) }
"""
return ShannonEntropy.prox_map(self, self.lamda + g, L)
def div_prox_map(self, y, g, L):
"""
Return argmin_{x >= 0} { lamda * ||x||_1 + <g, x> + L * D(x,y) }
"""
return ShannonEntropy.div_prox_map(self, y, self.lamda + g, L)
class ShannonEntropySimplex(ShannonEntropy):
"""
h(x) = sum_{i=1}^n x[i]*log(x[i]) for x >= 0, note h(0) = 0
used in the context of min_{x in C } f(x) where C is standard simplex
"""
def prox_map(self, g, L):
"""
Return argmin_{x in C} { <g, x> + L * h(x) } where C is unit simplex
"""
assert L > 0, "ShannonEntropy prox_map require L > 0."
x = np.exp(-g/L - 1)
return x / sum(x)
def div_prox_map(self, y, g, L):
"""
Return argmin_{x in C} { <g, x> + L*d(x,y) } where C is unit simplex
"""
assert y.shape == g.shape, "Vectors y and g are of different shapes."
assert y.min() > 0 and L > 0, "prox_map needs positive arguments."
x = y * np.exp(-g/L)
return x / sum(x)
class SumOf2nd4thPowers(LegendreFunction):
"""
h(x) = (1/2)||x||_2^2 + (M/4)||x||_2^4
"""
def __init__(self, M):
self.M = M
def __call__(self, x):
normsq = np.dot(x, x)
return 0.5 * normsq + (self.M / 4) * normsq**2
def gradient(self, x):
normsq = np.dot(x, x)
return (1 + self.M * normsq) * x
def divergence(self, x, y):
assert x.shape == y.shape, "Bregman div: x and y not same shape."
return self.__call__(x) - (self.__call__(y)
+ np.dot(self.gradient(y), x-y))
class SquaredL2Norm(LegendreFunction):
"""
h(x) = (1/2)||x||_2^2
"""
def __call__(self, x):
return 0.5*np.dot(x, x)
def gradient(self, x):
return x
def divergence(self, x, y):
assert x.shape == y.shape, "SquaredL2Norm: x and y not same shape."
xy = x - y
return 0.5*np.dot(xy, xy)
def prox_map(self, g, L):
assert L > 0, "SquaredL2Norm: L should be positive."
return -(1/L)*g
def div_prox_map(self, y, g, L):
assert y.shape == g.shape and L > 0, "Vectors y and g not same shape."
return y - (1/L)*g
class PowerNeg1(LegendreFunction):
"""
h(x) = 1/x for x>0
"""
def __call__(self, x):
return 1/x
def gradient(self, x):
return -1/(x*x)
def divergence(self, x, y):
assert x.shape == y.shape, "SquaredL2Norm: x and y not same shape."
xy = x - y
return np.sum(xy*xy/(x*y*y))
def prox_map(self, g, L):
assert L > 0, "SquaredL2Norm: L should be positive."
return np.sqrt(L/g)
class L2L1Linf(LegendreFunction):
"""
    using h(x) = (1/2)||x||_2^2 in solving problems of the form
minimize f(x) + lamda * ||x||_1
subject to ||x||_inf <= B
"""
def __init__(self, lamda=0, B=1):
self.lamda = lamda
self.B = B
def __call__(self, x):
return 0.5*np.dot(x, x)
def extra_Psi(self, x):
"""
return lamda * ||x||_1
"""
return self.lamda * np.sum(abs(x))
def gradient(self, x):
"""
gradient of h(x) = (1/2)||x||_2^2
"""
return x
def divergence(self, x, y):
"""
Bregman divergence D(x, y) = (1/2)||x-y||_2^2
"""
assert x.shape == y.shape, "L2L1Linf: x and y not same shape."
xy = x - y
return 0.5*np.dot(xy, xy)
def prox_map(self, g, L):
"""
Return argmin_{x in C} { Psi(x) + <g, x> + L * h(x) }
"""
assert L > 0, "L2L1Linf: L should be positive."
x = -(1.0/L) * g
threshold = self.lamda / L
x[abs(x) <= threshold] = 0
x[x > threshold] -= threshold
x[x < -threshold] += threshold
np.clip(x, -self.B, self.B, out=x)
return x
def div_prox_map(self, y, g, L):
"""
Return argmin_{x in C} { Psi(x) + <g, x> + L * D(x,y) }
"""
assert y.shape == g.shape and L > 0, "Vectors y and g not same shape."
return self.prox_map(g - L*y, L)
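# Hypothetical usage sketch (not part of the library): prox_map above is
# soft-thresholding at lamda/L followed by clipping to the box [-B, B].
def _demo_l2l1linf_prox():
    h = L2L1Linf(lamda=1.0, B=1.0)
    g = np.array([-4.0, -0.5, 0.5, 4.0])
    x = h.prox_map(g, L=2.0)
    # -(1/L)*g = [2, 0.25, -0.25, -2]; threshold 0.5; then clip to [-1, 1]
    assert np.allclose(x, [1.0, 0.0, 0.0, -1.0])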
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import time
def BPG(f, h, L, x0, maxitrs, epsilon=1e-14, linesearch=True, ls_ratio=1.2,
verbose=True, verbskip=1):
"""
    Bregman Proximal Gradient (BPG) method for min_{x in C} f(x) + Psi(x):
x(k+1) = argmin_{x in C} { Psi(x) + <f'(x(k)), x> + L(k) * D_h(x,x(k))}
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
maxitrs: maximum number of iterations
        epsilon: stop if |F(x[k])-F(x[k-1])| < epsilon, where F(x)=f(x)+Psi(x)
linesearch: whether or not perform line search (True or False)
ls_ratio: backtracking line search parameter >= 1
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
    Returns (x, F, Ls, T):
x: the last iterate of BPG
F: array storing F(x[k]) for all k
Ls: array storing local Lipschitz constants obtained by line search
T: array storing time used up to iteration k
"""
if verbose:
print("\nBPG_LS method for min_{x in C} F(x) = f(x) + Psi(x)")
print(" k F(x) Lk time")
start_time = time.time()
F = np.zeros(maxitrs)
Ls = np.ones(maxitrs) * L
T = np.zeros(maxitrs)
x = np.copy(x0)
for k in range(maxitrs):
fx, g = f.func_grad(x)
F[k] = fx + h.extra_Psi(x)
T[k] = time.time() - start_time
if linesearch:
L = L / ls_ratio
x1 = h.div_prox_map(x, g, L)
while f(x1) > fx + np.dot(g, x1-x) + L*h.divergence(x1, x):
L = L * ls_ratio
x1 = h.div_prox_map(x, g, L)
x = x1
else:
x = h.div_prox_map(x, g, L)
# store and display computational progress
Ls[k] = L
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:6.1f}".format(k, F[k], L, T[k]))
# stopping criteria
if k > 0 and abs(F[k]-F[k-1]) < epsilon:
            break
F = F[0:k+1]
Ls = Ls[0:k+1]
T = T[0:k+1]
return x, F, Ls, T
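# Minimal usage sketch (illustrative, not from the source): run BPG on a toy
# quadratic with a Euclidean reference function, where relative smoothness is
# just the usual Lipschitz-gradient condition. The two tiny classes below are
# assumptions standing in for the library's f/h interfaces; BPG only needs
# f.func_grad, f.__call__, h.extra_Psi, h.divergence and h.div_prox_map.
def _demo_bpg_quadratic():
    class _Quadratic:
        # f(x) = 0.5*||x - c||_2^2
        def __init__(self, c):
            self.c = c
        def __call__(self, x):
            return 0.5 * np.dot(x - self.c, x - self.c)
        def func_grad(self, x):
            return self(x), x - self.c
    class _EuclideanH:
        # h(x) = 0.5*||x||_2^2, so D_h(x,y) = 0.5*||x - y||_2^2 and Psi = 0
        def extra_Psi(self, x):
            return 0.0
        def divergence(self, x, y):
            d = x - y
            return 0.5 * np.dot(d, d)
        def div_prox_map(self, y, g, L):
            return y - g / L
    c = np.array([1.0, -2.0, 3.0])
    x, F, Ls, T = BPG(_Quadratic(c), _EuclideanH(), L=1.0, x0=np.zeros(3),
                      maxitrs=100, verbose=False)
    assert np.allclose(x, c, atol=1e-6)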
def solve_theta(theta, gamma, gainratio=1):
"""
solve theta_k1 from the equation
(1-theta_k1)/theta_k1^gamma = gainratio * 1/theta_k^gamma
using Newton's method, starting from theta
"""
ckg = theta**gamma / gainratio
cta = theta
eps = 1e-6 * theta
phi = cta**gamma - ckg*(1-cta)
while abs(phi) > eps:
drv = gamma * cta**(gamma-1) + ckg
cta = cta - phi / drv
phi = cta**gamma - ckg*(1-cta)
return cta
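# Worked check (illustrative): with gamma = 2 and gainratio = 1, the equation
# (1-theta')/theta'^2 = 1/theta^2 is the classical recursion used in Nesterov's
# accelerated gradient method, so the returned root should satisfy it closely.
def _check_solve_theta():
    theta = 0.5
    t1 = solve_theta(theta, gamma=2)
    assert abs((1 - t1) / t1**2 - 1 / theta**2) < 1e-4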
def ABPG(f, h, L, x0, gamma, maxitrs, epsilon=1e-14, theta_eq=False,
restart=False, restart_rule='g', verbose=True, verbskip=1):
"""
Accelerated Bregman Proximal Gradient (ABPG) method for solving
minimize_{x in C} f(x) + Psi(x):
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
gamma: triangle scaling exponent (TSE) for Bregman div D_h(x,y)
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
theta_eq: calculate theta_k by solving equality using Newton's method
restart: restart the algorithm when overshooting (True or False)
restart_rule: 'f' for function increasing or 'g' for gradient angle
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
    Returns (x, F, G, T):
        x: the last iterate of ABPG
F: array storing F(x[k]) for all k
G: triangle scaling gains D(xk,yk) / D(zk,zk_1) / theta_k^gamma
T: array storing time used up to iteration k
"""
if verbose:
print("\nABPG method for minimize_{x in C} F(x) = f(x) + Psi(x)")
print(" k F(x) theta" +
" TSG D(x+,y) D(z+,z) time")
start_time = time.time()
F = np.zeros(maxitrs)
G = np.zeros(maxitrs)
T = np.zeros(maxitrs)
x = np.copy(x0)
z = np.copy(x0)
theta = 1.0 # initialize theta = 1 for updating with equality
kk = 0 # separate counter for theta_k, easy for restart
for k in range(maxitrs):
# function value at previous iteration
fx = f(x)
F[k] = fx + h.extra_Psi(x)
T[k] = time.time() - start_time
# Update three iterates x, y and z
z_1 = z
x_1 = x # only required for restart mode
if theta_eq and kk > 0:
theta = solve_theta(theta, gamma)
else:
theta = gamma / (kk + gamma)
y = (1-theta)*x + theta*z_1
g = f.gradient(y)
z = h.div_prox_map(z_1, g, theta**(gamma-1) * L)
x = (1-theta)*x + theta*z
# compute triangle scaling quantities
dxy = h.divergence(x, y)
dzz = h.divergence(z, z_1)
Gdr = dxy / dzz / theta**gamma
# store and display computational progress
G[k] = Gdr
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e} {5:10.3e} {6:6.1f}".format(
k, F[k], theta, Gdr, dxy, dzz, T[k]))
# restart if gradient predicts objective increase
kk += 1
if restart and k > 0:
#if k > 0 and F[k] > F[k-1]:
#if np.dot(g, x-x_1) > 0:
if (restart_rule == 'f' and F[k] > F[k-1]) or (restart_rule == 'g' and np.dot(g, x-x_1) > 0):
theta = 1.0 # reset theta = 1 for updating with equality
kk = 0 # reset kk = 0 for theta = gamma/(kk+gamma)
z = x # in either case, reset z = x and also y
# stopping criteria
if dzz < epsilon:
            break
F = F[0:k+1]
G = G[0:k+1]
T = T[0:k+1]
return x, F, G, T
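# Usage sketch (assumed, not from the source; the import path is a guess based
# on the companion applications module in this package):
#     from applications import D_opt_design
#     f, h, L, x0 = D_opt_design(20, 100, randseed=1)
#     x, F, G, T = ABPG(f, h, L, x0, gamma=2, maxitrs=500, verbose=False)
#     # F[-1] approximates the optimal value -log det(V*diag(x)*V')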
def ABPG_expo(f, h, L, x0, gamma0, maxitrs, epsilon=1e-14, delta=0.2,
theta_eq=True, checkdiv=False, Gmargin=10, restart=False,
restart_rule='g', verbose=True, verbskip=1):
"""
Accelerated Bregman Proximal Gradient method with exponent adaption for
minimize_{x in C} f(x) + Psi(x)
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
        gamma0: initial triangle scaling exponent (TSE) for D_h(x,y) (>2)
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
delta: amount to decrease TSE for exponent adaption
theta_eq: calculate theta_k by solving equality using Newton's method
checkdiv: check triangle scaling inequality for adaption (True/False)
Gmargin: extra gain margin allowed for checking TSI
restart: restart the algorithm when overshooting (True or False)
restart_rule: 'f' for function increasing or 'g' for gradient angle
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
    Returns (x, F, Gamma, G, T):
        x: the last iterate of ABPG_expo
F: array storing F(x[k]) for all k
Gamma: gamma_k obtained at each iteration
G: triangle scaling gains D(xk,yk)/D(zk,zk_1)/theta_k^gamma_k
T: array storing time used up to iteration k
"""
if verbose:
print("\nABPG_expo method for min_{x in C} F(x) = f(x) + Psi(x)")
print(" k F(x) theta gamma" +
" TSG D(x+,y) D(z+,z) time")
start_time = time.time()
F = np.zeros(maxitrs)
G = np.zeros(maxitrs)
Gamma = np.ones(maxitrs) * gamma0
T = np.zeros(maxitrs)
gamma = gamma0
x = np.copy(x0)
z = np.copy(x0)
theta = 1.0 # initialize theta = 1 for updating with equality
kk = 0 # separate counter for theta_k, easy for restart
for k in range(maxitrs):
# function value at previous iteration
fx = f(x)
F[k] = fx + h.extra_Psi(x)
T[k] = time.time() - start_time
# Update three iterates x, y and z
z_1 = z
x_1 = x
if theta_eq and kk > 0:
theta = solve_theta(theta, gamma)
else:
theta = gamma / (kk + gamma)
y = (1-theta)*x_1 + theta*z_1
#g = f.gradient(y)
fy, g = f.func_grad(y)
condition = True
while condition: # always execute at least once per iteration
z = h.div_prox_map(z_1, g, theta**(gamma-1) * L)
x = (1-theta)*x_1 + theta*z
# compute triangle scaling quantities
dxy = h.divergence(x, y)
dzz = h.divergence(z, z_1)
Gdr = dxy / dzz / theta**gamma
if checkdiv:
condition = (dxy > Gmargin * (theta**gamma) * dzz )
else:
condition = (f(x) > fy + np.dot(g, x-y) + theta**gamma*L*dzz)
if condition and gamma > 1:
gamma = max(gamma - delta, 1)
else:
condition = False
# store and display computational progress
G[k] = Gdr
Gamma[k] = gamma
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e} {5:10.3e} {6:10.3e} {7:6.1f}".format(
k, F[k], theta, gamma, Gdr, dxy, dzz, T[k]))
# restart if gradient predicts objective increase
kk += 1
if restart:
#if k > 0 and F[k] > F[k-1]:
#if np.dot(g, x-x_1) > 0:
if (restart_rule == 'f' and F[k] > F[k-1]) or (restart_rule == 'g' and np.dot(g, x-x_1) > 0):
theta = 1.0 # reset theta = 1 for updating with equality
kk = 0 # reset kk = 0 for theta = gamma/(kk+gamma)
z = x # in either case, reset z = x and also y
# stopping criteria
if dzz < epsilon:
            break
F = F[0:k+1]
Gamma = Gamma[0:k+1]
G = G[0:k+1]
T = T[0:k+1]
return x, F, Gamma, G, T
def ABPG_gain(f, h, L, x0, gamma, maxitrs, epsilon=1e-14, G0=1,
ls_inc=1.2, ls_dec=1.2, theta_eq=True, checkdiv=False,
restart=False, restart_rule='g', verbose=True, verbskip=1):
"""
Accelerated Bregman Proximal Gradient (ABPG) method with gain adaption for
minimize_{x in C} f(x) + Psi(x):
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
        gamma: triangle scaling exponent (TSE) for Bregman distance D_h(x,y)
G0: initial value for triangle scaling gain
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
ls_inc: factor of increasing gain (>=1)
ls_dec: factor of decreasing gain (>=1)
theta_eq: calculate theta_k by solving equality using Newton's method
checkdiv: check triangle scaling inequality for adaption (True/False)
restart: restart the algorithm when overshooting (True/False)
restart_rule: 'f' for function increasing or 'g' for gradient angle
verbose: display computational progress (True/False)
verbskip: number of iterations to skip between displays
    Returns (x, F, Gain, Gdiv, Gavg, T):
        x: the last iterate of ABPG_gain
        F: array storing F(x[k]) for all k
        Gain: triangle scaling gains G_k obtained by LS at each iteration
        Gdiv: triangle scaling gains D(xk,yk)/D(zk,zk_1)/theta_k^gamma
Gavg: geometric mean of G_k at all steps up to iteration k
T: array storing time used up to iteration k
"""
if verbose:
print("\nABPG_gain method for min_{x in C} F(x) = f(x) + Psi(x)")
print(" k F(x) theta Gk" +
" TSG D(x+,y) D(z+,z) Gavg time")
start_time = time.time()
F = np.zeros(maxitrs)
Gain = np.ones(maxitrs) * G0
Gdiv = np.zeros(maxitrs)
Gavg = np.zeros(maxitrs)
T = np.zeros(maxitrs)
x = np.copy(x0)
z = np.copy(x0)
G = G0
# logGavg = (gamma*log(G0) + log(G_1) + ... + log(Gk)) / (k+gamma)
sumlogG = gamma * np.log(G)
theta = 1.0 # initialize theta = 1 for updating with equality
kk = 0 # separate counter for theta_k, easy for restart
for k in range(maxitrs):
# function value at previous iteration
fx = f(x)
F[k] = fx + h.extra_Psi(x)
T[k] = time.time() - start_time
# Update three iterates x, y and z
z_1 = z
x_1 = x
# adaptive option: always try a smaller Gain first before line search
G_1 = G
theta_1 = theta
G = G / ls_dec
condition = True
while condition:
if kk > 0:
if theta_eq:
theta = solve_theta(theta_1, gamma, G / G_1)
else:
alpha = G / G_1
theta = theta_1*((1+alpha*(gamma-1))/(gamma*alpha+theta_1))
y = (1-theta)*x_1 + theta*z_1
#g = f.gradient(y)
fy, g = f.func_grad(y)
z = h.div_prox_map(z_1, g, theta**(gamma-1) * G * L)
x = (1-theta)*x_1 + theta*z
# compute triangle scaling quantities
dxy = h.divergence(x, y)
dzz = h.divergence(z, z_1)
if dzz < epsilon:
break
Gdr = dxy / dzz / theta**gamma
if checkdiv:
condition = (Gdr > G )
else:
condition = (f(x) > fy + np.dot(g,x-y) + theta**gamma*G*L*dzz)
if condition:
G = G * ls_inc
# store and display computational progress
Gain[k] = G
Gdiv[k] = Gdr
sumlogG += np.log(G)
Gavg[k] = np.exp(sumlogG / (gamma + k))
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e} {5:10.3e} {6:10.3e} {7:10.3e} {8:6.1f}".format(
k, F[k], theta, G, Gdr, dxy, dzz, Gavg[k], T[k]))
# restart if gradient predicts objective increase
kk += 1
if restart:
#if k > 0 and F[k] > F[k-1]:
#if np.dot(g, x-x_1) > 0:
if (restart_rule == 'f' and F[k] > F[k-1]) or (restart_rule == 'g' and np.dot(g, x-x_1) > 0):
theta = 1.0 # reset theta = 1 for updating with equality
kk = 0 # reset kk = 0 for theta = gamma/(kk+gamma)
z = x # in either case, reset z = x and also y
# stopping criteria
if dzz < epsilon:
            break
F = F[0:k+1]
Gain = Gain[0:k+1]
Gdiv = Gdiv[0:k+1]
Gavg = Gavg[0:k+1]
T = T[0:k+1]
return x, F, Gain, Gdiv, Gavg, T
def ABDA(f, h, L, x0, gamma, maxitrs, epsilon=1e-14, theta_eq=True,
verbose=True, verbskip=1):
"""
Accelerated Bregman Dual Averaging (ABDA) method for solving
minimize_{x in C} f(x) + Psi(x)
Inputs:
f, h, L: f is L-smooth relative to h, and Psi is defined within h
x0: initial point to start algorithm
gamma: triangle scaling exponent (TSE) for Bregman distance D_h(x,y)
maxitrs: maximum number of iterations
epsilon: stop if D_h(z[k],z[k-1]) < epsilon
theta_eq: calculate theta_k by solving equality using Newton's method
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
    Returns (x, F, G, T):
        x: the last iterate of ABDA
F: array storing F(x[k]) for all k
G: triangle scaling gains D(xk,yk)/D(zk,zk_1)/theta_k^gamma
T: array storing time used up to iteration k
"""
# Simple restart schemes for dual averaging method do not work!
restart = False
if verbose:
print("\nABDA method for min_{x in C} F(x) = f(x) + Psi(x)")
print(" k F(x) theta" +
" TSG D(x+,y) D(z+,z) time")
start_time = time.time()
F = np.zeros(maxitrs)
G = np.zeros(maxitrs)
T = np.zeros(maxitrs)
x = np.copy(x0)
z = np.copy(x0)
theta = 1.0 # initialize theta = 1 for updating with equality
kk = 0 # separate counter for theta_k, easy for restart
gavg = np.zeros(x.size)
csum = 0
for k in range(maxitrs):
# function value at previous iteration
fx = f(x)
F[k] = fx + h.extra_Psi(x)
T[k] = time.time() - start_time
# Update three iterates x, y and z
z_1 = z
x_1 = x
if theta_eq and kk > 0:
theta = solve_theta(theta, gamma)
else:
theta = gamma / (kk + gamma)
y = (1-theta)*x_1 + theta*z_1
g = f.gradient(y)
gavg = gavg + theta**(1-gamma) * g
csum = csum + theta**(1-gamma)
z = h.prox_map(gavg/csum, L/csum)
x = (1-theta)*x_1 + theta*z
# compute triangle scaling quantities
dxy = h.divergence(x, y)
dzz = h.divergence(z, z_1)
Gdr = dxy / dzz / theta**gamma
# store and display computational progress
G[k] = Gdr
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e} {5:10.3e} {6:6.1f}".format(
k, F[k], theta, Gdr, dxy, dzz, T[k]))
kk += 1
# restart does not work for ABDA (restart = False)
if restart:
if k > 0 and F[k] > F[k-1]:
#if np.dot(g, x-x_1) > 0: # this does not work for dual averaging
theta = 1.0 # reset theta = 1 for updating with equality
kk = 0 # reset kk = 0 for theta = gamma/(kk+gamma)
z = x # in either case, reset z = x and also y
gavg = np.zeros(x.size) # this is why restart does not work
csum = 0
# stopping criteria
if dzz < epsilon:
            break
F = F[0:k+1]
G = G[0:k+1]
T = T[0:k+1]
return x, F, G, T
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
from .functions import *
from .utils import load_libsvm_file
def D_opt_libsvm(filename):
"""
Generate a D-Optimal Design problem from LIBSVM datasets
"""
X, y = load_libsvm_file(filename)
if X.shape[0] > X.shape[1]:
H = X.T.toarray('C')
else:
H = X.toarray('C')
n = H.shape[1]
f = DOptimalObj(H)
h = BurgEntropySimplex()
L = 1.0
x0 = (1.0/n)*np.ones(n)
return f, h, L, x0
def D_opt_design(m, n, randseed=-1):
"""
Generate a random instance of the D-Optimal Design problem
    m, n: size of design matrix H is m by n with m < n
Return f, h, L, x0:
f: f(x) = - log(det(H*diag(x)*H'))
        h: Burg entropy with simplex constraint
L: L = 1
x0: initial point is center of simplex
"""
if randseed > 0:
np.random.seed(randseed)
H = np.random.randn(m,n)
f = DOptimalObj(H)
h = BurgEntropySimplex()
L = 1.0
x0 = (1.0/n)*np.ones(n)
return f, h, L, x0
def D_opt_KYinit(V):
"""
Return a sparse initial point for MVE or D-optimal design problem
    proposed by Kumar and Yildirim (JOTA 126(1):1-21, 2005)
"""
m, n = V.shape
if n <= 2*m:
return (1.0/n)*np.ones(n)
I = []
Q = np.zeros((m, m))
# Using (unstable) Gram-Schmidt without calling QR repetitively
for i in range(m):
b = np.random.rand(m)
q = np.copy(b)
for j in range(i):
Rij = np.dot(Q[:,j], b)
q = q - Rij * Q[:,j]
qV = np.dot(q, V)
kmax = np.argmax(qV)
kmin = np.argmin(qV)
I.append(kmax)
I.append(kmin)
v = V[:,kmin] - V[:,kmax]
q = np.copy(v)
for j in range(i):
Rij = np.dot(Q[:,j], v)
q = q - Rij * Q[:,j]
Q[:,i] = q / np.linalg.norm(q)
x0 = np.zeros(n)
x0[I] = np.ones(len(I)) / len(I)
# in case there are repeated entries in I, scale to sum 1
x0 /= x0.sum()
return x0
def Poisson_regrL1(m, n, noise=0.01, lamda=0, randseed=-1, normalizeA=True):
"""
Generate a random instance of L1-regularized Poisson regression problem
minimize_{x >= 0} D_KL(b, Ax) + lamda * ||x||_1
where
A: m by n nonnegative matrix
b: nonnegative vector of length m
noise: noise level to generate b = A * x + noise
        lamda: L1 regularization weight
        normalizeA: whether or not to normalize columns of A
Return f, h, L, x0:
f: f(x) = D_KL(b, Ax)
h: Burg entropy with L1 regularization
L: L = ||b||_1
x0: initial point, scaled version of all-one vector
"""
if randseed > 0:
np.random.seed(randseed)
A = np.random.rand(m,n)
if normalizeA:
A = A / A.sum(axis=0) # scaling to make column sums equal to 1
x = np.random.rand(n) / n
xavg = x.sum() / x.size
x = np.maximum(x - xavg, 0) * 10
b = np.dot(A, x) + noise * (np.random.rand(m) - 0.5)
assert b.min() > 0, "need b > 0 for nonnegative regression."
f = PoissonRegression(A, b)
# L1 regularization often not enough for convergence!
h = BurgEntropyL1(lamda)
L = b.sum()
# Initial point should be far from 0 in order for ARDA to work well!
x0 = (1.0/n)*np.ones(n) * 10
return f, h, L, x0
def Poisson_regrL2(m, n, noise=0.01, lamda=0, randseed=-1, normalizeA=True):
"""
Generate a random instance of L2-regularized Poisson regression problem
minimize_{x >= 0} D_KL(b, Ax) + (lamda/2) * ||x||_2^2
where
A: m by n nonnegative matrix
b: nonnegative vector of length m
noise: noise level to generate b = A * x + noise
        lamda: L2 regularization weight
        normalizeA: whether or not to normalize columns of A
Return f, h, L, x0:
f: f(x) = D_KL(b, Ax)
        h: Burg entropy with L2 regularization
L: L = ||b||_1
x0: initial point is center of simplex
"""
if randseed > 0:
np.random.seed(randseed)
A = np.random.rand(m,n)
if normalizeA:
A = A / A.sum(axis=0) # scaling to make column sums equal to 1
x = np.random.rand(n) / n
xavg = x.sum() / x.size
x = np.maximum(x - xavg, 0) * 10
b = np.dot(A, x) + noise * (np.random.rand(m) - 0.5)
assert b.min() > 0, "need b > 0 for nonnegative regression."
f = PoissonRegression(A, b)
h = BurgEntropyL2(lamda)
L = b.sum()
# Initial point should be far from 0 in order for ARDA to work well!
x0 = (1.0/n)*np.ones(n)
return f, h, L, x0
def KL_nonneg_regr(m, n, noise=0.01, lamdaL1=0, randseed=-1, normalizeA=True):
"""
Generate a random instance of L1-regularized KL regression problem
minimize_{x >= 0} D_KL(Ax, b) + lamda * ||x||_1
where
A: m by n nonnegative matrix
b: nonnegative vector of length m
noise: noise level to generate b = A * x + noise
        lamdaL1: L1 regularization weight
        normalizeA: whether or not to normalize columns of A
Return f, h, L, x0:
f: f(x) = D_KL(Ax, b)
h: h(x) = Shannon entropy (with L1 regularization as Psi)
L: L = max(sum(A, axis=0)), maximum column sum
x0: initial point, scaled version of all-one vector
"""
if randseed > 0:
np.random.seed(randseed)
A = np.random.rand(m,n)
if normalizeA:
A = A / A.sum(axis=0) # scaling to make column sums equal to 1
x = np.random.rand(n)
b = np.dot(A, x) + noise * (np.random.rand(m) - 0.5)
assert b.min() > 0, "need b > 0 for nonnegative regression."
f = KLdivRegression(A, b)
h = ShannonEntropyL1(lamdaL1)
L = max( A.sum(axis=0) ) #L = 1.0 if columns of A are normalized
x0 = 0.5*np.ones(n)
#x0 = (1.0/n)*np.ones(n)
return f, h, L, x0
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .functions import *
from .algorithms import BPG, ABPG, ABPG_expo, ABPG_gain, ABDA
from .applications import D_opt_libsvm, D_opt_design, D_opt_KYinit, Poisson_regrL1, Poisson_regrL2, KL_nonneg_regr
from .D_opt_alg import D_opt_FW, D_opt_FW_away
from .trianglescaling import plotTSE, plotTSE0
from .plotfigs import plot_comparisons |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
#import matplotlib.pyplot as plt
from matplotlib.pyplot import *
def plot_comparisons(axis, y_vals, labels, x_vals=[], plotdiff=False,
yscale="linear", xscale="linear",
xlim=[], ylim=[], xlabel="", ylabel="", legendloc=0,
linestyles=['k:', 'g-', 'b-.', 'k-', 'r--', 'k-', 'm-'],
linedash=[[1,2], [], [4,2,1,2], [], [4,2], [], [], []]):
"""
Plot comparison figures using matplotlib.pyplot.
"""
y_shift = 0
if plotdiff:
y_shift = y_vals[0].min()
for i in range(len(y_vals)):
y_shift = min(y_shift, y_vals[i].min())
for i in range(len(y_vals)):
if len(x_vals) > 0:
xi = x_vals[i]
else:
xi = np.arange(len(y_vals[i])) + 1
axis.plot(xi, y_vals[i]-y_shift, linestyles[i], label=labels[i],
dashes=linedash[i])
axis.set_xscale(xscale)
axis.set_yscale(yscale)
axis.set_xlabel(xlabel)
axis.set_ylabel(ylabel)
if legendloc == "no":
pass
elif legendloc == "outside":
axis.legend(bbox_to_anchor=(1.05, 1), loc="upper left", borderaxespad=0)
else:
axis.legend(loc=legendloc)
if len(xlim) > 0:
axis.set_xlim(xlim)
if len(ylim) > 0:
axis.set_ylim(ylim)
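# Usage sketch (illustrative, with made-up data): compare two convergence
# curves on a log-scale y axis; `subplots` comes from the pyplot star-import.
def _demo_plot_comparisons():
    fig, ax = subplots()
    F1 = np.geomspace(1.0, 1e-6, 50)
    F2 = np.geomspace(1.0, 1e-6, 80)
    plot_comparisons(ax, [F1, F2], labels=["method 1", "method 2"],
                     yscale="log", xlabel="iteration", ylabel="objective gap")
    # fig.savefig("comparison.png") or show()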
|
import os.path
import numpy as np
import scipy.sparse as sparse
def _open_file(filename):
_, ext = os.path.splitext(filename)
if ext == '.gz':
import gzip
return gzip.open(filename, 'rt')
elif ext == '.bz2':
import bz2
return bz2.open(filename, 'rt')
else:
return open(filename, 'r')
def load_libsvm_file(filename, dtype=np.float64,
n_features=None, zero_based="auto"):
"""
Load dataset in svmlight / libsvm format into sparse CSR matrix.
Inputs:
filename: a string including file path and name
dtype: numpy dtype of feature values
n_features: number of features, optional
zero_based: boolean or "auto", optional
Returns:
X: scipy.sparse.csr_matrix of shape (n_samples, n_features)
y: numpy.ndarray of shape (n_samples,)
"""
labels = []
data = []
indptr = []
indices = []
with _open_file(filename) as f:
for line in f:
# skip comments in the line
idx_comment = line.find('#')
if idx_comment >= 0:
line = line[:idx_comment]
line_parts = line.split()
if len(line_parts) == 0:
continue
labels.append(float(line_parts[0]))
indptr.append(len(data))
prev_idx = -1
for i in range(1,len(line_parts)):
idx_str, value = line_parts[i].split(':',1)
idx = int(idx_str)
if idx < 0 or (not zero_based and idx == 0):
raise ValueError(
"Invalid index {0:d} in LibSVM data file.".format(idx))
if idx <= prev_idx:
raise ValueError("Feature indices in LibSVM data file"
"should be sorted and unique.")
indices.append(idx)
data.append(dtype(value))
prev_idx = idx
# construct data arrays
indptr.append(len(data))
data = np.array(data)
indptr = np.array(indptr)
indices = np.array(indices)
    if zero_based is False or (zero_based == "auto" and indices.min() > 0):
indices -= 1
if n_features is None:
n_features = indices.max() + 1
else:
if n_features < indices.max() + 1:
n_features = indices.max() + 1
print("Warning: n_features increased to match data.")
shape = (indptr.shape[0] - 1, n_features)
X = sparse.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
y = np.array(labels)
return X, y
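# Illustrative round-trip (assumed example, not part of the utilities): write a
# tiny two-sample LIBSVM file and load it back as a 2x3 CSR matrix.
def _demo_load_libsvm_file():
    import os
    import tempfile
    fd, path = tempfile.mkstemp(suffix=".txt")
    with os.fdopen(fd, "w") as f:
        f.write("1 1:0.5 3:1.5\n-1 2:2.0 # inline comment\n")
    X, y = load_libsvm_file(path)
    os.remove(path)
    assert X.shape == (2, 3) and list(y) == [1.0, -1.0]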
def shuffle_data(X, y):
'''
    We need to return X and y here, since whole-array assignment in numpy does
    not modify the input arguments, i.e., it does NOT behave as passing by reference.
'''
index = np.arange(len(y))
np.random.shuffle(index)
X = X[index,:]
y = y[index]
return X, y
def mnist_2digits(X, y, d1, d2):
index1 = np.nonzero(y==d1)
index2 = np.nonzero(y==d2)
ycopy = y.copy()
ycopy[index1] = 1
ycopy[index2] = -1
index = np.concatenate((index1[0], index2[0]))
np.random.shuffle(index)
Xd1d2 = X[index, :]
yd1d2 = ycopy[index]
return Xd1d2, yd1d2
def binary_error_rate(X, y, w, bias=0):
if sparse.isspmatrix(X):
yp = np.sign( X * w + bias )
else:
yp = np.sign( np.dot(X, w) + bias )
return (1 - np.dot(yp, y)/len(y))/2
def rmse(X, y, w, bias=0):
if sparse.isspmatrix(X):
yp = X * w + bias
else:
yp = np.dot(X, w) + bias
error2 = (yp - y)**2
return np.sqrt(error2.mean())
def row_norm_squared(X):
"return squared 2-norms of each row"
X2 = sparse.csr_matrix((X.data**2, X.indices, X.indptr), X.shape)
return np.squeeze(np.asarray(X2.sum(1)))
def load_sido(filename):
with np.load(filename) as D:
data = D['Xdata']
indptr = D['Xindptr']
indices = D['Xindices']
y = D['y']
shape = D['shape']
X = sparse.csr_matrix((data, indices, indptr), shape)
return X, y
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import time
def D_opt_FW(V, x0, eps, maxitrs, verbose=True, verbskip=1):
"""
Solve the D-optimal design problem by the Frank-Wolfe algorithm
minimize - log(det(V*diag(x)*V'))
subject to x >= 0 and sum_i x_i=1
where V is m by n matrix and x belongs to n-dimensional simplex
Inputs:
V: matrix of size m by n with m < n
x0: initial point
eps: precision for optimality conditions (complementary slackness)
maxitrs: maximum number of iterations
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
Returns (x, F, SP, SN, T):
        x: the last iterate of the Frank-Wolfe method
F: array storing F(x[k]) for all k
SP: positive slackness
SN: negative slackness
T: array storing time used up to iteration k
"""
start_time = time.time()
m, n = V.shape
F = np.zeros(maxitrs)
SP = np.zeros(maxitrs)
SN = np.zeros(maxitrs)
T = np.zeros(maxitrs)
x = np.copy(x0)
VXVT = np.dot(V*x, V.T)
detVXVT = np.linalg.det(VXVT)
H = np.linalg.inv(VXVT)
# compute w = - gradient # This step cost m^2*n
w = np.sum(V * np.dot(H, V), axis=0)
if verbose:
print("\nSolving D-opt design problem using Frank-Wolfe method")
print(" k F(x) pos_slack neg_slack time")
for k in range(maxitrs):
F[k] = - np.log(detVXVT)
T[k] = time.time() - start_time
# compute w = - gradient # This step cost m^2*n
#w = np.sum(V * np.dot(H, V), axis=0)
# check approximate optimality conditions
i = np.argmax(w)
w_xpos = w[x>0]
j = np.argmin(w_xpos)
eps_pos = w[i] / m - 1
eps_neg = 1 - w_xpos[j] / m
SP[k] = eps_pos
SN[k] = eps_neg
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:10.3e} {4:6.1f}".format(
k, F[k], eps_pos, eps_neg, T[k]))
if eps_pos <= eps and eps_neg <= eps:
break
t = (w[i] / m - 1) / (w[i] - 1)
x *= (1 - t)
x[i] += t
HVi = np.dot(H, V[:,i])
H = (H - (t / (1 + t * (w[i] - 1))) * np.outer(HVi, HVi)) / (1 - t)
detVXVT *= np.power(1 - t, m - 1) * (1 + t * (w[i] - 1))
# compute w more efficiently # This step cost m*n
w = (w - (t / (1 + t * (w[i] - 1))) * np.dot(HVi, V)**2 ) / (1 - t)
F = F[0:k+1]
SP = SP[0:k+1]
SN = SN[0:k+1]
T = T[0:k+1]
return x, F, SP, SN, T
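# Usage sketch (assumed example): run the Frank-Wolfe solver on a random design
# matrix; the iterate always stays on the unit simplex by construction.
def _demo_d_opt_fw():
    np.random.seed(1)
    m, n = 10, 100
    V = np.random.randn(m, n)
    x0 = np.ones(n) / n
    x, F, SP, SN, T = D_opt_FW(V, x0, eps=1e-3, maxitrs=1000, verbose=False)
    assert abs(x.sum() - 1) < 1e-8 and x.min() >= 0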
def D_opt_FW_away(V, x0, eps, maxitrs, verbose=True, verbskip=1):
"""
Solve the D-optimal design problem by Frank-Wolfe (Wolfe-Atwood) algorithm
minimize - log(det(V*diag(x)*V'))
subject to x >= 0 and sum_i x_i=1
where V is m by n matrix and x belongs to n-dimensional simplex.
This is equivalent to the Frank-Wolfe algorithm with Away steps.
Inputs:
V: matrix of size m by n with m < n
x0: initial point
eps: precision for optimality conditions (complementary slackness)
maxitrs: maximum number of iterations
verbose: display computational progress (True or False)
verbskip: number of iterations to skip between displays
Returns (x, F, SP, SN, T):
        x: the last iterate of the Frank-Wolfe method
F: array storing F(x[k]) for all k
SP: positive slackness
SN: negative slackness
T: array storing time used up to iteration k
"""
start_time = time.time()
m, n = V.shape
F = np.zeros(maxitrs)
SP = np.zeros(maxitrs)
SN = np.zeros(maxitrs)
T = np.zeros(maxitrs)
x = np.copy(x0)
VXVT = np.dot(V*x, V.T)
detVXVT = np.linalg.det(VXVT)
H = np.linalg.inv(VXVT)
# compute w = - gradient # This step cost m^2*n
w = np.sum(V * np.dot(H, V), axis=0)
if verbose:
print("\nSolving D-opt design problem using Frank-Wolfe method with away steps")
print(" k F(x) pos_slack neg_slack time")
for k in range(maxitrs):
F[k] = np.log(np.linalg.det(H))
# the following can be much faster but often inaccurate!
#F[k] = - np.log(detVXVT)
T[k] = time.time() - start_time
# compute w = - gradient # This step cost m^2*n
#w = np.sum(V * np.dot(H, V), axis=0)
# check approximate optimality conditions
i = np.argmax(w)
ww = w - w[i] # shift the array so that ww.max() = 0
        j = np.argmin(np.where(x > 1.0e-8, ww, 0.0))  # restrict away step to coordinates with x[j] > 0
#j = np.argmin(ww * [x > 0])
eps_pos = w[i] / m - 1
eps_neg = 1 - w[j] / m
SP[k] = eps_pos
SN[k] = eps_neg
if verbose and k % verbskip == 0:
print("{0:6d} {1:10.3e} {2:10.3e} {3:10.3e} {4:6.1f}".format(
k, F[k], eps_pos, eps_neg, T[k]))
if eps_pos <= eps and eps_neg <= eps:
break
if eps_pos >= eps_neg:
t = (w[i] / m - 1) / (w[i] - 1)
x *= (1 - t)
x[i] += t
HVi = np.dot(H, V[:,i])
H = (H - (t / (1 - t + t * w[i])) * np.outer(HVi, HVi)) / (1 - t)
detVXVT *= np.power(1 - t, m - 1) * (1 + t * (w[i] - 1))
# compute w more efficiently # This step cost m*n
w = (w - (t / (1 - t + t * w[i])) * np.dot(HVi, V)**2 ) / (1 - t)
else: # Wolfe's awaystep
t = min((1 - w[j] / m) / (w[j] - 1), x[j] / (1 - x[j]))
x *= (1 + t)
x[j] -= t
HVj = np.dot(H, V[:,j])
H = (H + (t / (1 + t - t * w[j])) * np.outer(HVj, HVj)) / (1 + t)
            detVXVT *= np.power(1 + t, m - 1) * (1 + t - t * w[j])
# compute w more efficiently # This step cost m*n
w = (w + (t / (1 + t - t * w[j])) * np.dot(HVj, V)**2 ) / (1 + t)
F = F[0:k+1]
SP = SP[0:k+1]
SN = SN[0:k+1]
T = T[0:k+1]
return x, F, SP, SN, T
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
Example of logistic regression with L1 regularization and Linf bounds
minimize_x f(x) = (1/m) * sum_{i=1}^m log(1 + exp(-b_i*(ai'*x)))
subject to x in R^n, and ||x||_inf <= B
The objective f is 1-relative smooth relative to (1/2)||x||_2^2.
"""
import numpy as np
from functions import RSmoothFunction, L2L1Linf, SquaredL2Norm
from algorithms import BPG, ABPG_gain
class LogisticRegression(RSmoothFunction):
"""
f(x) = (1/m)*sum_{i=1}^m log(1 + exp(-b_i*(ai'*x))) with ai in R^n, bi in R
"""
def __init__(self, A, b):
assert len(b) == A.shape[0], "Logistic Regression: len(b) != m"
self.bA = np.reshape(b, [len(b),1]) * A
self.m = A.shape[0]
self.n = A.shape[1]
def __call__(self, x):
return self.func_grad(x, flag=0)
def gradient(self, x):
return self.func_grad(x, flag=1)
def func_grad(self, x, flag=2):
assert x.size == self.n, "Logistic Regression: x.size not equal to n"
bAx = np.dot(self.bA, x)
loss = - bAx
mask = bAx > -50
loss[mask] = np.log(1 + np.exp(-bAx[mask]))
f = np.sum(loss) / self.m
if flag == 0:
return f
p = -1/(1+np.exp(bAx))
g = np.dot(p, self.bA) / self.m
if flag == 1:
return g
return f, g
def test_L2L1Linf():
m = 100
n = 200
A = np.random.randn(m, n)
#b = np.sign(A[:, 0])
    b = np.sign(np.random.rand(m) - 0.5)  # random +/-1 labels (rand alone is always positive)
f = LogisticRegression(A, b)
#h = SquaredL2Norm()
h = L2L1Linf(lamda=1.0/m, B=1)
L = 0.25
x0 = np.zeros(n)
maxitrs = 100
x1, F1, G1, _ = BPG(f, h, L, x0, maxitrs, verbskip=10)
x2, F2, G2, _, _, _ = ABPG_gain(f, h, L, x0, gamma=2, maxitrs=maxitrs,
restart=False, verbskip=10)
if __name__ == "__main__":
test_L2L1Linf() |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import matplotlib.pyplot as plt
from .functions import *
def plotTSE(h, dim=10, nTriples=10, nThetas=100, R=1, onSimplex=True,
randseed=-1):
"""
Plot estimated triangle scaling exponents of Bregman distance.
"""
if randseed >= 0:
np.random.seed(randseed)
plt.figure()
for k in range(nTriples):
x = R * np.random.rand(dim)
y = R * np.random.rand(dim)
z = R * np.random.rand(dim)
if onSimplex:
x = x / x.sum()
y = y / y.sum()
z = z / z.sum()
theta = np.arange(1.0/nThetas, 1, 1.0/nThetas)
expnt = np.zeros(theta.shape)
dyz = h.divergence(y, z)
for i in range(theta.size):
c = theta[i]
dtheta = h.divergence((1-c)*x+c*y, (1-c)*x+c*z)
expnt[i] = np.log(dtheta / dyz) / np.log(c)
#expnt[i] = (np.log(dtheta) - np.log(dyz)) / np.log(c)
plt.plot(theta, expnt)
plt.xlim([0,1])
#plt.ylim([0,5])
#plt.xlabel(r'$\theta$')
#plt.ylabel(r'$\hat{\gamma}(\theta)$')
plt.tight_layout()
def plotTSE0(h, dim=10, xscale=1, yscale=1, zscale=2, nThetas=1000, maxTheta=1):
"""
Plot estimated triangle scaling exponents of Bregman distance.
"""
plt.figure()
# test for extreme cases
#x = np.zeros(dim)
x = xscale*np.ones(dim)
#x = np.random.rand(dim)
y = yscale*np.ones(dim)
z = zscale*np.ones(dim)
#y = yscale*np.random.rand(dim)
#z = zscale*np.random.rand(dim)
theta = np.arange(1.0/nThetas, maxTheta, 1.0/nThetas)
expnt = np.zeros(theta.shape)
dyz = h.divergence(y, z)
for i in range(theta.size):
c = theta[i]
dtheta = h.divergence((1-c)*x+c*y, (1-c)*x+c*z)
expnt[i] = np.log(dtheta / dyz) / np.log(c)
#expnt[i] = (np.log(dtheta) - np.log(dyz)) / np.log(c)
plt.plot(theta, expnt)
plt.xlim([0,maxTheta])
#plt.ylim([0,5])
#plt.xlabel(r'$\theta$')
#plt.ylabel(r'$\hat{\gamma}(\theta)$')
plt.tight_layout()
if __name__ == "__main__":
#h = ShannonEntropy()
#h = BurgEntropy()
h = PowerNeg1()
#h = SquaredL2Norm()
#h = SumOf2nd4thPowers(1)
plotTSE(h, nThetas=1000)
#plotTSE0(h, xscale=1e-8, yscale=10, zscale=20, nThetas=10000, maxTheta=1e-2)
|
"""
This demo code for Adafruit's CircuitPlayground Express (CPX) is
compatible with the Device Simulator Express Visual Studio Code extension.
The extension allows you to code CircuitPython for your
CircuitPlayground Express (CPX) by testing and debugging on
the device simulator, before running your code on the actual
device. The serial monitor easily allows you to
observe device output.
Download the extension here:
https://marketplace.visualstudio.com/items?itemName=ms-python.devicesimulatorexpress
To view printed output when the device is running,
use the following command in Visual Studio Code:
"Device Simulator Express: Open Serial Monitor"
Copyright (c) 2019 Microsoft
"""
import random
import time
from adafruit_circuitplayground.express import cpx
# Set this to False to turn off the capacitive touch tones
TOUCH_PIANO = True
# NeoPixel color names
WHITE = (50, 50, 50)
DARK_ORANGE = (80, 44, 0)
ORANGE = (244, 117, 33)
YELLOW_ORANGE = (216, 59, 1)
BLACK = (0, 0, 0)
# Dim the lights a bit, they're bright
cpx.pixels.brightness = 0.3
# SPEAKER - Play startup noise on boot
cpx.play_file("Fanfare.wav")
def wheel(position):
# Return color value for position
if position < 0 or position > 255:
return BLACK
if position < 85:
return ORANGE
elif position < 170:
position -= 85
return YELLOW_ORANGE
else:
position -= 170
return WHITE
lights_on = True # Lights on or off
led_on = False # LED on or off
current_pixel = 0 # Counter for all 10 pixels
last_switch = cpx.switch # Last position of the switch
while True:
# LIGHTS - This makes a swirling pattern of orange colors!
if lights_on:
for pixel_pos in range(10):
color = wheel(25 * ((current_pixel + pixel_pos) % 10))
cpx.pixels[pixel_pos] = [int(c * ((10 - (current_pixel + pixel_pos) % 10)) / 10.0) for c in color]
# Each time around we tick off one pixel at a time
if cpx.switch: # depending on the switch we'll go clockwise
current_pixel += 1
if current_pixel > 9:
current_pixel = 0
else: # or counter clockwise, flip the switch to change direction
current_pixel -= 1
if current_pixel < 0:
current_pixel = 9
# BUTTONS - Press and hold to make the lights temporarily dimmer or brighter
if cpx.button_a:
print("Button A pressed - make lights dimmer")
cpx.pixels.brightness = 0.1
if cpx.button_b:
print("Button B pressed - make lights brighter")
cpx.pixels.brightness = 0.5
if not cpx.button_a and not cpx.button_b:
# Go back to default brightness if neither button is pressed
cpx.pixels.brightness = 0.3
# SWITCH - Check the switch
if cpx.switch:
if last_switch != cpx.switch:
print("Switch moved left")
else:
if last_switch != cpx.switch:
print("Switch moved right")
last_switch = cpx.switch
# CAPACITIVE TOUCH - Touch A1 - A7 on the device to play music
if TOUCH_PIANO:
if cpx.touch_A4:
cpx.play_tone(524, 0.25)
elif cpx.touch_A5:
cpx.play_tone(588, 0.25)
elif cpx.touch_A6:
cpx.play_tone(660, 0.25)
elif cpx.touch_A7:
cpx.play_tone(698, 0.25)
elif cpx.touch_A1:
cpx.play_tone(784, 0.25)
elif cpx.touch_A2:
cpx.play_tone(880, 0.25)
elif cpx.touch_A3:
cpx.play_tone(988, 0.25)
# SENSORS - Print sensor data every time the lights go around
if current_pixel == 0:
x, y, z = cpx.acceleration
print("Temperature: %0.1f *C" % cpx.temperature)
print("Light Level: %d" % cpx.light)
print("Accelerometer: (%0.1f, %0.1f, %0.1f) m/s^2" % (x, y, z))
print("-" * 40)
# SHAKE - Look for a shake with the given threshold
if cpx.shake(shake_threshold=20):
# Turn off lights and pause sensor reporting
cpx.pixels.fill(BLACK)
# Switch the neopixels on and off
lights_on = not lights_on
# Switch the red LED on and off
led_on = not led_on
time.sleep(0.02)
    # LED - Turn the little LED next to the USB connector on or off
cpx.red_led = led_on
# Go back to the beginning of the while True loop! |
import os
import logging
import flask
from flask import request, jsonify, abort
from flask import json
from flask_cors import CORS
from dapr.clients import DaprClient
logging.basicConfig(level=logging.INFO)
app = flask.Flask(__name__)
CORS(app)
@app.route('/order', methods=['GET'])
def getOrder():
app.logger.info('order service called')
with DaprClient() as d:
d.wait(5)
try:
id = request.args.get('id')
if id:
# Get the order status from Cosmos DB via Dapr
state = d.get_state(store_name='orders', key=id)
if state.data:
resp = jsonify(json.loads(state.data))
else:
resp = jsonify('no order with that id found')
resp.status_code = 200
return resp
else:
resp = jsonify('Order "id" not found in query string')
                resp.status_code = 400
return resp
except Exception as e:
app.logger.info(e)
return str(e)
finally:
app.logger.info('completed order call')
@app.route('/order', methods=['POST'])
def createOrder():
app.logger.info('create order called')
with DaprClient() as d:
d.wait(5)
try:
# Get ID from the request body
id = request.json['id']
if id:
# Save the order to Cosmos DB via Dapr
d.save_state(store_name='orders', key=id, value=json.dumps(request.json))
resp = jsonify(request.json)
resp.status_code = 200
return resp
else:
resp = jsonify('Order "id" not found in query string')
resp.status_code = 500
return resp
except Exception as e:
app.logger.info(e)
return str(e)
finally:
            app.logger.info('completed create order call')
@app.route('/order', methods=['DELETE'])
def deleteOrder():
app.logger.info('delete called in the order service')
with DaprClient() as d:
d.wait(5)
id = request.args.get('id')
if id:
# Delete the order status from Cosmos DB via Dapr
try:
d.delete_state(store_name='orders', key=id)
return f'Item {id} successfully deleted', 200
except Exception as e:
app.logger.info(e)
return abort(500)
finally:
app.logger.info('completed order delete')
else:
resp = jsonify('Order "id" not found in query string')
resp.status_code = 400
return resp
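# Example requests (illustrative only; the port and the JSON payload are
# assumptions, and the 'orders' Dapr state store component must be configured,
# e.g. backed by Cosmos DB, for these calls to succeed):
#   curl -X POST http://localhost:5000/order -H "Content-Type: application/json" -d '{"id": "42", "item": "widget"}'
#   curl "http://localhost:5000/order?id=42"
#   curl -X DELETE "http://localhost:5000/order?id=42"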
app.run(host='0.0.0.0', port=os.getenv('PORT', '5000')) |
import logging
from typing import Optional, Dict, Any, List, Tuple, NamedTuple
import torch
from torch import nn
from data.edits import Edit
from dpu_utils.ptutils import BaseComponent
from mlcomponents.seqdecoding import SeqDecoder
from mlcomponents.seqencoder import SequenceEncoder
class CopyEditor(BaseComponent):
LOGGER = logging.getLogger('CopyEditor')
def __init__(self, name: str, input_sequence_encoder: SequenceEncoder,
edit_encoder: SequenceEncoder,
output_sequence_decoder: SeqDecoder,
hyperparameters: Optional[Dict[str, Any]] = None,
learn_bidirectional_edits: bool=True) -> None:
super(CopyEditor, self).__init__(name, hyperparameters)
self.__input_sequence_encoder = input_sequence_encoder
self.__edit_encoder = edit_encoder
self.__output_sequence_decoder = output_sequence_decoder
self.__learn_reverse_edits = learn_bidirectional_edits
self.__reverse_edit_layer = None
def _finalize_component_metadata_and_model(self) -> None:
if self.__learn_reverse_edits:
self.__reverse_edit_layer = nn.Linear(
in_features=self.__edit_encoder.summary_state_size,
out_features=self.__edit_encoder.summary_state_size
)
@property
def input_sequence_encoder(self):
return self.__input_sequence_encoder
@property
def output_sequence_decoder(self):
return self.__output_sequence_decoder
@property
def edit_encoder(self):
return self.__edit_encoder
@classmethod
def default_hyperparameters(cls) -> Dict[str, Any]:
return { }
def _load_metadata_from_sample(self, data_to_load: Edit) -> None:
self.__input_sequence_encoder.load_metadata_from_sample(data_to_load.input_sequence)
if self.__learn_reverse_edits:
self.__input_sequence_encoder.load_metadata_from_sample(data_to_load.output_sequence)
self.__output_sequence_decoder.load_metadata_from_sample(SeqDecoder.InputOutputSequence(
input_sequence=data_to_load.input_sequence,
output_sequence=data_to_load.output_sequence
))
if self.__learn_reverse_edits:
self.__output_sequence_decoder.load_metadata_from_sample(SeqDecoder.InputOutputSequence(
input_sequence=data_to_load.output_sequence,
output_sequence=data_to_load.input_sequence,
))
        # If the edit encoder shares token encoders with the input/output encoders,
        # tokens will be counted more than once.
self.__edit_encoder.load_metadata_from_sample(data_to_load)
TensorizedData = NamedTuple('CopyEditorTensorizedData', [
('input_sequence', Any),
('input_sequence_r', Any),
('output_sequence', Any),
('output_sequence_r', Any),
('aligned_edits', Any),
])
def load_data_from_sample(self, data_to_load: Edit) -> Optional['CopyEditor.TensorizedData']:
return self.TensorizedData(
input_sequence=self.__input_sequence_encoder.load_data_from_sample(data_to_load.input_sequence),
input_sequence_r=self.__input_sequence_encoder.load_data_from_sample(data_to_load.output_sequence)
if self.__learn_reverse_edits else None,
output_sequence=self.__output_sequence_decoder.load_data_from_sample(data_to_load),
output_sequence_r=self.__output_sequence_decoder.load_data_from_sample(SeqDecoder.InputOutputSequence(
input_sequence= data_to_load.output_sequence,
output_sequence= data_to_load.input_sequence,
)) if self.__learn_reverse_edits else None,
aligned_edits=self.__edit_encoder.load_data_from_sample(data_to_load)
)
def initialize_minibatch(self) -> Dict[str, Any]:
return {
'input_sequences': self.__input_sequence_encoder.initialize_minibatch(),
'input_sequences_r': self.__input_sequence_encoder.initialize_minibatch() if self.__learn_reverse_edits else None,
'output_sequences': self.__output_sequence_decoder.initialize_minibatch(),
'output_sequences_r': self.__output_sequence_decoder.initialize_minibatch() if self.__learn_reverse_edits else None,
'aligned_edits': self.__edit_encoder.initialize_minibatch()
}
def extend_minibatch_by_sample(self, datapoint: 'CopyEditor.TensorizedData', accumulated_minibatch_data: Dict[str, Any]) -> bool:
continue_extending = self.__input_sequence_encoder.extend_minibatch_by_sample(
datapoint=datapoint.input_sequence,
accumulated_minibatch_data=accumulated_minibatch_data['input_sequences'])
continue_extending &= self.__output_sequence_decoder.extend_minibatch_by_sample(
datapoint=datapoint.output_sequence,
accumulated_minibatch_data=accumulated_minibatch_data['output_sequences'])
if self.__learn_reverse_edits:
continue_extending &= self.__input_sequence_encoder.extend_minibatch_by_sample(
datapoint=datapoint.input_sequence_r,
accumulated_minibatch_data=accumulated_minibatch_data['input_sequences_r'])
continue_extending &= self.__output_sequence_decoder.extend_minibatch_by_sample(
datapoint=datapoint.output_sequence_r,
accumulated_minibatch_data=accumulated_minibatch_data['output_sequences_r'])
continue_extending &= self.__edit_encoder.extend_minibatch_by_sample(
datapoint=datapoint.aligned_edits,
accumulated_minibatch_data=accumulated_minibatch_data['aligned_edits']
)
return continue_extending
def finalize_minibatch(self, accumulated_minibatch_data: Dict[str, Any]) -> Dict[str, Any]:
return {
'input_sequences': self.__input_sequence_encoder.finalize_minibatch(accumulated_minibatch_data['input_sequences']),
'input_sequences_r': self.__input_sequence_encoder.finalize_minibatch(accumulated_minibatch_data['input_sequences_r']) if self.__learn_reverse_edits else None,
'output_sequences': self.__output_sequence_decoder.finalize_minibatch(accumulated_minibatch_data['output_sequences']),
'output_sequences_r': self.__output_sequence_decoder.finalize_minibatch(accumulated_minibatch_data['output_sequences_r']) if self.__learn_reverse_edits else None,
'aligned_edits': self.__edit_encoder.finalize_minibatch(accumulated_minibatch_data['aligned_edits']),
'edit_type': None
}
def forward(self, *, input_sequences: Dict[str, Any], output_sequences: Dict[str, Any],
input_sequences_r: Dict[str, Any], output_sequences_r: Dict[str, Any], aligned_edits: Dict[str, Any],
edit_type: Optional[Dict[str, Any]]):
input_encoding = self.__input_sequence_encoder.forward(
input_sequence_data=input_sequences,
return_embedded_sequence=True
)
if self.__learn_reverse_edits:
input_encoding_r = self.__input_sequence_encoder.forward(
input_sequence_data=input_sequences_r,
return_embedded_sequence=True)
memories, memories_lengths, output_state, input_sequence_token_embeddings = input_encoding
if self.__learn_reverse_edits:
memories_r, memories_lengths_r, output_state_r, input_sequence_token_embeddings_r = input_encoding_r
_, _, edit_representations = self.__edit_encoder.forward(input_sequence_data=aligned_edits)
initial_state = torch.cat([output_state, edit_representations], dim=-1)
decoder_loss = self.__output_sequence_decoder.forward(memories=memories, memories_lengths=memories_lengths,
initial_state=initial_state,
input_sequence_token_embeddings=input_sequence_token_embeddings,
additional_decoder_input=edit_representations,
**output_sequences)
if self.__learn_reverse_edits:
reverse_edit_rep = self.__reverse_edit_layer(edit_representations)
initial_state_r = torch.cat([output_state_r, reverse_edit_rep], dim=-1)
decoder_loss_r = self.__output_sequence_decoder.forward(memories=memories_r, memories_lengths=memories_lengths_r,
initial_state=initial_state_r,
input_sequence_token_embeddings=input_sequence_token_embeddings_r,
additional_decoder_input=reverse_edit_rep,
**output_sequences_r)
else:
decoder_loss_r = 0
decoder_loss = decoder_loss + decoder_loss_r
return decoder_loss
def get_edit_representations(self, mb_data):
with torch.no_grad():
_, _, edit_representations = self.__edit_encoder.forward(input_sequence_data=mb_data['aligned_edits'])
return edit_representations
def greedy_decode(self, input_sequences: Dict[str, Any], aligned_edits: Dict[str, Any],
ground_input_sequences: List[List[str]], max_length: int=50,
fixed_edit_representations: Optional[torch.Tensor]=None) -> List[Tuple[List[List[str]], List[float]]]:
with torch.no_grad():
ground_input_sequences, initial_state, memories, memory_lengths, edit_representations = self.__prepare_decoding(aligned_edits,
ground_input_sequences,
input_sequences,
fixed_edit_representations)
return self.__output_sequence_decoder.greedy_decode(memories, memory_lengths,
initial_state=initial_state, max_length=max_length,
memories_str_representations=ground_input_sequences,
additional_decoder_input=edit_representations)
def beam_decode(self, input_sequences: Dict[str, Any], aligned_edits: Dict[str, Any],
ground_input_sequences: List[List[str]], max_length: int=50,
fixed_edit_representations: Optional[torch.Tensor]=None) -> List[Tuple[List[List[str]], List[float]]]:
with torch.no_grad():
ground_input_sequences, initial_state, memories, memory_lengths, edit_representations = self.__prepare_decoding(aligned_edits,
ground_input_sequences,
input_sequences,
fixed_edit_representations)
return self.__output_sequence_decoder.beam_decode(memories, memory_lengths,
initial_state=initial_state, max_length=max_length,
memories_str_representations=ground_input_sequences,
additional_decoder_input= edit_representations
)
def __prepare_decoding(self, aligned_edits, ground_input_sequences, input_sequences,
fixed_edit_representations: Optional[torch.Tensor]):
memories, memory_lengths, output_state = self.__input_sequence_encoder.forward(
input_sequence_data=input_sequences)
if fixed_edit_representations is None:
_, _, edit_representation = self.__edit_encoder.forward(input_sequence_data=aligned_edits)
else:
edit_representation = fixed_edit_representations
initial_state = torch.cat([output_state, edit_representation], dim=-1)
return ground_input_sequences, initial_state, memories, memory_lengths, edit_representation
def compute_likelihood(self, *, input_sequences: Dict[str, Any], output_sequences: Dict[str, Any],
input_sequences_r: Dict[str, Any], output_sequences_r: Dict[str, Any], aligned_edits: Dict[str, Any],
edit_type: Optional[Dict[str, Any]]):
with torch.no_grad():
memories, memories_lengths, output_state = self.__input_sequence_encoder.forward(input_sequence_data=input_sequences)
_, _, edit_representations = self.__edit_encoder.forward(input_sequence_data=aligned_edits)
initial_state = torch.cat([output_state, edit_representations], dim=-1)
return self.__output_sequence_decoder.compute_likelihood(memories=memories,
memories_lengths=memories_lengths,
initial_state=initial_state,
additional_decoder_input=edit_representations,
**output_sequences)
|
from collections.abc import Hashable
from typing import Optional, Dict, Any, NamedTuple
import numpy as np
import torch
from dpu_utils.mlutils import Vocabulary
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence
from data.edits import ChangeType, sequence_diff, AlignedDiffRepresentation
from dpu_utils.ptutils import BaseComponent
from mlcomponents.embeddings import SequenceEmbedder
class AlignedEditTokensEmbedding(BaseComponent):
"""
Given two sequences of tokens, compute the diff that
aligns them and embed them.
"""
def __init__(self, name: str, token_encoder: SequenceEmbedder,
hyperparameters: Optional[Dict[str, Any]] = None) -> None:
super(AlignedEditTokensEmbedding, self).__init__(name, hyperparameters)
self.__token_encoder = token_encoder
self.__change_type_embedding_layer = None # type: Optional[nn.Embedding]
@classmethod
def default_hyperparameters(cls) -> Dict[str, Any]:
return {
'change_type_embedding_size': 8,
'output_representation_size': None
}
@property
def change_type_embedding_size(self) -> int:
return self.get_hyperparameter('change_type_embedding_size')
@property
def token_encoder(self) -> SequenceEmbedder:
return self.__token_encoder
@property
def embedding_size(self) -> int:
return self.__token_encoder.embedding_size * 2 + self.get_hyperparameter('change_type_embedding_size')
@property
def change_type_embedding_matrix(self) -> torch.Tensor:
assert self.__change_type_embedding_layer is not None, 'Embeddings have not been initialized.'
return self.__change_type_embedding_layer.weight
def _load_metadata_from_sample(self, data_to_load) -> None:
self.__token_encoder.load_metadata_from_sample(data_to_load.output_sequence)
self.__token_encoder.load_metadata_from_sample(data_to_load.input_sequence)
def _finalize_component_metadata_and_model(self) -> None:
self.__change_type_embedding_layer = nn.Embedding(num_embeddings=len(ChangeType),
embedding_dim=self.get_hyperparameter('change_type_embedding_size'),
)
if self.get_hyperparameter('output_representation_size') is not None:
self.__output_layer = nn.Linear(in_features=2*self.__token_encoder.embedding_size + self.get_hyperparameter('change_type_embedding_size'),
out_features=self.get_hyperparameter('output_representation_size'))
TensorizedData = NamedTuple('AlignedEditEncoderTensorizedData', [
('before_token_ids', np.ndarray),
('after_token_ids', np.ndarray),
('change_ids', np.ndarray),
('length', int)
])
def load_data_from_sample(self, data_to_load: Any) -> Optional['AlignedEditTokensEmbedding.TensorizedData']:
max_seq_length = self.__token_encoder.get_hyperparameter('max_seq_length')
pad_id = self.__token_encoder.load_data_from_sample([Vocabulary.get_pad()]).token_ids[0]
aligned_edit_representation = sequence_diff(data_to_load.input_sequence, data_to_load.output_sequence)
change_types=[change_type.value for change_type in aligned_edit_representation.change_type[:max_seq_length]]
before_tokens=self.__token_encoder.load_data_from_sample(aligned_edit_representation.before_tokens).token_ids[:max_seq_length]
after_tokens=self.__token_encoder.load_data_from_sample(aligned_edit_representation.after_tokens).token_ids[:max_seq_length]
assert len(change_types) == len(before_tokens) == len(after_tokens)
diff = self.TensorizedData(
before_token_ids=np.array(before_tokens, dtype=np.int32),
after_token_ids=np.array(after_tokens, dtype=np.int32),
change_ids=np.array(change_types, dtype=np.int8),
length=len(change_types)
)
return diff
def initialize_minibatch(self):
return {'before_token_ids': [],
'after_token_ids': [],
'change_ids': [],
'lengths': []
}
def extend_minibatch_by_sample(self, datapoint: 'AlignedEditTokensEmbedding.TensorizedData', accumulated_minibatch_data: Dict[str, Any]) -> bool:
accumulated_minibatch_data['before_token_ids'].append(datapoint.before_token_ids)
accumulated_minibatch_data['after_token_ids'].append(datapoint.after_token_ids)
accumulated_minibatch_data['change_ids'].append(datapoint.change_ids)
accumulated_minibatch_data['lengths'].append(datapoint.length)
return True
def finalize_minibatch(self, accumulated_minibatch_data: Dict[str, Any]) -> Dict[str, Any]:
lengths = accumulated_minibatch_data['lengths']
accumulated_token_ids_before = accumulated_minibatch_data['before_token_ids']
accumulated_token_ids_after = accumulated_minibatch_data['after_token_ids']
accumulated_change_ids = accumulated_minibatch_data['change_ids']
max_seq_size = max(lengths)
batch_size = len(lengths)
token_ids_before = np.zeros((batch_size, max_seq_size), dtype=np.int32)
token_ids_after = np.zeros((batch_size, max_seq_size), dtype=np.int32)
change_ids = np.zeros((batch_size, max_seq_size), dtype=np.int32)
for i in range(batch_size):
example_length = lengths[i]
token_ids_before[i, :example_length] = accumulated_token_ids_before[i]
token_ids_after[i, :example_length] = accumulated_token_ids_after[i]
change_ids[i, :example_length] = accumulated_change_ids[i]
return {
'token_ids_before': torch.tensor(token_ids_before, dtype=torch.int64, device=self.device),
'token_ids_after': torch.tensor(token_ids_after, dtype=torch.int64, device=self.device),
'change_ids': torch.tensor(change_ids, dtype=torch.int64, device=self.device),
'lengths': torch.tensor(lengths, dtype=torch.int64, device=self.device)
}
def forward(self, *, token_ids_before: torch.Tensor, token_ids_after: torch.Tensor, change_ids: torch.Tensor,
lengths: torch.Tensor, as_packed_sequence: bool=True, add_sequence_related_annotations: bool=True):
embedded_tokens_before, lengths = self.__token_encoder.forward(token_ids=token_ids_before,
lengths=lengths, as_packed_sequence=False,
add_sequence_related_annotations=add_sequence_related_annotations)
embedded_tokens_after, _ = self.__token_encoder.forward(token_ids=token_ids_after, lengths=lengths,
as_packed_sequence=False,
add_sequence_related_annotations=add_sequence_related_annotations)
change_embeddings = self.__change_type_embedding_layer(change_ids) # B x D_c
embeddings = torch.cat([embedded_tokens_before, embedded_tokens_after, change_embeddings], dim=-1) # B x (2D + D_c)
if self.get_hyperparameter('output_representation_size') is not None:
embeddings = self.__output_layer(embeddings)
if not as_packed_sequence:
return embeddings, lengths
sorted_lengths, indices = torch.sort(lengths, descending=True)
# The reverse map, to restore the original order (over batches)
reverse_map = torch.zeros_like(indices).scatter_(dim=0, index=indices, src=torch.arange(indices.shape[0], device=self.device)) # B
return pack_padded_sequence(embeddings[indices], sorted_lengths, batch_first=True), reverse_map
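# A minimal sketch (toy values, not part of the model) of the reverse-map trick
# used in `forward` above: after sorting the batch by length for
# pack_padded_sequence, scatter_ builds the inverse permutation that restores
# the original batch order:
#   lengths = torch.tensor([3, 7, 5])
#   sorted_lengths, indices = torch.sort(lengths, descending=True)  # indices = [1, 2, 0]
#   reverse_map = torch.zeros_like(indices).scatter_(
#       dim=0, index=indices, src=torch.arange(3))                  # reverse_map = [2, 0, 1]
#   assert torch.equal(sorted_lengths[reverse_map], lengths)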
|
#!/usr/bin/env python3
"""
Test the ability of the model to do one-shot generation, given an edit representation of a different sample of the same edit type.
Usage:
oneshotgentesting.py [options] MODEL_FILENAME DATA
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
--sample-per-type=<num> Number of samples per type
--data-type=<type> The type of data to be used.
--cpu Use cpu only.
--quiet Do not show progress bar.
-h --help Show this screen.
--debug Enable debug routines. [default: False]
"""
import logging
from collections import defaultdict
from typing import List
import torch
from tqdm import tqdm
from docopt import docopt
from dpu_utils.utils import run_and_debug, RichPath
from data.edits import Edit
from data.loading import load_data_by_type
from dpu_utils.ptutils import BaseComponent
def evaluate_oneshot(model: BaseComponent, test_data: List[Edit], limit_per_category: int):
model.eval()
logging.info('Tensorizing data...')
    all_data = [model.load_data_from_sample(d) for d in test_data]
data_iter = iter(all_data)
representations = []
continue_iterating = True
    logging.info('Computing edit representation on %d examples', len(test_data))
start_idx = 0
while continue_iterating:
mb_data, continue_iterating, num_elements = model.create_minibatch(data_iter, max_num_items=20)
if num_elements > 0:
representations.extend(model.get_edit_representations(mb_data))
else:
assert not continue_iterating
start_idx += num_elements
assert len(representations) == len(all_data)
# Do an all-vs-all
sample_idxs_by_type = defaultdict(list)
    for i, edit in enumerate(test_data):
sample_idxs_by_type[edit.edit_type].append(i)
num_samples_per_type = defaultdict(int)
num_correct_per_type = defaultdict(int)
num_correct_at5_per_type = defaultdict(int)
for edit_type, sample_idxs in sample_idxs_by_type.items():
samples_to_see = sample_idxs[:limit_per_category]
for sample_idx in tqdm(samples_to_see, leave=False, dynamic_ncols=True, desc=edit_type):
mb_samples = [all_data[i] for i in samples_to_see if i != sample_idx]
sample_mb_data, continue_iterating, num_elements = model.create_minibatch(mb_samples,
max_num_items=len(mb_samples)+1)
assert num_elements == len(samples_to_see) - 1 and not continue_iterating
edit_representations = representations[sample_idx].unsqueeze(0).expand(len(samples_to_see)-1, -1)
            beam = model.beam_decode(input_sequences=sample_mb_data['input_sequences'],
                                     aligned_edits=None,
                                     ground_input_sequences=[test_data[j].input_sequence for j in samples_to_see if j != sample_idx],
                                     max_length=50,
                                     fixed_edit_representations=edit_representations)
            for i, other_sample_idx in enumerate(j for j in samples_to_see if j != sample_idx):
                top_prediction = beam[i][0][0]
                if top_prediction == test_data[other_sample_idx].output_sequence:
                    num_correct_per_type[edit_type] += 1
                num_correct_at5_per_type[edit_type] += 1 if any(beam[i][0][k] == test_data[other_sample_idx].output_sequence for k in range(5)) else 0
                num_samples_per_type[edit_type] += 1
        print(f'\t{edit_type}\t Acc: {num_correct_per_type[edit_type]/num_samples_per_type[edit_type]:.1%} Acc@5: {num_correct_at5_per_type[edit_type]/num_samples_per_type[edit_type]:.1%}')
total_correct, total_correct_at_5, total_elements = 0, 0, 0
for edit_type, num_samples in num_samples_per_type.items():
total_correct += num_correct_per_type[edit_type]
total_correct_at_5 += num_correct_at5_per_type[edit_type]
total_elements += num_samples
    print(f'Total: Acc: {total_correct/total_elements:.1%} Acc@5: {total_correct_at_5/total_elements:.1%}')
def run(arguments):
azure_info_path = arguments.get('--azure-info', None)
model_path = RichPath.create(arguments['MODEL_FILENAME'], azure_info_path)
if arguments['--cpu']:
model = BaseComponent.restore_model(model_path, 'cpu')
else:
model = BaseComponent.restore_model(model_path)
test_data_path = RichPath.create(arguments['DATA'], azure_info_path)
test_data = load_data_by_type(test_data_path, arguments['--data-type'], cleanup=False, as_list=True)
if arguments['--sample-per-type'] is None:
lim = 100000
else:
lim = int(arguments['--sample-per-type'])
evaluate_oneshot(model, test_data, limit_per_category=lim)
if __name__ == '__main__':
args = docopt(__doc__)
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
run_and_debug(lambda: run(args), args.get('--debug', False))
|
#!/usr/bin/env python
"""
Usage:
outputparallelpredictions.py [options] MODEL_FILENAME TEST_DATA OUT_PREFIX
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
--data-type=<type> The type of data to be used. Possible options fce, code, wikiatomicedits, wikiedits. [default: fce]
--greedy Use greedy decoding rather than beam search.
--cpu Use cpu only.
--verbose Print predictions to console.
--num-predictions=N Number of predictions to output. [default: 1]
--quiet Do not show progress bar.
-h --help Show this screen.
--debug Enable debug routines. [default: False]
"""
import logging
from typing import List
from docopt import docopt
from dpu_utils.utils import run_and_debug, RichPath
from data.loading import load_data_by_type
from dpu_utils.ptutils import BaseComponent
def __join_sentence(token_list: List[str]) -> str:
s = ''
in_bpe = False
for t in token_list:
if t == '__sow':
in_bpe = True
elif t == '__eow':
in_bpe = False
s += ' '
elif in_bpe:
s += t
elif t.startswith('##'):
s = s[:-1] + t[2:] + ' '
else:
s += t + ' '
return s
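# Illustrative behavior (hypothetical tokens): BPE pieces between '__sow'/'__eow'
# are concatenated and '##'-prefixed WordPiece continuations are glued onto the
# previous token, e.g.:
#   __join_sentence(['__sow', 'pre', 'fix', '__eow', 'the', '##ory'])  ->  'prefix theory '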
def run(arguments):
azure_info_path = arguments.get('--azure-info', None)
num_predictions = int(arguments['--num-predictions'])
model_path = RichPath.create(arguments['MODEL_FILENAME'], azure_info_path)
if arguments['--cpu']:
model = BaseComponent.restore_model(model_path, 'cpu')
else:
model = BaseComponent.restore_model(model_path)
test_data_path = RichPath.create(arguments['TEST_DATA'], azure_info_path)
test_data = load_data_by_type(test_data_path, arguments['--data-type'])
logging.info('Running test on %d examples', len(test_data))
model.eval()
all_data = [model.load_data_from_sample(d) for d in test_data]
ground_input_sequences = [d.input_sequence for d in test_data]
data_iter = iter(all_data)
predictions = []
is_full = True
start_idx = 0
while is_full:
mb_data, is_full, num_elements = model.create_minibatch(data_iter, max_num_items=10)
        mb_ground_input_sequences = ground_input_sequences[start_idx:start_idx + num_elements]
if num_elements > 0:
logging.info('Before decoding predictions: %d', len(predictions))
if arguments['--greedy']:
predicted_outputs = model.greedy_decode(input_sequences=mb_data['input_sequences'],
ground_input_sequences=mb_ground_input_sequences)
else:
predicted_outputs = model.beam_decode(input_sequences=mb_data['input_sequences'],
ground_input_sequences=mb_ground_input_sequences)
predictions.extend(predicted_outputs)
logging.info('After decoding predictions: %d', len(predictions))
start_idx += num_elements
if not is_full:
break
prediction_files = []
for i in range(num_predictions):
prediction_files.append(open(arguments['OUT_PREFIX'] + f'-after-{i}.txt', 'w'))
with open(arguments['OUT_PREFIX'] + '-before.txt', 'w') as before_f:
assert len(ground_input_sequences) == len(predictions)
for ground, predicted_beam in zip(ground_input_sequences, predictions):
before_f.write(__join_sentence(ground) + '\n')
for i in range(num_predictions):
predicted_sentence = predicted_beam[0][i]
predicted_sentence = __join_sentence(predicted_sentence)
prediction_files[i].write(predicted_sentence + '\n')
if __name__ == '__main__':
args = docopt(__doc__)
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
run_and_debug(lambda: run(args), args.get('--debug', False))
|
#!/usr/bin/env python
"""
Usage:
tsnejson.py [options] MODEL_FILENAME TEST_DATA OUT_PATH
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
--data-type=<type> The type of data to be used. Possible options fce, code, wikiatomicedits. [default: fce]
--quiet Do not show progress bar.
-h --help Show this screen.
--debug Enable debug routines. [default: False]
"""
import logging
from typing import List, Dict, Any
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from docopt import docopt
from dpu_utils.utils import run_and_debug, RichPath
from data.loading import load_data_by_type
from data.representationviz import RepresentationsVisualizer
from dpu_utils.ptutils import BaseComponent
def test(model: BaseComponent, test_data: List[Dict[str, Any]], out_file_path: str):
model.eval()
all_data = [model.load_data_from_sample(d) for d in test_data]
data_iter = iter(all_data)
representations = []
is_full = True
start_idx = 0
while is_full:
mb_data, is_full, num_elements = model.create_minibatch(data_iter, max_num_items=200)
if num_elements > 0:
            representations.extend(model.edit_encoder.get_summary(input_sequence_data=mb_data['aligned_edits']))
start_idx += num_elements
if not is_full:
break
all_labels = set(t.get('edit_type', '?').split('+')[0] for t in test_data)
colormap = plt.get_cmap('Paired')
label_to_color = {}
for i, label in enumerate(all_labels):
label_to_color[label] = colormap(int(float(i) / len(all_labels) * colormap.N))
representations = np.array(representations)
viz = RepresentationsVisualizer(labeler=lambda d: d.get('edit_type', '?').split('+')[0],
colorer=lambda d: label_to_color[d.get('edit_type', '?').split('+')[0]])
viz.save_tsne_as_json(test_data, representations, save_file=out_file_path)
def run(arguments):
azure_info_path = arguments.get('--azure-info', None)
model_path = RichPath.create(arguments['MODEL_FILENAME'], azure_info_path)
model = BaseComponent.restore_model(model_path)
test_data_path = RichPath.create(arguments['TEST_DATA'], azure_info_path)
test_data = load_data_by_type(test_data_path, arguments['--data-type'])
test(model, test_data, arguments['OUT_PATH'])
if __name__ == '__main__':
args = docopt(__doc__)
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
run_and_debug(lambda: run(args), args.get('--debug', False))
|
from typing import Optional
from pytorch_transformers import BertConfig
from editrepcomponents.alignededitencoder import AlignedEditTokensEmbedding
from mlcomponents.seqdecoding.spancopydecoder import GruSpanCopyingDecoder
from mlcomponents.seqencoder import BiGruSequenceEncoder
from editrepcomponents.copyeditor import CopyEditor
from mlcomponents.embeddings import TokenSequenceEmbedder
from mlcomponents.seqdecoding import GruCopyingDecoder, GruDecoder
from mlcomponents.seqdecoding import LuongAttention
from mlcomponents.encoderdecoder import EncoderDecoder
def create_copy_seq2seq_model(bidirectional: bool=False) -> CopyEditor:
"""A Seq2Seq Editor Model with Attention and Copying"""
seq_embeddings = TokenSequenceEmbedder('SeqTokenEmbedder',
hyperparameters={'max_seq_length': 50, 'min_word_count_threshold': 11, })
input_sequence_encoder = BiGruSequenceEncoder('BiGruInputEncoder',
token_embedder=seq_embeddings,
hyperparameters={
'num_layers': 2,
'hidden_size': 64,
})
attention = LuongAttention('StandardAttention',
hyperparameters={'memories_hidden_dimension': input_sequence_encoder.output_states_size})
edit_token_embeddings = AlignedEditTokensEmbedding('EditEncoder', token_encoder=seq_embeddings)
edit_encoder = BiGruSequenceEncoder('BiGruEditEncoder',
token_embedder=edit_token_embeddings,
hyperparameters={
'num_layers': 2,
'hidden_size': 64,
})
decoder = GruCopyingDecoder('GruCopyDecoder',
token_encoder=seq_embeddings,
standard_attention=attention,
hyperparameters={'initial_state_size':
edit_encoder.get_hyperparameter('hidden_size') *
edit_encoder.get_hyperparameter('num_layers') * 2 +
input_sequence_encoder.get_hyperparameter('hidden_size') *
input_sequence_encoder.get_hyperparameter('num_layers') * 2,
'memories_hidden_dimension': 2 * input_sequence_encoder.get_hyperparameter('hidden_size'),
'additional_inputs_size':
edit_encoder.get_hyperparameter('hidden_size') *
edit_encoder.get_hyperparameter('num_layers') * 2,
'max_memories_length': seq_embeddings.get_hyperparameter('max_seq_length')})
model = CopyEditor('Editor',
input_sequence_encoder=input_sequence_encoder,
edit_encoder=edit_encoder,
output_sequence_decoder=decoder,
learn_bidirectional_edits=bidirectional
)
return model
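# Dimension check for the wiring above: each BiGRU summary contributes
# hidden_size (64) * num_layers (2) * 2 directions = 256 dims, so the decoder's
# initial state is 256 (edit encoder) + 256 (input encoder) = 512, while
# additional_inputs_size is the 256-dim edit representation alone.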
def create_base_copy_seq2seq_model(pre_trained_seq_embeddings = None, pre_trained_gru = None) -> EncoderDecoder:
"""A Seq2Seq Editor Model with Attention and Copying"""
if pre_trained_seq_embeddings is None:
seq_embeddings = TokenSequenceEmbedder('SeqTokenEmbedder',
hyperparameters={'max_seq_length': 50, 'min_word_count_threshold': 11})
else:
seq_embeddings = pre_trained_seq_embeddings
input_sequence_encoder = BiGruSequenceEncoder('BiGruInputEncoder',
token_embedder=seq_embeddings,
hyperparameters={
'num_layers': 2,
'hidden_size': 128,
})
attention = LuongAttention('StandardAttention',
hyperparameters={'memories_hidden_dimension': input_sequence_encoder.output_states_size})
decoder = GruCopyingDecoder('GruCopyDecoder',
token_encoder=seq_embeddings,
standard_attention=attention,
hyperparameters={'initial_state_size':
input_sequence_encoder.get_hyperparameter('hidden_size') *
input_sequence_encoder.get_hyperparameter('num_layers') * 2,
'memories_hidden_dimension': 2 * input_sequence_encoder.get_hyperparameter('hidden_size'),
'additional_inputs_size': 0,
'max_memories_length': seq_embeddings.get_hyperparameter('max_seq_length')})
model = EncoderDecoder('Seq2SeqModel',
input_sequence_encoder=input_sequence_encoder,
output_sequence_decoder=decoder
)
return model
def create_seq2seq_with_span_copy_model(bidirectional: bool=False):
"""A Seq2Seq Editor Model with Attention and Copying"""
seq_embeddings = TokenSequenceEmbedder('SeqTokenEmbedder',
hyperparameters={
'max_seq_length': 50,
'min_word_count_threshold': 11,
'max_vocabulary_size': 25000
})
input_sequence_encoder = BiGruSequenceEncoder('BiGruInputEncoder',
token_embedder=seq_embeddings,
hyperparameters={
'num_layers': 2,
'hidden_size': 64,
})
attention = LuongAttention('StandardAttention',
hyperparameters={'memories_hidden_dimension': input_sequence_encoder.output_states_size})
copy_attention = LuongAttention('StandardAttention',
hyperparameters={'memories_hidden_dimension': input_sequence_encoder.output_states_size})
edit_token_embeddings = AlignedEditTokensEmbedding('EditEncoder', token_encoder=seq_embeddings)
edit_encoder = BiGruSequenceEncoder('BiGruEditEncoder',
token_embedder=edit_token_embeddings,
hyperparameters={
'num_layers': 2,
'hidden_size': 64,
})
decoder = GruSpanCopyingDecoder('GruCopyDecoder',
token_encoder=seq_embeddings,
standard_attention=attention,
copy_attention=copy_attention,
hyperparameters={'initial_state_size':
edit_encoder.get_hyperparameter('hidden_size') *
edit_encoder.get_hyperparameter('num_layers') * 2 +
input_sequence_encoder.get_hyperparameter('hidden_size') *
input_sequence_encoder.get_hyperparameter('num_layers') * 2 ,
'memories_hidden_dimension': 2 * input_sequence_encoder.get_hyperparameter('hidden_size'),
'additional_inputs_size':
edit_encoder.get_hyperparameter('hidden_size') *
edit_encoder.get_hyperparameter('num_layers') * 2,
'max_memories_length': seq_embeddings.get_hyperparameter('max_seq_length')})
model = CopyEditor('Editor',
input_sequence_encoder=input_sequence_encoder,
edit_encoder=edit_encoder,
output_sequence_decoder=decoder,
learn_bidirectional_edits=bidirectional
)
return model
def create_gru_lm():
seq_embeddings = TokenSequenceEmbedder('SeqTokenEmbedder',
hyperparameters={
'max_seq_length': 50,
'min_word_count_threshold': 11,
'max_vocabulary_size': 30000,
'embedding_size':256
})
decoder = GruDecoder('GruDecoder', seq_embeddings,
hyperparameters= {
'hidden_size': 128,
'initial_state_size': 128
},
include_summarizing_network=False)
return decoder
def create_base_seq2seq_with_span_copy_model(pre_trained_seq_embeddings = None, pre_trained_gru = None):
if pre_trained_seq_embeddings is None:
seq_embeddings = TokenSequenceEmbedder('SeqTokenEmbedder',
hyperparameters={
'max_seq_length': 50,
'min_word_count_threshold': 11,
'max_vocabulary_size': 10000,
'embedding_size': 128
})
else:
seq_embeddings = pre_trained_seq_embeddings
input_sequence_encoder = BiGruSequenceEncoder('BiGruInputEncoder',
token_embedder=seq_embeddings,
hyperparameters={
'num_layers': 1,
'hidden_size': 128,
})
attention = LuongAttention('StandardAttention',
hyperparameters={
'memories_hidden_dimension': input_sequence_encoder.output_states_size,
'lookup_hidden_dimension': 128,
'output_size': 128
})
copy_attention = LuongAttention('StandardAttention',
hyperparameters={
'memories_hidden_dimension': input_sequence_encoder.output_states_size,
'lookup_hidden_dimension': 128,
'output_size': 128
})
decoder = GruSpanCopyingDecoder('GruCopyDecoder',
token_encoder=seq_embeddings,
standard_attention=attention,
copy_attention=copy_attention,
pre_trained_gru=pre_trained_gru,
hyperparameters={'initial_state_size':
input_sequence_encoder.get_hyperparameter('hidden_size') *
input_sequence_encoder.get_hyperparameter('num_layers') * 2 ,
'memories_hidden_dimension': 2 * input_sequence_encoder.get_hyperparameter('hidden_size'),
'additional_inputs_size': 0,
'hidden_size': 128,
'max_memories_length': seq_embeddings.get_hyperparameter('max_seq_length')})
model = EncoderDecoder('CopySpanModel',
input_sequence_encoder=input_sequence_encoder,
output_sequence_decoder=decoder,
)
return model
|
#!/usr/bin/env python
"""
Usage:
test.py [options] MODEL_FILENAME TEST_DATA
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
--data-type=<type> The type of data to be used. Possible options fce, code, wikiatomicedits, wikiedits. [default: fce]
--no-prediction Do not ask the model to make predictions.
--test-size=<size> Size of test set to use. Only need to specify if less than total.
--greedy Use greedy decoding rather than beam search.
--cpu Use cpu only.
--verbose Print predictions to console.
--quiet Do not show progress bar.
-h --help Show this screen.
--debug Enable debug routines. [default: False]
"""
import logging
from typing import List
from itertools import islice
import numpy as np
from docopt import docopt
from dpu_utils.utils import run_and_debug, RichPath
from data.diffviz import diff
from data.edits import Edit, NLEdit
from data.editevaluator import EditEvaluator
from data.loading import load_data_by_type
from data.nlrepresentationviz import NLRepresentationsVisualizer
from data.representationviz import RepresentationsVisualizer
from io import StringIO
from dpu_utils.ptutils import BaseComponent
from nltk.translate.bleu_score import corpus_bleu, sentence_bleu, SmoothingFunction
import os
import re
import sys
import tokenize
import torch
IDENTIFIER = 'IDENTIFIER'
NUMBER = 'NUM'
STRING = 'STR'
SMOOTHING_FUNCTION = SmoothingFunction().method2
REF_FILE = 'ref.txt'
ORIG_FILE = 'orig.txt'
PRED_FILE = 'pred.txt'
GOLD_EDIT_M2 = 'gold_edit_m2'
PRED_EDIT_M2 = 'pred_edit_m2'
def test(model: BaseComponent, test_data: List[Edit], model_name: str,
data_type: str, test_predictions: bool, greedy: bool, verbose: bool, mb_size=10):
model.eval()
all_data = [model.load_data_from_sample(d) for d in test_data]
if 'context' in data_type and not model.get_hyperparameter('disable_context_copy'):
ground_input_sequences = [d.input_sequence + d.context_sequence for d in test_data]
else:
ground_input_sequences = [d.input_sequence for d in test_data]
data_iter = iter(all_data)
predictions = []
edit_type_predictions = []
representations = []
is_full = True
gold_likelihood_values = []
start_idx = 0
while is_full:
ground_mb_data = list(islice(data_iter, mb_size))
is_full = len(ground_mb_data) == mb_size
mb_data, _, num_elements = model.create_minibatch(ground_mb_data, max_num_items=mb_size)
        mb_ground_input_sequences = ground_input_sequences[start_idx:start_idx + num_elements]
if num_elements > 0:
if test_predictions:
logging.info('Before decoding predictions: %d', len(predictions))
if greedy:
if 'context' in data_type:
predicted_outputs = model.greedy_decode(input_sequences=mb_data['input_sequences'],
aligned_edits=mb_data['aligned_edits'],
context_sequences=mb_data['context_sequences'],
ground_input_sequences=mb_ground_input_sequences)
else:
predicted_outputs = model.greedy_decode(input_sequences=mb_data['input_sequences'],
aligned_edits=mb_data['aligned_edits'],
ground_input_sequences=mb_ground_input_sequences)
else:
if 'context' in data_type:
predicted_outputs = model.beam_decode(input_sequences=mb_data['input_sequences'],
aligned_edits=mb_data['aligned_edits'],
context_sequences=mb_data['context_sequences'],
ground_input_sequences=mb_ground_input_sequences)
else:
predicted_outputs = model.beam_decode(input_sequences=mb_data['input_sequences'],
aligned_edits=mb_data['aligned_edits'],
ground_input_sequences=mb_ground_input_sequences)
predictions.extend(predicted_outputs)
logging.info('After decoding predictions: %d', len(predictions))
representations.extend(model.edit_encoder.get_summary(input_sequence_data=mb_data['aligned_edits']))
# gold_likelihood_values.extend(model.compute_likelihood(**mb_data))
start_idx += num_elements
if not is_full:
break
if test_predictions:
assert len(all_data) == len(predictions)
run_test_suite(predictions, test_data, representations, gold_likelihood_values,
data_type, model_name, verbose)
def __join_sentence(token_list: List[str]) -> str:
s = ''
in_bpe = False
for t in token_list:
if t == '__sow':
in_bpe = True
elif t == '__eow':
in_bpe = False
s += ' '
elif in_bpe:
s += t
else:
s += t + ' '
return s
def visualize_representations(viz, test_data, representations, model_name):
viz.nearest_neighbors_to_html(test_data, representations,
datapoint_to_html=lambda d: diff(__join_sentence(d.input_sequence), __join_sentence(d.output_sequence)),
outfile=model_name+'.html', num_neighbors=5, num_items_to_show=5000)
def run_test_suite(predictions, test_data, representations, gold_likelihood_values,
data_type='code', model_name='', verbose=False):
representations = np.array(representations)
gold_likelihood_values = np.array(gold_likelihood_values)
gold_probs = np.exp(gold_likelihood_values)
    if True:  # Originally `'nl' not in data_type`; the NL visualizer branch below is currently disabled.
visualize_representations(RepresentationsVisualizer(labeler=lambda d: d.edit_type.split('+')[0]),
test_data, representations, model_name)
else:
visualize_representations(NLRepresentationsVisualizer(), test_data, representations, model_name)
if verbose:
for i, (datasample, candidates) in enumerate(zip(test_data, predictions)):
print('Link: {}\n'.format(datasample.provenance))
if 'nl' in data_type:
print('NL Input: {}'.format(' '.join(datasample.nl_sequence)))
print('\nCode Input: ')
print(' '.join(datasample.input_sequence))
print('\n\nGold (Log prob:{}, Prob:{}):'.format(gold_likelihood_values[i], gold_probs[i]))
print(' '.join(datasample.output_sequence))
for predicted_tokens, score in zip(*candidates):
print('\nPredicted (Log prob:{}, Prob:{}):'.format(score, np.exp(score)))
print(' '.join(predicted_tokens))
print('--------------------------')
sys.stdout.flush()
exact_match_errors = 0
structural_match_errors = 0
input_copy_ranks = []
gold_ranks = []
orig_instances = []
references = []
selected_predictions = []
edit_evaluator = EditEvaluator()
for i, (datasample, (predicted_sequences, _)) in enumerate(zip(test_data, predictions)):
if len(predicted_sequences) == 1:
used_prediction_idx = 0
else:
for used_prediction_idx in range(len(predicted_sequences)):
# Since we are testing on edits, predicting the input_sequence is always wrong. Thus skip it!
if predicted_sequences[used_prediction_idx] != datasample.input_sequence:
break
else:
raise Exception('All output sequences are identical to input sequences. This cannot happen.')
beam_strings = [' '.join(s) for s in predicted_sequences]
gold_str = ' '.join(datasample.output_sequence)
try:
gold_ranks.append(beam_strings.index(gold_str))
except ValueError:
pass # not found
input_str = ' '.join(datasample.input_sequence)
try:
input_copy_ranks.append(beam_strings.index(input_str))
except ValueError:
pass # not found
if predicted_sequences[used_prediction_idx] != datasample.output_sequence:
exact_match_errors += 1
gold_token_types = get_token_types(datasample.output_sequence)
predicted_token_types = get_token_types(predicted_sequences[used_prediction_idx])
if gold_token_types != predicted_token_types:
structural_match_errors += 1
references.append([datasample.output_sequence])
orig_instances.append(datasample.input_sequence)
selected_predictions.append(predicted_sequences[used_prediction_idx])
edit_evaluator.add_sample(datasample.input_sequence,
datasample.output_sequence,
predicted_sequences[used_prediction_idx])
if len(selected_predictions) > 0:
with open(model_name + '_' + REF_FILE, 'w+') as f:
for r in references:
f.write(' '.join(r[0]) + '\n')
with open(model_name + '_' + ORIG_FILE, 'w+') as f:
for o in orig_instances:
f.write(' '.join(o) + '\n')
with open(model_name + '_' + PRED_FILE, 'w+') as f:
for s in selected_predictions:
f.write(' '.join(s) + '\n')
orig_sentence_bleu = compute_bleu(references, orig_instances)
pred_sentence_bleu = compute_bleu(references, selected_predictions)
if len(gold_ranks) == 0:
avg_gold_rank = 0.0
else:
avg_gold_rank = sum(gold_ranks)/float(len(gold_ranks))
if len(input_copy_ranks) == 0:
avg_input_copy_rank = 0.0
else:
avg_input_copy_rank = sum(input_copy_ranks)/float(len(input_copy_ranks))
logging.info('Exact match: {}%'.format(100 * (1 - exact_match_errors / len(test_data))))
logging.info('Structural exact match: {}%'.format(100 * (1 - structural_match_errors / len(test_data))))
logging.info('Average gold log likelihood: {}'.format(sum(gold_likelihood_values)/len(test_data)))
logging.info('Average gold probability: {}'.format(sum(gold_probs)/len(test_data)))
logging.info('Gold output sequence is a candidate: {}%'.format(
100 * float(len(gold_ranks))/len(test_data)))
logging.info('Average rank of gold output sequence (when present): {}'.format(avg_gold_rank))
logging.info('Input sequence is a candidate: {}%'.format(
100 * float(len(input_copy_ranks))/len(test_data)))
logging.info('Average rank of input sequence (when present): {}'.format(avg_input_copy_rank))
logging.info('Original avg sentence bleu: {}'.format(orig_sentence_bleu))
logging.info('Prediction avg sentence bleu: {}'.format(pred_sentence_bleu))
for stat, val in edit_evaluator.evaluation_statistics().items():
logging.info('{}: {}'.format(stat, val))
logging.info('Total: {}'.format(len(test_data)))
logging.info('Model: {}'.format(model_name))
def compute_bleu(references, hypotheses):
sentence_scores = []
for ref, hyp in zip(references, hypotheses):
sentence_scores.append(sentence_bleu(ref, hyp, smoothing_function=SMOOTHING_FUNCTION))
return 100*sum(sentence_scores)/len(sentence_scores)
def get_token_types(tokens):
token_types = []
for token in tokens:
if re.match('[a-zA-Z_][a-zA-Z0-9_]*', token) or token == '%UNK%':
            token_types.append(IDENTIFIER)
elif is_num_literal(token):
token_types.append(NUMBER)
elif re.match(r'(["\'])(?:(?=(\\?))\2.)*?\1', token):
token_types.append(STRING)
else:
token_types.append(token)
return token_types
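# Illustrative call (hypothetical tokens):
#   get_token_types(['foo', '=', '42', '+', '"bar"'])  ->  ['IDENTIFIER', '=', 'NUM', '+', 'STR']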
def compute_prediction_accuracy(predictions, test_data):
num_errors = 0
    for i, (datasample, candidates) in enumerate(zip(test_data, predictions)):
        for used_prediction_idx in range(len(candidates)):
            # Since we are testing on edits, predicting the input_sequence is always wrong. Thus skip it!
            if candidates[used_prediction_idx][0] != datasample.input_sequence:
                break
        else:
            raise Exception('All output sequences are identical to input sequences. This cannot happen.')
        if candidates[used_prediction_idx][0] != datasample.output_sequence:
            num_errors += 1
print(f'Matched {100 * (1 - num_errors / len(test_data))}% samples.')
def is_num_literal(token: str) -> bool:
try:
# Numeric literals come in too many flavors, use Python's tokenizer
return next(tokenize.generate_tokens(StringIO(token).readline)).type == tokenize.NUMBER
except:
return False
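# Illustrative behavior: is_num_literal('0x1f') and is_num_literal('1.5e-3') return True,
# while is_num_literal('foo') returns False.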
def run(arguments):
azure_info_path = arguments.get('--azure-info', None)
model_path = RichPath.create(arguments['MODEL_FILENAME'], azure_info_path)
if arguments['--cpu']:
model = BaseComponent.restore_model(model_path, 'cpu')
else:
model = BaseComponent.restore_model(model_path)
test_data_path = RichPath.create(arguments['TEST_DATA'], azure_info_path)
test_data = load_data_by_type(test_data_path, arguments['--data-type'])
test_size = arguments.get('--test-size')
if not test_size:
test_size = len(test_data)
else:
test_size = int(test_size)
test_data = test_data[:test_size]
logging.info('Running test on %d examples', test_size)
test(model, test_data, model.name, arguments['--data-type'] , not arguments['--no-prediction'],
arguments['--greedy'], arguments['--verbose'])
if __name__ == '__main__':
args = docopt(__doc__)
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
run_and_debug(lambda: run(args), args.get('--debug', False))
|
#!/usr/bin/env python
"""
Usage:
testencdec.py [options] MODEL_FILENAME TEST_DATA
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
--data-type=<type> The type of data to be used. Possible options fce, code, wikiatomicedits, wikiedits. [default: fce]
--no-prediction Do not ask the model to make predictions.
--test-size=<size> Size of test set to use. Only need to specify if less than total.
--greedy Use greedy decoding rather than beam search.
--cpu Use cpu only.
--verbose Print predictions to console.
--quiet Do not show progress bar.
-h --help Show this screen.
--debug Enable debug routines. [default: False]
"""
import logging
from typing import List
from itertools import islice
import numpy as np
from docopt import docopt
from dpu_utils.utils import run_and_debug, RichPath
from sklearn.metrics import classification_report
from data.diffviz import diff
from data.edits import Edit, NLEdit
from data.editevaluator import EditEvaluator
from data.loading import load_data_by_type
from io import StringIO
from dpu_utils.ptutils import BaseComponent
from nltk.translate.bleu_score import corpus_bleu, sentence_bleu, SmoothingFunction
import os
import re
import sys
import tokenize
import torch
IDENTIFIER = 'IDENTIFIER'
NUMBER = 'NUM'
STRING = 'STR'
SMOOTHING_FUNCTION = SmoothingFunction().method2
REF_FILE = 'ref.txt'
ORIG_FILE = 'orig.txt'
PRED_FILE = 'pred.txt'
GOLD_EDIT_M2 = 'gold_edit_m2'
PRED_EDIT_M2 = 'pred_edit_m2'
def test(model: BaseComponent, test_data: List[Edit], model_name: str,
data_type: str, test_predictions: bool, greedy: bool, verbose: bool, mb_size=2):
model.eval()
all_data = [model.load_data_from_sample(d) for d in test_data]
if 'context' in data_type and not model.get_hyperparameter('disable_context_copy'):
ground_input_sequences = [d.input_sequence + d.context_sequence for d in test_data]
else:
ground_input_sequences = [d.input_sequence for d in test_data]
data_iter = iter(all_data)
predictions = []
is_full = True
gold_likelihood_values = []
start_idx = 0
while is_full:
ground_mb_data = list(islice(data_iter, mb_size))
is_full = len(ground_mb_data) == mb_size
mb_data, _, num_elements = model.create_minibatch(ground_mb_data, max_num_items=mb_size)
        mb_ground_input_sequences = ground_input_sequences[start_idx:start_idx + num_elements]
if num_elements > 0:
if test_predictions:
logging.info('Before decoding predictions: %d', len(predictions))
if greedy:
predicted_outputs = model.greedy_decode(input_sequences=mb_data['input_sequences'],
ground_input_sequences=mb_ground_input_sequences)
else:
predicted_outputs = model.beam_decode(input_sequences=mb_data['input_sequences'],
ground_input_sequences=mb_ground_input_sequences)
predictions.extend(predicted_outputs)
logging.info('After decoding predictions: %d', len(predictions))
with torch.no_grad():
gold_likelihood_values.extend(model.compute_likelihood(**mb_data).cpu().numpy())
start_idx += num_elements
if not is_full:
break
if test_predictions:
assert len(all_data) == len(predictions)
run_test_suite(predictions, test_data, gold_likelihood_values,
data_type, model_name, verbose)
def __join_sentence(token_list: List[str]) -> str:
s = ''
in_bpe = False
for t in token_list:
if t == '__sow':
in_bpe = True
elif t == '__eow':
in_bpe = False
s += ' '
elif in_bpe:
s += t
else:
s += t + ' '
return s
def run_test_suite(predictions, test_data, gold_likelihood_values, data_type='code', model_name='', verbose=False):
gold_likelihood_values = np.array(gold_likelihood_values)
gold_probs = np.exp(gold_likelihood_values)
if verbose:
for i, (datasample, candidates) in enumerate(zip(test_data, predictions)):
print('Link: {}\n'.format(datasample.provenance))
print('\nCode Input: ')
print(' '.join(datasample.input_sequence))
print('\n\nGold (Log prob:{}, Prob:{}):'.format(gold_likelihood_values[i], gold_probs[i]))
print(' '.join(datasample.output_sequence))
for predicted_tokens, score in zip(*candidates):
print('\nPredicted (Log prob:{}, Prob:{}):'.format(score, np.exp(score)))
print(' '.join(predicted_tokens))
print('--------------------------')
sys.stdout.flush()
exact_match_errors = 0
structural_match_errors = 0
input_copy_ranks = []
gold_ranks = []
recalled_in_beam = []
num_gold_tokens = []
orig_instances = []
references = []
selected_predictions = []
edit_evaluator = EditEvaluator()
for i, (datasample, (predicted_sequences, _)) in enumerate(zip(test_data, predictions)):
if len(predicted_sequences) == 1:
used_prediction_idx = 0
else:
for used_prediction_idx in range(len(predicted_sequences)):
# Since we are testing on edits, predicting the input_sequence is always wrong. Thus skip it!
if predicted_sequences[used_prediction_idx] != datasample.input_sequence:
break
else:
raise Exception('All output sequences are identical to input sequences. This cannot happen.')
beam_strings = [' '.join(s) for s in predicted_sequences]
gold_str = ' '.join(datasample.output_sequence)
try:
gold_ranks.append(1./ float(1 + beam_strings.index(gold_str)))
recalled_in_beam.append(1)
except ValueError:
gold_ranks.append(0)
recalled_in_beam.append(0)
input_str = ' '.join(datasample.input_sequence)
try:
input_copy_ranks.append(1./ float(beam_strings.index(input_str) + 1))
except ValueError:
input_copy_ranks.append(0)
if predicted_sequences[used_prediction_idx] != datasample.output_sequence:
exact_match_errors += 1
num_gold_tokens.append(len(datasample.output_sequence))
gold_token_types = get_token_types(datasample.output_sequence)
predicted_token_types = get_token_types(predicted_sequences[used_prediction_idx])
if gold_token_types != predicted_token_types:
structural_match_errors += 1
references.append([datasample.output_sequence])
orig_instances.append(datasample.input_sequence)
selected_predictions.append(predicted_sequences[used_prediction_idx])
edit_evaluator.add_sample(datasample.input_sequence,
datasample.output_sequence,
predicted_sequences[used_prediction_idx])
if len(selected_predictions) > 0:
with open(model_name + '_' + REF_FILE, 'w+') as f:
for r in references:
f.write(' '.join(r[0]) + '\n')
with open(model_name + '_' + ORIG_FILE, 'w+') as f:
for o in orig_instances:
f.write(' '.join(o) + '\n')
with open(model_name + '_' + PRED_FILE, 'w+') as f:
for s in selected_predictions:
f.write(' '.join(s) + '\n')
orig_sentence_bleu = compute_bleu(references, orig_instances)
pred_sentence_bleu = compute_bleu(references, selected_predictions)
if len(gold_ranks) == 0:
avg_gold_rank = 0.0
else:
avg_gold_rank = sum(gold_ranks)/float(len(gold_ranks))
if len(input_copy_ranks) == 0:
avg_input_copy_rank = 0.0
else:
avg_input_copy_rank = sum(input_copy_ranks)/float(len(input_copy_ranks))
logging.info('Exact match: {}%'.format(100 * (1 - exact_match_errors / len(test_data))))
logging.info('Structural exact match: {}%'.format(100 * (1 - structural_match_errors / len(test_data))))
logging.info('Average gold log likelihood: {}'.format(sum(gold_likelihood_values)/len(test_data)))
xent = gold_likelihood_values / np.array(num_gold_tokens)
logging.info('Average gold x-ent: {}'.format(-sum(xent)/len(test_data)))
logging.info('Average gold probability: {}'.format(sum(gold_probs)/len(test_data)))
logging.info('Accuracy @ beam-size: {}%'.format(
100 * float(sum(recalled_in_beam))/len(recalled_in_beam)))
logging.info('MRR of gold output sequence: {}'.format(avg_gold_rank))
logging.info('Input sequence is a candidate: {}%'.format(
100 * float(len(input_copy_ranks))/len(test_data)))
logging.info('MRR of input sequence: {}'.format(avg_input_copy_rank))
logging.info('Original avg sentence BLEU: {}'.format(orig_sentence_bleu))
logging.info('Prediction avg sentence BLEU: {}'.format(pred_sentence_bleu))
for stat, val in edit_evaluator.evaluation_statistics().items():
logging.info('{}: {}'.format(stat, val))
logging.info('Total: {}'.format(len(test_data)))
logging.info('Model: {}'.format(model_name))
def compute_bleu(references, hypotheses):
sentence_scores = []
for ref, hyp in zip(references, hypotheses):
sentence_scores.append(sentence_bleu(ref, hyp, smoothing_function=SMOOTHING_FUNCTION))
return 100*sum(sentence_scores)/len(sentence_scores)
def get_token_types(tokens):
token_types = []
for token in tokens:
if re.match('[a-zA-Z_][a-zA-Z0-9_]*', token) or token == '%UNK%':
            token_types.append(IDENTIFIER)
elif is_num_literal(token):
token_types.append(NUMBER)
elif re.match(r'(["\'])(?:(?=(\\?))\2.)*?\1', token):
token_types.append(STRING)
else:
token_types.append(token)
return token_types
def compute_prediction_accuracy(predictions, test_data):
num_errors = 0
    for i, (datasample, candidates) in enumerate(zip(test_data, predictions)):
        for used_prediction_idx in range(len(candidates)):
            # Since we are testing on edits, predicting the input_sequence is always wrong. Thus skip it!
            if candidates[used_prediction_idx][0] != datasample.input_sequence:
                break
        else:
            raise Exception('All output sequences are identical to input sequences. This cannot happen.')
        if candidates[used_prediction_idx][0] != datasample.output_sequence:
            num_errors += 1
print(f'Matched {100 * (1 - num_errors / len(test_data))}% samples.')
def is_num_literal(token: str) -> bool:
try:
# Numeric literals come in too many flavors, use Python's tokenizer
return next(tokenize.generate_tokens(StringIO(token).readline)).type == tokenize.NUMBER
except:
return False
def run(arguments):
azure_info_path = arguments.get('--azure-info', None)
model_path = RichPath.create(arguments['MODEL_FILENAME'], azure_info_path)
if arguments['--cpu']:
model = BaseComponent.restore_model(model_path, 'cpu')
else:
model = BaseComponent.restore_model(model_path)
test_data_path = RichPath.create(arguments['TEST_DATA'], azure_info_path)
test_data = load_data_by_type(test_data_path, arguments['--data-type'])
test_size = arguments.get('--test-size')
if not test_size:
test_size = len(test_data)
else:
test_size = int(test_size)
test_data = test_data[:test_size]
logging.info('Running test on %d examples', test_size)
test(model, test_data, model.name, arguments['--data-type'] , not arguments['--no-prediction'],
arguments['--greedy'], arguments['--verbose'])
if __name__ == '__main__':
args = docopt(__doc__)
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
run_and_debug(lambda: run(args), args.get('--debug', False))
|
#!/usr/bin/env python
"""
Usage:
train.py [options] TRAIN_DATA_PATH VALID_DATA_PATH MODEL_TYPE TARGET_MODEL_FILENAME
train.py [options] --split-valid TRAIN_DATA_PATH MODEL_TYPE TARGET_MODEL_FILENAME
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
--data-type=<type> The type of data to be used. Possible options fce, code, wikiatomicedits. [default: fce]
--max-num-epochs=<epochs> The maximum number of epochs to run training for. [default: 100]
--minibatch-size=<size> The minibatch size. [default: 200]
--validation-pct=<pct> The percent of the data to keep as validation if a validation set is not explicitly given. [default: 0.1]
--restore_path=<path> The path to previous model file for starting from previous checkpoint.
--quiet Do not show progress bar.
-h --help Show this screen.
--debug Enable debug routines. [default: False]
"""
import logging
from docopt import docopt
from dpu_utils.utils import run_and_debug, RichPath
from data.loading import load_data_by_type
from dpu_utils.ptutils import BaseComponent, ComponentTrainer
from model.editrepresentationmodels import create_copy_seq2seq_model, \
create_seq2seq_with_span_copy_model, \
create_base_copy_seq2seq_model, \
create_base_seq2seq_with_span_copy_model, \
create_gru_lm
ALL_MODELS = {
'copyseq2seq': create_copy_seq2seq_model,
'basecopyseq2seq': create_base_copy_seq2seq_model,
'copyseq2seq-bidi': lambda: create_copy_seq2seq_model(bidirectional=True),
'gru-lm': create_gru_lm,
'basecopyspan': create_base_seq2seq_with_span_copy_model,
'copyspan': create_seq2seq_with_span_copy_model,
'copyspan-bidi': lambda:create_seq2seq_with_span_copy_model(bidirectional=True),
}
def run(arguments):
azure_info_path = arguments.get('--azure-info', None)
training_data_path = RichPath.create(arguments['TRAIN_DATA_PATH'], azure_info_path)
training_data = load_data_by_type(training_data_path, arguments['--data-type'], as_list=arguments['--split-valid'])
if not arguments['--split-valid']:
validation_data_path = RichPath.create(arguments['VALID_DATA_PATH'], azure_info_path)
validation_data = load_data_by_type(validation_data_path, arguments['--data-type'], as_list=False)
else:
logging.info('No validation set provided. One will be carved out from the training set.')
logging.warning('Lazy loading does not work when using --split-valid')
        train_fraction = 1 - float(arguments['--validation-pct'])
        assert 0 < train_fraction < 1, 'Validation split should be in (0, 1)'
        training_data, validation_data = training_data[:int(train_fraction * len(training_data))], training_data[int(train_fraction * len(training_data)):]
model_path = RichPath.create(arguments['TARGET_MODEL_FILENAME'], azure_info_path)
model_name = arguments['MODEL_TYPE']
initialize_metadata = True
restore_path = arguments.get('--restore_path', None)
if restore_path:
model = BaseComponent.restore_model(RichPath.create(restore_path, azure_info_path))
initialize_metadata = False
elif model_name in ALL_MODELS:
model = ALL_MODELS[model_name]()
else:
        raise ValueError(f'Unrecognized model type {model_name}. Available names: {list(ALL_MODELS.keys())}')
trainer = ComponentTrainer(model, model_path, max_num_epochs=int(arguments['--max-num-epochs']),
minibatch_size=int(arguments['--minibatch-size']))
trainer.train(training_data, validation_data, show_progress_bar=not arguments['--quiet'],
initialize_metadata=initialize_metadata)
if __name__ == '__main__':
args = docopt(__doc__)
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
run_and_debug(lambda: run(args), args.get('--debug', False))
|
import sys
import streamlit as st
import matplotlib.pyplot as plt
import numpy as np
from dpu_utils.utils import RichPath
from data.edits import Edit
from dpu_utils.ptutils import BaseComponent
'''
# Copy Span Visualization
'''
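# Run with (hypothetical script/model names): streamlit run copyspanviz.py -- model.pkl.gz
# (streamlit forwards arguments after '--' to the script as sys.argv).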
model_path = sys.argv[1]
@st.cache
def get_model(filename):
path = RichPath.create(filename)
model = BaseComponent.restore_model(path, device='cpu')
model.eval()
return model
st.markdown(f'> Using model from {model_path}')
before_tokens = st.text_area('Enter the space-tokenized "before" version.').strip().split()
after_tokens = st.text_area('Enter the space-tokenized "after" version.').strip().split()
'''
#### Input Data
'''
edit = Edit(input_sequence=before_tokens, output_sequence=after_tokens, provenance='', edit_type='')
st.write(edit)
model = get_model(model_path)
tensorized_data = [model.load_data_from_sample(edit)]
mb_data, is_full, num_elements = model.create_minibatch(tensorized_data, max_num_items=10)
assert num_elements == 1
ground_input_sequence = [edit.input_sequence]
predicted_outputs = model.beam_decode(input_sequences=mb_data['input_sequences'],
ground_input_sequences=ground_input_sequence)[0]
ll, debug_info = model.compute_likelihood(**mb_data, return_debug_info=True)
ll = ll.cpu().numpy()
st.markdown(f' > Likelihood of target edit {ll[0]:.2f}')
copy_span_logprobs = debug_info['copy_span_logprobs'][0]
gen_logprobs = debug_info['generation_logprobs'][0]
vocabulary = debug_info['vocabulary']
before_tokens = ['<s>'] + before_tokens + ['</s>']
after_tokens = after_tokens + ['</s>']
for i in range(copy_span_logprobs.shape[0]):
st.markdown(f'### At position {i}: "{after_tokens[i]}"')
st.markdown(f'Current context `{["<s>"] + after_tokens[:i]}`')
plt.figure(figsize=[1, 1])
current_copy_span_probs = np.exp(copy_span_logprobs[i])
plt.matshow(current_copy_span_probs, cmap='Greys')
plt.xticks(range(copy_span_logprobs.shape[1]), before_tokens, fontsize=8, rotation=90)
plt.xlabel('Start Span Pos')
plt.yticks(range(copy_span_logprobs.shape[2]), before_tokens, fontsize=8, rotation=0)
plt.ylabel('End Span Pos')
plt.colorbar()
st.pyplot()
    # Unravel the flat argmax into the (start, end) positions of the best copy span.
    from_idx, to_idx = np.unravel_index(np.argmax(current_copy_span_probs), current_copy_span_probs.shape)
st.markdown(f'* Best copy suggestion: `Copy({from_idx}:{to_idx+1})` with prob {np.max(current_copy_span_probs)*100:.1f}%, _i.e._ `Copy({before_tokens[from_idx: to_idx+1]})`.')
st.markdown(f'* Best generation suggestion: `Gen("{vocabulary.get_name_for_id(np.argmax(gen_logprobs[i]))}")` with prob {np.exp(np.max(gen_logprobs[i]))*100:.1f}%')
'''### Beam decoding results '''
for i, (prediction, logprob) in enumerate(zip(predicted_outputs[0], predicted_outputs[1])):
if i > 2:
break
st.markdown(f'* {" ".join(prediction)} ({np.exp(logprob)*100:.1f}%)')
|
#!/usr/bin/env python3
"""
Save the edit representations
Usage:
exportrepresentations.py [options] MODEL_FILENAME DATA OUT_FILE
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
--data-type=<type> The type of data to be used. Possible options fce, code, wikiatomicedits, wikiedits. [default: fce]
--cpu Use cpu only.
--quiet Do not show progress bar.
-h --help Show this screen.
--debug Enable debug routines. [default: False]
"""
import logging
from typing import List
import numpy as np
from docopt import docopt
from dpu_utils.utils import run_and_debug, RichPath
from data.edits import Edit
from data.loading import load_data_by_type
from dpu_utils.ptutils import BaseComponent
def export(model: BaseComponent, test_data: List[Edit], output_path: str):
model.eval()
logging.info('Tensorizing data...')
all_data = [model.load_data_from_sample(d) for d in test_data]
data_iter = iter(all_data)
representations = []
continue_iterating = True
logging.info('Computing edit representation on %d examples', len(test_data))
start_idx = 0
while continue_iterating:
mb_data, continue_iterating, num_elements = model.create_minibatch(data_iter, max_num_items=200)
if num_elements > 0:
representations.extend(model.edit_encoder.get_summary(input_sequence_data=mb_data['aligned_edits']))
else:
assert not continue_iterating
start_idx += num_elements
assert len(representations) == len(all_data)
np.savez_compressed(output_path, representations=representations)
def run(arguments):
azure_info_path = arguments.get('--azure-info', None)
model_path = RichPath.create(arguments['MODEL_FILENAME'], azure_info_path)
if arguments['--cpu']:
model = BaseComponent.restore_model(model_path, 'cpu')
else:
model = BaseComponent.restore_model(model_path)
test_data_path = RichPath.create(arguments['DATA'], azure_info_path)
test_data = load_data_by_type(test_data_path, arguments['--data-type'], cleanup=False)
export(model, test_data, arguments['OUT_FILE'])
if __name__ == '__main__':
args = docopt(__doc__)
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
run_and_debug(lambda: run(args), args.get('--debug', False))
|
#!/usr/bin/env python
"""
Usage:
score.py [options] MODEL_FILENAME
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
--cpu Use cpu only.
--verbose Print predictions to console.
--quiet Do not show progress bar.
-h --help Show this screen.
--debug Enable debug routines. [default: False]
"""
import logging
from typing import Tuple
from docopt import docopt
from dpu_utils.utils import run_and_debug, RichPath
from data.edits import Edit
from dpu_utils.ptutils import BaseComponent
def score(model: BaseComponent, sample_edit: Edit) -> Tuple[float, float]:
model.eval()
ground_mb_data = [model.load_data_from_sample(sample_edit)]
mb_data, _, _ = model.create_minibatch(ground_mb_data, max_num_items=1)
log_likelihood = float(model.compute_likelihood(**mb_data))
return log_likelihood, log_likelihood / len(sample_edit.output_sequence)
def run(arguments):
azure_info_path = arguments.get('--azure-info', None)
model_path = RichPath.create(arguments['MODEL_FILENAME'], azure_info_path)
if arguments['--cpu']:
model = BaseComponent.restore_model(model_path, 'cpu')
else:
model = BaseComponent.restore_model(model_path)
    sample_edit = Edit(
        input_sequence=["var", "VAR0", "=", "(", "Math", ".", "Abs", "(", "VAR1", ".", "GetHashCode", "(", ")", ")", "%", "VAR2", ")", ";"],
        output_sequence=["var", "VAR0", "=", "Math", ".", "Abs", "(", "MurmurHash", ".", "StringHash", "(", "VAR1", ")", ")", "%", "VAR2", ";"],
        provenance="",
        edit_type=[]
    )
sample_logprob, sample_per_token_entropy = score(model, sample_edit)
print(f'Log prob {sample_logprob:.2f} Per token entropy: {sample_per_token_entropy:.2f}')
if __name__ == '__main__':
args = docopt(__doc__)
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
run_and_debug(lambda: run(args), args.get('--debug', False))
|
import logging
import numpy as np
from dpu_utils.utils import run_and_debug, RichPath
from data.representationviz import RepresentationsVisualizer
from data.synthetic.charedits import get_dataset
from editrepcomponents.alignededitencoder import AlignedEditTokensEmbedding
from dpu_utils.ptutils import ComponentTrainer, BaseComponent
from mlcomponents.seqencoder import BiGruSequenceEncoder
from editrepcomponents.copyeditor import CopyEditor
from mlcomponents.embeddings import TokenSequenceEmbedder
from mlcomponents.seqdecoding import GruCopyingDecoder
from mlcomponents.seqdecoding import LuongAttention
logging.basicConfig(level=logging.INFO,
format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
def run():
dataset = get_dataset()
logging.info('Generated %s synthetic datapoints.', len(dataset))
training_set, validation_set = dataset[:int(.8 * len(dataset))], dataset[int(.8 * len(dataset)):]
seq_embeddings = TokenSequenceEmbedder('SeqTokenEmbedder', hyperparameters={'max_seq_length': 12, 'dropout_rate':0})
input_sequence_encoder = BiGruSequenceEncoder('BiGruEncoder',
token_embedder=seq_embeddings,
hyperparameters={
'num_layers':2,
'hidden_size': 61,
})
attention = LuongAttention('StandardAttention',
hyperparameters={'memories_hidden_dimension': input_sequence_encoder.output_states_size})
edit_token_embeddings = AlignedEditTokensEmbedding('EditEncoder', token_encoder=seq_embeddings)
edit_encoder = BiGruSequenceEncoder('BiGruEditEncoder',
token_embedder=edit_token_embeddings,
hyperparameters={
'num_layers':2,
})
decoder = GruCopyingDecoder('GruCopyingDecoder',
token_encoder=seq_embeddings,
standard_attention=attention,
hyperparameters={'initial_state_size': 244+128,
'memories_hidden_dimension': 122,
'dropout_rate':0,
'additional_inputs_size':64*2,
'max_memories_length': 12})
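    # Dimension note (assuming the edit encoder's default hidden_size is 64):
    # initial_state_size = 61 * 2 layers * 2 directions (input encoder, 244)
    # + 64 * 2 (edit summary, 128); additional_inputs_size matches the 128-dim
    # edit representation (64 * 2).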
model = CopyEditor('CopyEditor',
input_sequence_encoder=input_sequence_encoder,
edit_encoder=edit_encoder,
output_sequence_decoder=decoder,
learn_bidirectional_edits=True
)
save_path = RichPath.create('./testmodel-copyseq2seq.pkl.gz')
trainer = ComponentTrainer(model, save_path, max_num_epochs=100, minibatch_size=500)
trainer.train(training_set, validation_set, patience=10)
    ## Decode the validation set with the restored model (beam search).
model = None
model = BaseComponent.restore_model(save_path) # type: CopyEditor
model.eval()
all_data = [model.load_data_from_sample(d) for d in validation_set]
ground_input_sequences = [d.input_sequence for d in validation_set]
data_iter = iter(all_data)
predictions = []
representations = []
is_full = True
start_idx = 0
while is_full:
mb_data, is_full, num_elements = model.create_minibatch(data_iter, max_num_items=100)
if num_elements > 0:
predictions.extend([s[0] for s in model.beam_decode(input_sequences=mb_data['input_sequences'], aligned_edits=mb_data['aligned_edits'],
ground_input_sequences=ground_input_sequences[start_idx:start_idx+num_elements])])
start_idx += num_elements
representations.extend(model.edit_encoder.get_summary(input_sequence_data=mb_data['aligned_edits']))
if not is_full:
break
assert len(all_data) == len(predictions)
num_errors = 0
    for i, (datasample, candidates) in enumerate(zip(validation_set, predictions)):
        if candidates[0] != datasample.output_sequence:
            print(datasample, candidates)
            num_errors += 1
print(f'Matched {100 * (1 - num_errors/len(validation_set))}% samples.')
representations = np.array(representations)
viz = RepresentationsVisualizer(labeler=lambda d:d.edit_type[0])
viz.print_nearest_neighbors(validation_set, representations, num_items=20)
viz.plot_tsne(validation_set, representations, save_file='out.pdf')
if __name__ == '__main__':
run_and_debug(run, True)
|
import logging
import random
import numpy as np
from dpu_utils.utils import run_and_debug, RichPath
from data.representationviz import RepresentationsVisualizer
from data.synthetic.charedits import get_dataset
from editrepcomponents.alignededitencoder import AlignedEditTokensEmbedding
from dpu_utils.ptutils import BaseComponent, ComponentTrainer
from mlcomponents.seqdecoding.spancopydecoder import GruSpanCopyingDecoder
from mlcomponents.seqencoder import BiGruSequenceEncoder
from editrepcomponents.copyeditor import CopyEditor
from mlcomponents.embeddings import TokenSequenceEmbedder
from mlcomponents.seqdecoding import LuongAttention
logging.basicConfig(level=logging.INFO,
format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
def run():
greedy_decoding = False
np.random.seed(1)
random.seed(1)
dataset = get_dataset()
logging.info('Generated %s synthetic datapoints.', len(dataset))
training_set, validation_set = dataset[:int(.8 * len(dataset))], dataset[int(.8 * len(dataset)):]
seq_embeddings = TokenSequenceEmbedder('SeqTokenEmbedder', hyperparameters={'max_seq_length': 12, 'dropout_rate':0, 'min_word_count_threshold': 1})
input_sequence_encoder = BiGruSequenceEncoder('BiGruEncoder',
token_embedder=seq_embeddings,
hyperparameters={
'num_layers':2,
'hidden_size': 61,
})
attention = LuongAttention('StandardAttention',
hyperparameters={'memories_hidden_dimension': input_sequence_encoder.output_states_size})
copy_attention = LuongAttention('StandardAttention',
hyperparameters={'memories_hidden_dimension': input_sequence_encoder.output_states_size})
edit_token_embeddings = AlignedEditTokensEmbedding('EditEncoder', token_encoder=seq_embeddings)
edit_encoder = BiGruSequenceEncoder('BiGruEditEncoder',
token_embedder=edit_token_embeddings,
hyperparameters={
'num_layers':3,
})
decoder = GruSpanCopyingDecoder('GruSpanCopyingDecoder',
token_encoder=seq_embeddings,
standard_attention=attention,
copy_attention=copy_attention,
hyperparameters={'initial_state_size': 244+192,
'memories_hidden_dimension': 122,
'dropout_rate':0,
'additional_inputs_size':64*3,
'max_memories_length': 12})
model = CopyEditor('CopyEditor',
input_sequence_encoder=input_sequence_encoder,
edit_encoder=edit_encoder,
output_sequence_decoder=decoder,
learn_bidirectional_edits=True
)
save_path = RichPath.create('./testmodel-copyspan.pkl.gz')
trainer = ComponentTrainer(model, save_path, max_num_epochs=50, minibatch_size=500)
trainer.train(training_set, validation_set, patience=10)
    ## Decode the validation set (greedy or beam search, depending on the flag above)
    model = None  # Release the trained model before restoring the saved one from disk.
    model = BaseComponent.restore_model(save_path)  # type: CopyEditor
model.eval()
all_data = [model.load_data_from_sample(d) for d in validation_set]
ground_input_sequences = [d.input_sequence for d in validation_set]
data_iter = iter(all_data)
predictions = []
representations = []
is_full = True
start_idx = 0
while is_full:
mb_data, is_full, num_elements = model.create_minibatch(data_iter, max_num_items=100)
if num_elements > 0:
if greedy_decoding:
mb_predictions = [s for s in model.greedy_decode(input_sequences=mb_data['input_sequences'],
aligned_edits=mb_data['aligned_edits'],
ground_input_sequences=ground_input_sequences[
start_idx:start_idx + num_elements])]
else:
mb_predictions = [s for s in model.beam_decode(input_sequences=mb_data['input_sequences'], aligned_edits=mb_data['aligned_edits'],
ground_input_sequences=ground_input_sequences[start_idx:start_idx+num_elements])]
predictions.extend(mb_predictions)
start_idx += num_elements
representations.extend(model.edit_encoder.get_summary(input_sequence_data=mb_data['aligned_edits']))
if not is_full:
break
assert len(all_data) == len(predictions)
num_errors_at_1 = 0
num_errors_at_5 = 0
    for datasample, sample_predictions in zip(validation_set, predictions):
        if sample_predictions[0][0] != datasample.output_sequence:
            print(datasample, sample_predictions)
            num_errors_at_1 += 1
        if not any(prediction[0] == datasample.output_sequence for prediction in sample_predictions):
            num_errors_at_5 += 1
    print(f'Matched @1 {100 * (1 - num_errors_at_1 / len(validation_set)):.1f}% of samples.')
    print(f'Matched @5 {100 * (1 - num_errors_at_5 / len(validation_set)):.1f}% of samples.')
representations = np.array(representations)
viz = RepresentationsVisualizer(labeler=lambda d:d.edit_type)
viz.print_nearest_neighbors(validation_set, representations, num_items=20)
# viz.plot_tsne(validation_set, representations, save_file='out.pdf')
if __name__ == '__main__':
    run_and_debug(run, True)
|
import logging
from typing import Set
from dpu_utils.utils import run_and_debug, RichPath
from data.edits import Edit
from dpu_utils.ptutils import ComponentTrainer
from mlcomponents.seqencoder import BiGruSequenceEncoder
from mlcomponents.embeddings import TokenSequenceEmbedder
from mlcomponents.encoderdecoder import EncoderDecoder
from mlcomponents.seqdecoding import GruDecoder
from mlcomponents.seqdecoding import LuongAttention
logging.basicConfig(level=logging.INFO,
format='%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s')
all_letters = [chr(65 + i) for i in range(26)] + [chr(97 + i) for i in range(26)] \
              + [chr(913 + i) for i in range(25)] + [chr(945 + i) for i in range(25)]  # Latin upper/lower case, then Greek upper/lower case.
def get_at(pos: int, length: int) -> Edit:
before = tuple(all_letters[j % len(all_letters)] for j in range(pos, pos + length))
after = tuple(all_letters[j % len(all_letters)] for j in range(pos + length, pos + length * 2))
return Edit(input_sequence=before, output_sequence=after, edit_type='', provenance='')
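# Illustrative example (deterministic, from the definition above):
#     get_at(0, 2) == Edit(input_sequence=('A', 'B'), output_sequence=('C', 'D'),
#                          edit_type='', provenance='')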
def get_dataset() -> Set[Edit]:
dataset = set() # type: Set[Edit]
for i in range(len(all_letters)):
for l in range(2, 6):
dataset.add(get_at(i, l))
return dataset
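# With 102 characters and window lengths 2..5, get_dataset() yields 102 * 4 = 408 distinct edits.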
def run():
dataset = list(Edit(
input_sequence=list(e.input_sequence),
output_sequence=list(e.output_sequence),
edit_type=e.edit_type,
provenance=e.provenance
) for e in get_dataset())
seq_embeddings = TokenSequenceEmbedder('SeqTokenEmbedder', hyperparameters={'max_seq_length': 7, 'dropout_rate': 0})
input_encoder = BiGruSequenceEncoder('BiGruEncoder', seq_embeddings)
attention = LuongAttention('StandardAttention', hyperparameters={
'memories_hidden_dimension': input_encoder.output_states_size
})
decoder = GruDecoder('GruDecoder', seq_embeddings, standard_attention=attention,
hyperparameters={
'initial_state_size': 64
})
model = EncoderDecoder('EncoderDecoder',
input_sequence_encoder=input_encoder,
output_sequence_decoder=decoder
)
trainer = ComponentTrainer(model, RichPath.create('./testmodel.pkl.gz'), max_num_epochs=500)
trainer.train(dataset, dataset, patience=50)
## Try greedy decoding
model = trainer.model
model.eval()
    all_data = [model.load_data_from_sample(d) for d in dataset]
    ground_input_sequences = [d.input_sequence for d in dataset]
    data_iter = iter(all_data)
    predictions = []
    is_full = True
    start_idx = 0
    while is_full:
        mb_data, is_full, num_elements = model.create_minibatch(data_iter, max_num_items=100)
        if num_elements > 0:
            # greedy_decode requires the ground input sequences (see EncoderDecoder.greedy_decode).
            predictions.extend(model.greedy_decode(
                input_sequences=mb_data['input_sequences'],
                ground_input_sequences=ground_input_sequences[start_idx:start_idx + num_elements]))
            start_idx += num_elements
        if not is_full:
            break
assert len(all_data) == len(predictions)
num_errors = 0
    for i, (datasample, sample_predictions) in enumerate(zip(dataset, predictions)):
        if sample_predictions[0][0] != datasample.output_sequence:
            print(f'{i} does not match data sample: {datasample}')
            print(f'Predicted: {sample_predictions}')
            print('----------')
            num_errors += 1
    print(f'Matched {100 * (1 - num_errors / len(dataset)):.1f}% of samples.')
if __name__ == '__main__':
    run_and_debug(run, True)
|
import logging
from typing import Optional, Dict, Any, List, Tuple, NamedTuple
import torch
from data.edits import Edit
from dpu_utils.ptutils import BaseComponent
from mlcomponents.seqdecoding import SeqDecoder
from mlcomponents.seqencoder import SequenceEncoder
class EncoderDecoder(BaseComponent):
LOGGER = logging.getLogger('EncoderDecoder')
def __init__(self, name: str, input_sequence_encoder: SequenceEncoder,
output_sequence_decoder: SeqDecoder,
hyperparameters: Optional[Dict[str, Any]] = None) -> None:
super(EncoderDecoder, self).__init__(name, hyperparameters)
self.__input_sequence_encoder = input_sequence_encoder
self.__output_sequence_decoder = output_sequence_decoder
@classmethod
def default_hyperparameters(cls) -> Dict[str, Any]:
return { }
def _finalize_component_metadata_and_model(self) -> None:
pass
@property
def input_sequence_encoder(self):
return self.__input_sequence_encoder
@property
def output_sequence_decoder(self):
return self.__output_sequence_decoder
def _load_metadata_from_sample(self, data_to_load: Edit) -> None:
self.__input_sequence_encoder.load_metadata_from_sample(data_to_load.input_sequence)
self.__output_sequence_decoder.load_metadata_from_sample(SeqDecoder.InputOutputSequence(
input_sequence=data_to_load.input_sequence,
output_sequence=data_to_load.output_sequence
))
TensorizedData = NamedTuple('EncoderDecoderTensorizedData', [
('input_sequence', Any),
('output_sequence', Any),
])
def load_data_from_sample(self, data_to_load: Edit) -> Optional['EncoderDecoder.TensorizedData']:
return self.TensorizedData(
input_sequence=self.__input_sequence_encoder.load_data_from_sample([SeqDecoder.START] + data_to_load.input_sequence + [SeqDecoder.END]),
output_sequence=self.__output_sequence_decoder.load_data_from_sample(SeqDecoder.InputOutputSequence(
input_sequence=[SeqDecoder.START] + data_to_load.input_sequence + [SeqDecoder.END],
output_sequence=data_to_load.output_sequence
))
)
def initialize_minibatch(self) -> Dict[str, Any]:
return {
'input_sequences': self.__input_sequence_encoder.initialize_minibatch(),
'output_sequences': self.__output_sequence_decoder.initialize_minibatch(),
}
def extend_minibatch_by_sample(self, datapoint: 'EncoderDecoder.TensorizedData', accumulated_minibatch_data: Dict[str, Any]) -> bool:
continue_extending = self.__input_sequence_encoder.extend_minibatch_by_sample(
datapoint=datapoint.input_sequence,
accumulated_minibatch_data=accumulated_minibatch_data['input_sequences'])
continue_extending &= self.__output_sequence_decoder.extend_minibatch_by_sample(
datapoint=datapoint.output_sequence,
accumulated_minibatch_data=accumulated_minibatch_data['output_sequences'])
return continue_extending
def finalize_minibatch(self, accumulated_minibatch_data: Dict[str, Any]) -> Dict[str, Any]:
return {
'input_sequences': self.__input_sequence_encoder.finalize_minibatch(accumulated_minibatch_data['input_sequences']),
'output_sequences': self.__output_sequence_decoder.finalize_minibatch(accumulated_minibatch_data['output_sequences'])
}
def forward(self, *, input_sequences: Dict[str, Any], output_sequences: Dict[str, Any]):
input_encoding = self.__input_sequence_encoder.forward(
input_sequence_data=input_sequences,
return_embedded_sequence=True
)
memories, memories_lengths, output_state, input_sequence_token_embeddings = input_encoding
decoder_loss = self.__output_sequence_decoder.forward(memories=memories, memories_lengths=memories_lengths,
initial_state=output_state,
input_sequence_token_embeddings=input_sequence_token_embeddings,
**output_sequences)
return decoder_loss
def greedy_decode(self, input_sequences: Dict[str, Any],
ground_input_sequences: List[List[str]], max_length: int=50) -> List[Tuple[List[List[str]], List[float]]]:
with torch.no_grad():
ground_input_sequences, initial_state, memories, memory_lengths = self.__prepare_decoding(ground_input_sequences,
input_sequences)
return self.__output_sequence_decoder.greedy_decode(memories, memory_lengths,
initial_state=initial_state, max_length=max_length,
memories_str_representations=[[SeqDecoder.START] + g + [SeqDecoder.END] for g in ground_input_sequences])
def beam_decode(self, input_sequences: Dict[str, Any],
ground_input_sequences: List[List[str]], max_length: int=150) -> List[Tuple[List[List[str]], List[float]]]:
with torch.no_grad():
ground_input_sequences, initial_state, memories, memory_lengths = self.__prepare_decoding(ground_input_sequences,
input_sequences)
return self.__output_sequence_decoder.beam_decode(memories, memory_lengths,
initial_state=initial_state, max_length=max_length,
memories_str_representations=[[SeqDecoder.START] + g + [SeqDecoder.END] for g in ground_input_sequences],
)
def __prepare_decoding(self, ground_input_sequences, input_sequences):
memories, memory_lengths, output_state = self.__input_sequence_encoder.forward(
input_sequence_data=input_sequences)
return ground_input_sequences, output_state, memories, memory_lengths
def compute_likelihood(self, *, input_sequences: Dict[str, Any], output_sequences: Dict[str, Any],
return_debug_info: bool = False):
with torch.no_grad():
memories, memories_lengths, output_state = self.__input_sequence_encoder.forward(input_sequence_data=input_sequences)
return self.__output_sequence_decoder.compute_likelihood(memories=memories,
memories_lengths=memories_lengths,
initial_state=output_state,
                                                                      return_debug_info=return_debug_info,
**output_sequences)
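
# A minimal sketch of the minibatching protocol implemented by this component
# (names below are illustrative, not part of the API; `model` is assumed to be a
# metadata-finalized EncoderDecoder and `samples` a list of Edit objects):
#
#     tensorized = [model.load_data_from_sample(s) for s in samples]
#     mb = model.initialize_minibatch()
#     for t in tensorized:
#         if not model.extend_minibatch_by_sample(t, mb):
#             break  # the minibatch is full
#     batch = model.finalize_minibatch(mb)
#     loss = model.forward(**batch)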
|
from .sequenceencoder import SequenceEncoder
from .bigruencoder import BiGruSequenceEncoder
__all__ = ['SequenceEncoder', 'BiGruSequenceEncoder']
|
from typing import Optional, Dict, Any, Tuple, Union
import torch
from torch import nn
from torch.nn.utils.rnn import pad_packed_sequence
from mlcomponents.embeddings import SequenceEmbedder
from .sequenceencoder import SequenceEncoder
class BiGruSequenceEncoder(SequenceEncoder):
def __init__(self, name: str, token_embedder: SequenceEmbedder,
hyperparameters: Optional[Dict[str, Any]]=None) -> None:
super(BiGruSequenceEncoder, self).__init__(name, token_embedder, hyperparameters)
self.__birnn = None # type: Optional[nn.GRU]
def _finalize_component_metadata_and_model(self) -> None:
num_layers = self.get_hyperparameter('num_layers')
hidden_size = self.get_hyperparameter('hidden_size')
self.__birnn = nn.GRU(input_size=self.token_embedder.embedding_size,
hidden_size=hidden_size,
num_layers=num_layers,
batch_first=True,
bidirectional=True,
dropout=self.get_hyperparameter('dropout_rate'))
@classmethod
def default_hyperparameters(cls) -> Dict[str, Any]:
return {
'hidden_size': 32,
'num_layers': 1,
'dropout_rate': 0.2
}
@property
def summary_state_size(self) -> int:
return 2 * self.get_hyperparameter('num_layers') * self.get_hyperparameter('hidden_size')
@property
def output_states_size(self) -> int:
return 2 * self.get_hyperparameter('hidden_size')
def forward(self, *, input_sequence_data: Dict[str, Any], return_embedded_sequence: bool=False) \
-> Union[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]:
        embedded_tokens, reverse_sortmap = self.token_embedder.forward(**input_sequence_data)  # PackedSequence (batch_first=True) and the indices restoring the original order
outputs, hn = self.__birnn.forward(embedded_tokens)
outputs, lengths = pad_packed_sequence(outputs, batch_first=True)
lengths = lengths.to(self.device) # Oddly, even if the outputs are on the GPU, these are on the CPU.
outputs = outputs[reverse_sortmap]
lengths = lengths[reverse_sortmap]
        # Flatten the final hidden states: hn is (num_layers * 2) x B x hidden_size.
        h_seq = hn.transpose(1, 0)  # B x (num_layers * 2) x hidden_size
        h_seq = h_seq.contiguous().view(h_seq.shape[0], -1)  # B x 2 * num_layers * hidden_size (= summary_state_size)
h_seq = h_seq[reverse_sortmap]
if return_embedded_sequence:
embeddings, _ = pad_packed_sequence(embedded_tokens, batch_first=True)
return outputs, lengths, h_seq, embeddings[reverse_sortmap]
return outputs, lengths, h_seq
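
# Shape sketch (illustrative values, derived from the defaults above): with
# hidden_size=32, num_layers=1 and a batch of two sequences of lengths [4, 2],
# forward(...) returns outputs of shape 2 x 4 x 64, lengths [4, 2], and a
# summary h_seq of shape 2 x 64 (= summary_state_size).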
|
from abc import ABC, abstractmethod
from typing import Optional, Dict, Any, Tuple, List, Union
import numpy as np
import torch
from dpu_utils.ptutils import BaseComponent
from mlcomponents.embeddings import SequenceEmbedder
class SequenceEncoder(BaseComponent, ABC):
"""
A general encoder of sequences.
"""
def __init__(self, name: str, token_embedder: SequenceEmbedder,
hyperparameters: Optional[Dict[str, Any]] = None) -> None:
super(SequenceEncoder, self).__init__(name, hyperparameters)
self.__token_embedder = token_embedder # type: SequenceEmbedder
@property
@abstractmethod
def summary_state_size(self) -> int:
pass
@property
@abstractmethod
def output_states_size(self) -> int:
pass
@property
def token_embedder(self) -> SequenceEmbedder:
return self.__token_embedder
def _load_metadata_from_sample(self, data_to_load: List[str]) -> None:
self.token_embedder.load_metadata_from_sample(data_to_load)
def load_data_from_sample(self, data_to_load: List[str]) -> Optional[Any]:
return self.token_embedder.load_data_from_sample(data_to_load)
def initialize_minibatch(self) -> Dict[str, Any]:
return self.token_embedder.initialize_minibatch()
def extend_minibatch_by_sample(self, datapoint: Any, accumulated_minibatch_data: Dict[str, Any]) -> bool:
return self.token_embedder.extend_minibatch_by_sample(datapoint, accumulated_minibatch_data)
def finalize_minibatch(self, accumulated_minibatch_data: Dict[str, Any]) -> Dict[str, Any]:
return self.token_embedder.finalize_minibatch(accumulated_minibatch_data)
@abstractmethod
def forward(self, *, input_sequence_data: Dict[str, Any], return_embedded_sequence: bool=False)\
-> Union[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]]:
"""
:param input_sequence_data:
:return: outputs: B x 2 * hidden_size
lengths: B
hn: batch x summary_output_dim or batch x 2 * num_layers * hidden_size
"""
pass
    def get_summary(self, *, input_sequence_data: Dict[str, Any]) -> np.ndarray:
        """
        Returns a B x D numpy array of per-sequence summary states.
        """
with torch.no_grad():
return self.forward(input_sequence_data=input_sequence_data)[2].cpu().numpy()
|
from abc import ABC, abstractmethod
from typing import Union, Tuple, List, Any
import torch
from dpu_utils.mlutils import Vocabulary
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence
from dpu_utils.ptutils import BaseComponent
class SequenceEmbedder(BaseComponent, ABC):
@property
@abstractmethod
def embedding_size(self) -> int:
pass
@property
@abstractmethod
def vocabulary(self) -> Vocabulary:
pass
@property
@abstractmethod
def embedding_matrix(self) -> torch.Tensor:
pass
@abstractmethod
def _compute_embeddings(self, token_ids: torch.Tensor, lengths: torch.Tensor, add_sequence_related_annotations: bool):
pass
@abstractmethod
def _load_metadata_from_sample(self, data_to_load: List[str]) -> None:
pass
@abstractmethod
def load_data_from_sample(self, data_to_load: List[str]) -> Any:
pass
def forward(self, *, token_ids: torch.Tensor, lengths: torch.Tensor, as_packed_sequence: bool=True,
add_sequence_related_annotations: bool=False) -> Union[Tuple[PackedSequence, torch.Tensor], Tuple[torch.Tensor, torch.Tensor]]:
"""
Convert a B x max_len matrix of integer ids to B x max_len x embedding_size
:param input_token_ids: ? x 'embedding_size'
:param add_sequence_related_annotations Add any sequence_related_annotations (e.g. in positional encodings)
:return: a PackedSequence with batch_first=True and the indices to scatter things back into their original order
if as_packed_sequence=True, otherwise, a tuple of the (embedded tokens, vector of input lengths).
"""
embedded = self._compute_embeddings(token_ids, lengths, add_sequence_related_annotations) # B x max_len x D
if not as_packed_sequence:
return embedded, lengths
sorted_lengths, indices = torch.sort(lengths, descending=True)
# The reverse map, to restore the original order (over batches)
reverse_map = torch.zeros_like(indices, device=self.device)\
.scatter_(dim=0, index=indices, src=torch.arange(indices.shape[0], device=self.device)) # B
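        # Worked example: lengths = [2, 5, 3] sorts (descending) to indices = [1, 2, 0],
        # giving reverse_map = [2, 0, 1]; indexing the length-sorted batch with
        # reverse_map restores the original sample order.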
return pack_padded_sequence(embedded[indices], sorted_lengths, batch_first=True), reverse_map
|
from .sequenceembedder import SequenceEmbedder
from .tokensequenceembedder import TokenSequenceEmbedder
__all__ = ['SequenceEmbedder', 'TokenSequenceEmbedder'] |
import logging
from collections import Counter
import typing
from typing import Optional, Dict, Any, List, NamedTuple
import numpy as np
import torch
from dpu_utils.mlutils import Vocabulary
from torch import nn
from mlcomponents.embeddings.sequenceembedder import SequenceEmbedder
class TokenSequenceEmbedder(SequenceEmbedder):
"""
Component that converts a list of tokens into a fixed-size matrix of embeddings.
"""
LOGGER = logging.getLogger('TokenSequenceEmbedder')
def __init__(self, name: str, hyperparameters: Optional[Dict[str, Any]]=None) -> None:
super(TokenSequenceEmbedder, self).__init__(name, hyperparameters)
self.__metadata_token_counter = None # type: Optional[typing.Counter[str]]
self.__vocabulary = None # type: Optional[Vocabulary]
self.__embedding_layer = None # type: Optional[nn.Embedding]
self.__dropout_layer = None # type: Optional[nn.Dropout]
@classmethod
def default_hyperparameters(cls) -> Dict[str, Any]:
return {'embedding_size': 64,
'max_vocabulary_size': 10000,
'min_word_count_threshold': 7,
'max_seq_length': 30,
'dropout_rate': 0.2,
}
@property
def embedding_size(self) -> int:
return self.get_hyperparameter('embedding_size')
@property
def embedding_matrix(self) -> torch.Tensor:
assert self.__embedding_layer is not None, 'Embeddings have not been initialized.'
return self.__embedding_layer.weight
@property
def vocabulary(self) -> Vocabulary:
return self.__vocabulary
# region Metadata Loading
def _init_component_metadata(self) -> None:
if self.__metadata_token_counter is None:
self.__metadata_token_counter = Counter()
def _load_metadata_from_sample(self, data_to_load: List[str]) -> None:
self.__metadata_token_counter.update(data_to_load[:self.get_hyperparameter('max_seq_length')])
def _finalize_component_metadata_and_model(self) -> None:
if self.__metadata_token_counter is None or self.__vocabulary is not None:
return # This module has already been finalized
token_counter = self.__metadata_token_counter
self.__metadata_token_counter = None
self.__vocabulary = Vocabulary.create_vocabulary(tokens=token_counter,
max_size=self.get_hyperparameter('max_vocabulary_size'),
count_threshold=self.get_hyperparameter('min_word_count_threshold'),
add_pad=True)
self.LOGGER.info('Vocabulary Size of %s is %s', self.name, len(self.__vocabulary))
self.__embedding_layer = nn.Embedding(num_embeddings=len(self.__vocabulary),
embedding_dim=self.get_hyperparameter('embedding_size'),
padding_idx=self.__vocabulary.get_id_or_unk(Vocabulary.get_pad()))
self.__dropout_layer = nn.Dropout(p=self.get_hyperparameter('dropout_rate'))
# endregion
TensorizedData = NamedTuple('EmbeddingTensorizedData', [
('token_ids', np.ndarray),
('length', int)
])
def load_data_from_sample(self, data_to_load: List[str]) -> Optional['TokenSequenceEmbedder.TensorizedData']:
return self.TensorizedData(
token_ids=np.array(self.convert_sequence_to_tensor(data_to_load), dtype=np.int32),
length=min(len(data_to_load), self.get_hyperparameter('max_seq_length'))
)
def convert_sequence_to_tensor(self, token_sequence: List[str]):
return self.__vocabulary.get_id_or_unk_multiple(
tokens=[Vocabulary.get_pad() if t is None else t for t in token_sequence[:self.get_hyperparameter('max_seq_length')]]
)
# region Minibatching
def initialize_minibatch(self) -> Dict[str, Any]:
return {'token_sequence_ids': [], 'lengths': []}
def extend_minibatch_by_sample(self, datapoint: 'TokenSequenceEmbedder.TensorizedData', accumulated_minibatch_data: Dict[str, Any]) -> bool:
accumulated_minibatch_data['token_sequence_ids'].append(datapoint.token_ids)
accumulated_minibatch_data['lengths'].append(datapoint.length)
return True
def finalize_minibatch(self, accumulated_minibatch_data: Dict[str, Any]) -> Dict[str, Any]:
accumulated_token_ids = accumulated_minibatch_data['token_sequence_ids']
max_size = np.max(accumulated_minibatch_data['lengths'])
token_ids = np.zeros((len(accumulated_token_ids), max_size), dtype=np.int32)
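        # Example: lengths [3, 1] give a 2 x 3 id matrix whose trailing positions stay 0;
        # this corresponds to the pad token when the vocabulary assigns it id 0,
        # as arranged by add_pad=True above.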
for i in range(len(accumulated_token_ids)):
token_ids[i, :len(accumulated_token_ids[i])] = accumulated_token_ids[i]
return {
'token_ids': torch.tensor(token_ids, dtype=torch.int64, device=self.device),
'lengths': torch.tensor(accumulated_minibatch_data['lengths'], dtype=torch.int64, device=self.device)
}
# endregion
def _compute_embeddings(self, token_ids: torch.Tensor, lengths: torch.Tensor, add_sequence_related_annotations: bool):
embedded = self.__embedding_layer(token_ids) # B x max_len x D
return self.__dropout_layer(embedded)
|
from typing import Optional, Dict, Any, Tuple
import torch
from torch import nn
from dpu_utils.ptutils import BaseComponent
class LuongAttention(BaseComponent):
"""
A Luong-style attention that also includes the inner product of targets-lookup
"""
def __init__(self, name: str, hyperparameters: Optional[Dict[str, Any]]=None) -> None:
super(LuongAttention, self).__init__(name, hyperparameters)
self.__Wcombine = None # type: Optional[nn.Parameter]
self.__Wscore = None # type: Optional[nn.Parameter]
self.__Wpredict = None # type: Optional[nn.Parameter]
@classmethod
def default_hyperparameters(cls) -> Dict[str, Any]:
return {
'memories_hidden_dimension': 64,
'lookup_hidden_dimension': 64,
'output_size': 64
}
def _load_metadata_from_sample(self, data_to_load: Any) -> None:
pass # Nothing here
def load_data_from_sample(self, data_to_load: Any) -> Optional[Any]:
pass # Nothing here
def initialize_minibatch(self) -> Dict[str, Any]:
pass # Nothing here
def extend_minibatch_by_sample(self, datapoint: Any, accumulated_minibatch_data: Dict[str, Any]) -> bool:
pass # Nothing here
def finalize_minibatch(self, accumulated_minibatch_data: Dict[str, Any]) -> Dict[str, Any]:
pass # Nothing here
def _finalize_component_metadata_and_model(self) -> None:
self.__Whd = nn.Parameter(torch.randn(self.get_hyperparameter('memories_hidden_dimension'), self.get_hyperparameter('lookup_hidden_dimension'),
dtype=torch.float, requires_grad=True))
self.__Wout = nn.Linear(self.get_hyperparameter('memories_hidden_dimension') + self.get_hyperparameter('lookup_hidden_dimension'),
self.get_hyperparameter('output_size'),
bias=False)
def forward(self, *, memories: torch.Tensor, memories_length: torch.Tensor,
lookup_vectors: torch.Tensor) -> torch.Tensor:
return self.forward_with_attention_vec(memories=memories, memories_length=memories_length, lookup_vectors=lookup_vectors)[0]
    def forward_with_attention_vec(self, *, memories: torch.Tensor, memories_length: torch.Tensor, lookup_vectors: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # memories: B x max-inp-len x H
        # memories_length: B
        # lookup_vectors: B x max-out-len x D
        attention = self.get_attention_vector(lookup_vectors, memories, memories_length)  # B x max-out-len x max-inp-len
        contexts = torch.einsum('blq,bqh->blh', attention, memories)  # B x max-out-len x H
        hc = torch.cat([contexts, lookup_vectors], dim=-1)  # B x max-out-len x (H + D)
return torch.tanh(self.__Wout(hc)), attention
def get_attention_vector(self, lookup_vectors, memories, memories_length):
# memories: B x max-inp-len x H
# memories_length: B
        # lookup_vectors: B x max-out-len x D
# Output: B x max-out-len x max-inp-len
memories_in_d = torch.einsum('blh,hd->bld', memories, self.__Whd) # B x max-inp-len x D
logits = torch.einsum('bld,bqd->bql', memories_in_d, lookup_vectors) # B x max-out-len x max-inp-len
        # Mask out positions past each memory's true length so they receive zero attention.
        mask = (torch.arange(memories.shape[1], device=self.device).view(1, -1) >= memories_length.view(-1, 1)).unsqueeze(1)  # B x 1 x max-inp-len
        logits.masked_fill_(mask, float('-inf'))
        attention = nn.functional.softmax(logits, dim=-1)  # B x max-out-len x max-inp-len
return attention
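
# A minimal shape sketch of the attention above (illustrative values; constructing
# and finalizing the component outside a ComponentTrainer is an assumption of this sketch):
#
#     att = LuongAttention('StandardAttention',
#                          hyperparameters={'memories_hidden_dimension': 8,
#                                           'lookup_hidden_dimension': 4,
#                                           'output_size': 4})
#     memories = torch.randn(2, 5, 8)       # B x max-inp-len x H
#     lengths = torch.tensor([5, 3])        # true length per batch element
#     queries = torch.randn(2, 7, 4)        # B x max-out-len x D
#     out = att.forward(memories=memories, memories_length=lengths, lookup_vectors=queries)
#     # out: 2 x 7 x 4; for the second element, input positions 3 and 4 get zero attention.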
|