path stringclasses 28 values | content_id stringclasses 28 values | detected_licenses sequence | license_type stringclasses 2 values | repo_name stringclasses 28 values | repo_url stringclasses 28 values | star_events_count int64 0-94 | fork_events_count int64 0-80 | gha_license_id stringclasses 1 value | gha_event_created_at timestamp[us] | gha_updated_at timestamp[us] | gha_language stringclasses 1 value | language stringclasses 1 value | is_generated bool 1 class | is_vendor bool 1 class | conversion_extension stringclasses 1 value | size int64 1.73k-10.1M | script stringclasses 28 values | script_size int64 1.88k-116k |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
/assignments/01_Intro2images-stVer.ipynb | 4c19b92543a0a199321351440743bd0c5a184586 | [] | no_license | iknyazeva/ML2020 | https://github.com/iknyazeva/ML2020 | 0 | 13 | null | 2020-09-21T09:56:38 | 2020-09-21T09:53:56 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 10,090,911 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + Collapsed="false"
import numpy as np
from skimage import data, io, color
import matplotlib.patches as patches
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams["figure.figsize"] = (6,3)
# + [markdown] Collapsed="false"
# # Images as arrays: getting started
#
# Python has the skimage module for working with images. With it, without installing anything extra, you can already do quite a lot. Your task is to study on your own the material about how vision works.
# [Our reference course on computer vision](https://courses.cs.washington.edu/courses/cse455/18sp/): watch the lecture on human vision and visual perception. If you do not like this lecturer's material, you may use any other. As a result you should produce a note in Markdown (use images with direct links, or upload them to your repository and load them from there). Add this note to your site by the next class and also submit it to Google Classroom. You also need to finish this notebook and submit it for review.
#
# + Collapsed="false"
print("Доступные изображения в skimage \n")
'; '.join(data.__all__[2:])
# + [markdown] Collapsed="false"
# ## How do we read an image?
# Many packages provide tools for working with images; let's look at the built-in options
# - via matplotlib.image (which uses Pillow (PIL))
# - skimage
#
# It matters where the image is read from: if it sits behind a URL, PIL will not work on it directly, while skimage can read straight from the link
# + Collapsed="false"
# if the file is on disk
fig, ax = plt.subplots(ncols = 2, figsize = (10,8))
ax[0].imshow(io.imread('imgs/Kyoto.jpg'));ax[0].axis('off');
ax[1].imshow(mpimg.imread('imgs/Kyoto.jpg')); ax[1].axis('off');
# + Collapsed="false"
import requests
from io import BytesIO
from PIL import Image
# + Collapsed="false"
Image.open('imgs/Kyoto.jpg')
# + [markdown] Collapsed="false"
# If we want to read directly from a URL, it does not always work: skimage manages it, while PIL does not
# + Collapsed="false"
im_link = 'https://images.squarespace-cdn.com/content/v1/55ee34aae4b0bf70212ada4c/1479762511721-P1Z10B8ZJDWMPJO9C9TY/ke17ZwdGBToddI8pDm48kPmLlvCIXgndBxNq9fzeZb1Zw-zPPgdn4jUwVcJE1ZvWQUxwkmyExglNqGp0IvTJZamWLI2zvYWH8K3-s_4yszcp2ryTI0HqTOaaUohrI8PIFMLRh9LbupWL4Bv1SDYZc4lRApws2Snwk0j_RSxbNHMKMshLAGzx4R3EDFOm1kBS/Kyoto+3.jpg'
fig, ax = plt.subplots(ncols = 2, figsize = (10,8))
ax[0].imshow(io.imread(im_link));ax[0].axis('off');
ax[1].imshow(mpimg.imread(im_link)); ax[1].axis('off');
# + Collapsed="false"
Image.open(im_link)
# + Collapsed="false"
response = requests.get(im_link)
rdata = BytesIO(response.content)
Image.open(rdata)
# + Collapsed="false"
kyoto = np.array(Image.open(rdata))
# + [markdown] Collapsed="false"
# This picture will be your assignment; for practice we will use the cats, which are also built in
# + [markdown] Collapsed="false"
# ## An image as a matrix
# An image is a numpy array, so we can apply any per-pixel transformations. We will look at how the array is organized a little later; for now let's learn to apply different masks
# + Collapsed="false"
# read the image
image = data.chelsea()
# show it
io.imshow(image);
print('Image dimensions:', image.shape)
print('Image size:', image.size)
# + Collapsed="false"
from skimage.draw import ellipse
# + Collapsed="false"
rr, cc = ellipse(120, 170, 40, 50, image.shape)
img = image.copy()
mask = np.zeros_like(img)
mask[rr,cc] = 1
fig, ax = plt.subplots(ncols = 2, figsize = (10,8))
img[mask==0] = 1
ax[0].imshow(img); ax[0].axis('off');
img = image.copy()
img[mask==1] = 255
ax[1].imshow(img); ax[1].axis('off');
# + [markdown] Collapsed="false"
# ## Task 1. Bounding box
# Very often an object (a face, for example) is marked with a bounding box.
# Take any picture, draw a box on it, and plot the contents of the box next to it (one possible approach is sketched below).
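# A minimal sketch of one possible approach (not the official solution; the box coordinates below are arbitrary example values): draw a `matplotlib.patches.Rectangle` on top of the image and show the cropped box contents next to it.
# + Collapsed="false"
img = data.chelsea()
r0, c0, h, w = 80, 150, 100, 120  # example box: top-left row/col, height, width
fig, ax = plt.subplots(ncols=2, figsize=(10, 4))
ax[0].imshow(img)
ax[0].add_patch(patches.Rectangle((c0, r0), w, h, edgecolor='red', facecolor='none', linewidth=2))
ax[0].axis('off')
ax[1].imshow(img[r0:r0 + h, c0:c0 + w])  # contents of the box
ax[1].axis('off');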
# + [markdown] Collapsed="false"
# ## Playing with colors
#
# Color schemes: you will get to know them properly later. For now let's see what can be done with them.
# Usually a color image is loaded in the RGB scheme, i.e. the image is three stacked matrices: {Red, Green, Blue}
# + Collapsed="false"
image = data.coffee()
f, ax = plt.subplots(1, 3, figsize=(20,10))
chans = ['R','G','B']
for i in range(3):
ax[i].set_title(chans[i]+' channel')
ax[i].imshow(image[:,:,i], cmap='gray')
ax[i].axis('off')
# + [markdown] Collapsed="false"
# How do we get a grayscale image from a color one? And can we go back?
# skimage has converters in skimage.color. Something happened in the cell below. What did I want to show with this example?
# + Collapsed="false"
grayscale = color.rgb2gray(image)
rgb = color.gray2rgb(grayscale)
fig, ax = plt.subplots(1,3, figsize = (20,10))
ax[0].imshow(image);ax[1].imshow(grayscale);ax[2].imshow(rgb);
# + [markdown] Collapsed="false"
# ## RGB to HUE
#
# A good tool to see how the color maps correspond to each other:
# http://math.hws.edu/graphicsbook/demos/c2/rgb-hsv.html
# + Collapsed="false"
from skimage.color import rgb2hsv, hsv2rgb
# + Collapsed="false"
rgb_img = data.coffee()
hsv_img = rgb2hsv(rgb_img)
hue_img = hsv_img[:, :, 0]
sat_img = hsv_img[:, :, 1]
value_img = hsv_img[:, :, 2]
fig, ax = plt.subplots(ncols=4, figsize=(10, 4))
titles = ["RGB image","Hue channel","Saturation channel", "Value channel"]
imgs = [rgb_img, hue_img,sat_img,value_img]
cmaps = [None,'hsv', None,None]
for i in range(4):
ax[i].imshow(imgs[i], cmap = cmaps[i])
ax[i].set_title(titles[i])
ax[i].axis('off')
fig.tight_layout()
# + Collapsed="false"
data.camera().shape
# + [markdown] Collapsed="false"
# Suppose you have a favorite color and want to see everything in it; the HSV color scheme is a great fit for that.
# Let's see how our coffee will look in different colors
# + Collapsed="false"
def colorize(image, hue):
hsv = color.rgb2hsv(color.gray2rgb(image))
hsv[:, :, 0] = hue
return color.hsv2rgb(hsv)
image = data.coffee()
hue_rotations = np.linspace(0, 1, 6)
colorful_images = [colorize(image, hue) for hue in hue_rotations]
fig, axes = plt.subplots(nrows=2, ncols=3, figsize = (10,8))
for ax, array, hue in zip(axes.flat, colorful_images,hue_rotations):
ax.imshow(array, vmin=0, vmax=1)
ax.set_title(f'Hue equal to {round(hue,3)}')
ax.set_axis_off()
fig.tight_layout()
# + [markdown] Collapsed="false"
# ### Saving an image
# here everything is simple:
#
# `io.imsave(filename, source)`
#
# But if you look at the internal representation, each channel has been rescaled to the 0-1 range, while the standard representation is the 0-255 range, so you either convert it by hand or leave it to the function, which will then issue a warning
# + Collapsed="false"
plt.imshow(colorize(image, 0.5))
io.imsave('imgs/blue_coffee.png', colorize(image, 0.5))
io.imsave('imgs/blue_coffee.png', (255*colorize(image, 0.5)).astype(np.uint8))
# + [markdown] Collapsed="false"
# ## What if you want not just a single base color but a range of shades?
#
# Modify the colorize function so that you can specify a range for the base hue: hue_min, hue_max (a hedged sketch of one possible implementation follows the stub below)
# + Collapsed="false"
def colorize_band(image, hue_min,hue_max):
#to do
pass
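# + [markdown] Collapsed="false"
# A hedged sketch of one possible `colorize_band` implementation (an assumption about the intended behaviour, not the official solution; the name `colorize_band_sketch` is made up): spread the hue of each pixel over `[hue_min, hue_max]` according to its brightness, so darker and lighter areas get different shades of the base color.
# + Collapsed="false"
def colorize_band_sketch(image, hue_min, hue_max):
    hsv = color.rgb2hsv(image)  # assumes an RGB input image
    hsv[:, :, 0] = hue_min + (hue_max - hue_min) * hsv[:, :, 2]  # brightness picks the shade
    return color.hsv2rgb(hsv)

plt.imshow(colorize_band_sketch(data.coffee(), 0.7, 0.85));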
# + [markdown] Collapsed="false"
# All shades of purple should look something like this
# + Collapsed="false"
Image.open('imgs/purple_kyoto.jpg')
# + Collapsed="false"
| 7,627 |
/Featureselection1.ipynb | 6436b45c27083bb89cf6a9ae3aa1bd3bcc1ff9f1 | [] | no_license | gabibu/unsupervisedLearning | https://github.com/gabibu/unsupervisedLearning | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 247,907 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Secret-key or symmetric cryptography
#
# ## 1 DES S-box $S_3$
#
# The input to the DES S-box $S_3$ is $110111$. What’s the output? Use Wikipedia, google, a book or some other source to find the table for $S_3$.
# Source: http://celan.informatik.uni-oldenburg.de/kryptos/info/des/sbox/
# ![Des-Box3.png](img/Des-Box3.png)
#
# Output: 0011
# ## 2 3DES
#
# What is the effective key size of 3DES and why is it not 168 bits?
# + active=""
# It is 112 bits, not 168: the meet-in-the-middle attack encrypts from the plaintext side with one key, decrypts from the ciphertext side with the other, and matches the intermediate values, trading memory for time and reducing the effective effort to roughly 2^112 operations.
# -
# ## 3 Differences between AES and Rijndael
#
# What are the differences between the AES candidate Rijndael and AES with respect to block size, key size and number of rounds?
# As described in "[The Design of Rijndael](https://www.springer.com/us/book/9783540425809)": "The _only_ difference between Rijndael and the AES is the range of supported values for the block length and cipher key length".
#
# Rijndael is a block cipher with both a variable block length and a variable key length. The block length and the key length can be independently specified to any multiple of 32 bits, with a minimum of 128 bits and a maximum of 256 bits. It would be possible to define versions of Rijndael with a higher block length or key length, but currently there seems no need for it.
#
# The AES fixes the block length to 128 bits, and supports key lengths of 128, 192 or 256 bits only. The extra block and key lengths in Rijndael were not evaluated in the AES selection process, and consequently they are not adopted in the current FIPS standard.
# ## 4 AES S-box
#
# If we input the byte $11011101$ into the AES S-box, what’s the output? Use the table in slides!
# $1101 -> D -> row$
#
# $1101 -> D -> column$
#
# $11011101 -> C1 -> 11000001$
#
# ![AES-S-Box.png](img/AES-S-Box.png)
# ## 5 Other Block ciphers
#
# Compare DES, 3DES and AES with other block ciphers like IDEA, Blowfish, Twofish, RC5, RC6, Serpent and three more of your choice. Make a table that shows key size, effective key size, block size, number of rounds, relative velocity of a hard- or software implementation.
# - https://pdfs.semanticscholar.org/e684/4c748d38997bf0de71cd7d05e58b09e310f6.pdf
# - https://www.cse.wustl.edu/~jain/cse567-06/ftp/encryption_perf/
# - http://www.ijcseonline.org/pub_paper/IJCSE-00187.pdf
#
# |Ciphers|key size| effective keysize|block size| number of rounds| relative velocity|
# |:--- |:--- |:--- |:--- |:--- |:--- |
# |DES|56 bits||64bits|16|1|
# |3DES| 112 bits ||64bits|48|0.3-0.5|
# |AES|128,192 or 256||128, 192 or 256|10, 12 or 14|0.6|
# |IDEA|128 bits||64 bits|8.5||
# |Blowfish|32-448 bits||64 bits|16|1.2-3|
# |Twofish||||||
# |RC5||||||
# |RC6|128, 192 or 256||128 bits|20||
# ## 6 Modes of operation
#
# You should be able to produce sketches of the 5 modes of operation and write down the equations relating IVs (if any), plaintext blocks, the key, ciphertext blocks, encryption, decryption, and XOR.
# You should also understand the influence of a one-bit error in the ciphertext block.
# | Modes of Operation | Long Name | Cipher Type |
# |:--- |:--- |:--- |
# | ECB | Electronic Code Book Mode | Block |
# | CBC | Cipher Block Chaining Mode | Block |
# | CFB | Cipher FeedBack Mode | Stream |
# | OFB | Output FeedBack Mode| Stream |
# | CTR | Counter Mode | Stream |
# ### ECB
#
# ![Electronic CodeBook Mode Diagram](img/ECB_Diagram.png)
#
# #### Encryption
# $c_k = E(k, m_k),\ k=1,2,3,...$
#
# #### Decryption
# $m_k = D(k, c_k),\ k=1,2,3,...$
#
# #### Error Propagation
# An error in the ciphertext produces garbage output but does not propagate.
# ### CBC
#
# ![Chained Block Cipher ModeDiagram](img/CBC_Diagram.png)
#
# #### Encryption
# $c_0 = IV$<br/>
# $c_k = E(k,m_k\oplus c_{k-1}),\ k = 1,2,3,...$
#
# #### Decryption
# $c_0 = IV$<br/>
# $m_k = D(k, c_k)\oplus c_{k-1},\ k = 1,2,3,...$
#
# #### Error Propagation
# An error in the ciphertext $c_k$ affects all bits of the corresponding plaintext $m_k$ and the one bit of $m_{k+1}$ with which the erroneous bit in $c_k$ is XOR-ed
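#
# A minimal, illustrative sketch (not from the lecture slides; all names here are made up) showing ECB vs. CBC with a toy one-byte "block cipher", just to make the chaining and the XOR with the previous ciphertext block visible:
# +
import random

random.seed(0)
_perm = list(range(256))
random.shuffle(_perm)                           # toy substitution "cipher" on 1-byte blocks

def E(k, b):
    return _perm[b ^ k]                         # "encrypt" one block b with key k

def ecb_encrypt(key, blocks):
    return [E(key, m) for m in blocks]

def cbc_encrypt(key, blocks, iv):
    cipher, prev = [], iv
    for m in blocks:
        prev = E(key, m ^ prev)                 # c_k = E(k, m_k XOR c_{k-1}), c_0 = IV
        cipher.append(prev)
    return cipher

msg = [0x41, 0x41, 0x41, 0x41]                  # repeated plaintext blocks
print('ECB:', ecb_encrypt(0x3C, msg))           # identical plaintext blocks give identical ciphertext blocks
print('CBC:', cbc_encrypt(0x3C, msg, iv=0x55))  # chaining hides the repetition
# -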
# ### CFB
#
# ![Cipher FeedBack Mode Diagram](img/CFB_Diagram.png)
#
# #### Encryption
# $c_0 = IV$<br/>
# $c_i = m_i \oplus E(k, c_{i-1}),\ i=1,2,3,...$
#
# #### Decryption
# $c_0 = IV$<br/>
# $m_i = c_i \oplus E(k, c_{i-1}),\ i=1,2,3,...$
#
# #### Error Propagation
# An error in the cipher block $c_k$ produces one error in the plaintext block $m_k$ at the bit position where the error has occurred (as it is XOR-ed), and produces garbage in the next plaintext block $m_{k+1}$, since $E(k,c_{k_{faulty}})$ should produce a completely different output than $E(k, c_k)$, so $c_{k+1}\oplus E(k,c_{k_{faulty}})$ should be complete gibberish.
# ### OFB
#
# ![Output FeedBack Mode Diagram](img/OFB_Diagram.png)
#
# #### Encryption
# $z_0 = IV$<br/>
# $z_i = E_k(z_{i-1}),\ i=1,2,3,...$<br/>
# $c_i = m_i\oplus z_i,\ i=1,2,3,...$
#
# #### Decryption
# $z_0 = IV$<br/>
# $z_i = E_k(z_{i-1}),\ i=1,2,3,...$<br/>
# $m_i = c_i\oplus z_i,\ i=1,2,3,...$
#
# #### Error Propagation
# An error in cipher bit $c_i$ leads to an erroneous bit $m_i$ but does not propagate.
# ### CTR
#
# ![Counter Mode Diagram](img/CTR_Diagram.png)
#
# #### Encryption
# $z_0 = IV$<br/>
# $z_i = IV\oplus i,\ i=1,2,3,...$<br/>
# $y_i = x_i\oplus E_k(z_i),\ i=1,2,3,...$
#
# #### Decryption
# $z_0 = IV$<br/>
# $z_i = IV\oplus i,\ i=1,2,3,...$<br/>
# $y_i = x_i\oplus E_k(z_i),\ i=1,2,3,...$
#
# #### Note on the IV
# The IV should be a nonce, but the same nonce can be used throughout the session. Its main goal is to offset the counter's starting point, so that using the same key and the same first message does not generate the same ciphertext (think of handshakes/authentication).
#
# #### Error Propagation
# An error in $y_0$ generates one error in the decrypted $x_0$, but does not propagate.
# ## 7 RC4
#
# Use python in Jupyter Notebook to programm RC4. Do some research on RC4 and find out, why it should not be used any more!
# See also [Webbrowser: Endgültig Schluss mit RC4](https://www.heise.de/security/meldung/Webbrowser-Endgueltig-Schluss-mit-RC4-2805770.html) and [Der Lange Abschied von RC4](https://www.golem.de/news/verschluesselung-der-lange-abschied-von-rc4-1507-114877.html).
# +
def KSA(key):
keylength = len(key)
S = list(range(256))
j = 0
for i in range(256):
j = (j + S[i] + key[i % keylength]) % 256
S[i], S[j] = S[j], S[i]
return S
def PRGA(S):
i = 0
j = 0
while True:
i = (i + 1) % 256
j = (j + S[i]) % 256
S[i], S[j] = S[j], S[i]
yield S[(S[i] + S[j]) % 256]
def RC4(key):
S = KSA(key)
return PRGA(S)
def convert_key(s):
return [ord(c) for c in s]
# +
key = "Key"
plaintext = "Plaintext"
# ciphertext should be BBF316E8D940AF0AD3
key = convert_key(key)
keystream = RC4(key)
import sys
for c in plaintext:
sys.stdout.write("%02X" % (ord(c) ^ next(keystream)))
# -
# Vulnerabilities:
#
# - The pseudo-random generator (PRGA) is biased: some output values appear with higher probability than others.<br/>
# This lets an attacker analyse input/output pairs and recover information about the key
# - There is no nonce input, so a new key is needed for each stream.<br/>
# Since most applications just concatenate the nonce and the key, this is a problem because "over all possible RC4 keys, the statistics for the first few bytes of output keystream are strongly non-random, leaking information about the key."
# ## 8 Trivium
#
# Use python in Jupyter Notebook to programm Trivium. This is not an easy task: do it in groups of two!
#
# Use $0x00000000000000000000000000000000$ for the key, IV, and plaintext for initial testing.
#
# The expected ciphertext for this should be $0xFBE0BF265859051B517A2E4E239FC97F$.
#
# In the algorithm on slide “_Trivium — Initialization_”, the $+$ represents XOR (which in python is “^”), ·
# represents logical AND (which in python is “&”). The key-stream is
#
# $z_i = t_1 + t_2 + t_3$
#
# and the $i$th byte of the ciphertext $c_i$ of the plaintext $m_i$ is
#
# $c_i = z_i \oplus m_i$
#
# The following [site](https://asecuritysite.com/encryption/trivium) might be of some help!
# +
from collections import deque
from itertools import repeat
from sys import version_info
class Trivium:
def __init__(self, key, iv):
"""in the beginning we need to transform the key as well as the IV.
Afterwards we initialize the state."""
self.state = None
self.counter = 0
self.key = key # self._setLength(key)
self.iv = iv # self._setLength(iv)
# Initialize state
# len 93
init_list = list(map(int, list(self.key)))
init_list += list(repeat(0, 13))
# len 84
init_list += list(map(int, list(self.iv)))
init_list += list(repeat(0, 4))
# len 111
init_list += list(repeat(0, 108))
init_list += list([1, 1, 1])
self.state = deque(init_list)
# Do 4 full cycles, drop output
for i in range(4*288):
self._gen_keystream()
def encrypt(self, message):
"""To be implemented"""
pass
def decrypt(self, cipher):
"""To be implemented"""
#maybe with code from here https://github.com/mortasoft/Trivium/blob/master/trivium.py
# Line 119
pass
def keystream(self):
"""output keystream
only use this when you know what you are doing!!"""
while self.counter < 2**64:
self.counter += 1
yield self._gen_keystream()
def _setLength(self, input_data):
"""we cut off after 80 bits, alternatively we pad these with zeros."""
input_data = "{0:080b}".format(input_data)
if len(input_data) > 80:
input_data = input_data[:(len(input_data)-81):-1]
else:
input_data = input_data[::-1]
return input_data
def _gen_keystream(self):
"""this method generates triviums keystream"""
t_1 = self.state[65] ^ self.state[92]
t_2 = self.state[161] ^ self.state[176]
t_3 = self.state[242] ^ self.state[287]
out = t_1 ^ t_2 ^ t_3
u_1 = t_1 ^ self.state[90] & self.state[91] ^ self.state[170]
u_2 = t_2 ^ self.state[174] & self.state[175] ^ self.state[263]
u_3 = t_3 ^ self.state[285] & self.state[286] ^ self.state[68]
self.state.rotate(1)
self.state[0] = u_3
self.state[93] = u_1
self.state[177] = u_2
return out
import sys
k1="00000000000000000000"
i1="00000000000000000000"
print ("Key: "+k1)
print ("IV: "+i1)
def main():
KEY = hex_to_bits(k1)[::-1]
IV = hex_to_bits(i1)[::-1]
trivium = Trivium(KEY, IV)
next_key_bit = trivium.keystream().__next__
for i in range(1):
keystream = []
for j in range(128):
keystream.append(next_key_bit())
print ("Stream: "+bits_to_hex(keystream))
# Convert strings of hex to strings of bytes and back, little-endian style
_allbytes = dict([("%02X" % i, i) for i in range(256)])
def _hex_to_bytes(s):
return [_allbytes[s[i:i+2].upper()] for i in range(0, len(s), 2)]
def hex_to_bits(s):
return [(b >> i) & 1 for b in _hex_to_bytes(s)
for i in range(8)]
def bits_to_hex(b):
return "".join(["%02X" % sum([b[i + j] << j for j in range(8)])
for i in range(0, len(b), 8)])
if __name__ == "__main__":
main()
# -
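# A hedged sketch (not the required `encrypt`/`decrypt` methods) of how encryption could be built on top of the keystream: XOR every plaintext bit with the next keystream bit; decryption is the identical operation. The helper name `xor_with_keystream` is made up, and the printed value should match the expected ciphertext from the task only if the keystream implementation above is correct.
# +
def xor_with_keystream(trivium, bits):
    ks = trivium.keystream()
    return [b ^ next(ks) for b in bits]          # c_i = z_i XOR m_i (and m_i = z_i XOR c_i)

plain_hex = "00000000000000000000000000000000"   # all-zero plaintext from the task
cipher_bits = xor_with_keystream(Trivium(hex_to_bits(k1)[::-1], hex_to_bits(i1)[::-1]),
                                 hex_to_bits(plain_hex))
print(bits_to_hex(cipher_bits))                  # expected: FBE0BF265859051B517A2E4E239FC97F
# -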
# ## 9 OTP
#
# Make your own example with one-time pad. Why is it perfectly secure? Make sure, the key is truly random not used more than once and kept secret from adversaries.
# $m = 0110100001100101011011000110110001101111001000000111011101101111011100100110110001100100$<br />
# $k = 0110110111011101100100110001101100000001010001110010110111101010101110010001101100011100$
# +
m = '0110100001100101011011000110110001101111001000000111011101101111011100100110110001100100'
k = '0110110111011101100100110001101100000001010001110010110111101010101110010001101100011100'
c = int(m,2)^int(k,2)
print('m: ' + m)
print('k: ' + k)
print('c: ' + bin(c)[2:].zfill(len(m)))
print('d: ' + bin(c^int(k,2))[2:].zfill(len(m)))
print('m: ' + m)
| 12,425 |
/code_back_up/backuped_on_sharefolder_2021-01-06_000/00227_Performance_measurement_updated_1221.ipynb | 8335f10935d9168b511306fb2b27f3a6bd5534ca | [] | no_license | TrellixVulnTeam/jian_projects_AAPL | https://github.com/TrellixVulnTeam/jian_projects_AAPL | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 82,546 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Coursework 2: Data Processing
#
# ## Task 1
# This coursework will assess your understanding of using NoSQL to store and retrieve data. You will perform operations on data from the Enron email dataset in a MongoDB database, and write a report detailing the suitability of different types of databases for data science applications. You will be required to run code to answer the given questions in the Jupyter notebook provided, and write a report describing alternative approaches to using MongoDB.
#
# Download the JSON version of the Enron data (use “Download as zip” to download the data file from http://edshare.soton.ac.uk/19548/; the file is about 380MB) and import it into a collection called messages in a database called enron. You do not need to set up any authentication. In the Jupyter notebook provided, perform the following tasks, using the Python PyMongo library.
#
# Answers should be efficient in terms of speed. Answers which are less efficient will not get full marks.
import pymongo
from pymongo import MongoClient
from datetime import datetime
from pprint import pprint
# ### 1)
# Write a function which returns a MongoDB connection object to the "messages" collection. [4 points]
# + nbgrader={"grade": false, "grade_id": "get_collection", "locked": false, "schema_version": 1, "solution": true}
def get_collection():
"""
Connects to the server, and returns a collection object
of the `messages` collection in the `enron` database
"""
# YOUR CODE HERE
return None
# -
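# A minimal sketch (not the graded solution; the name `get_collection_sketch` is illustrative) of what such a connection helper could look like, assuming a default local MongoDB instance without authentication:
# +
def get_collection_sketch():
    client = MongoClient('localhost', 27017)   # default host/port, no authentication
    return client['enron']['messages']         # `messages` collection in the `enron` database
# -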
# ### 2)
#
# Write a function which returns the amount of emails in the messages collection in total. [4 points]
# + nbgrader={"grade": false, "grade_id": "get_amount_of_messages", "locked": false, "schema_version": 1, "solution": true}
def get_amount_of_messages(collection):
"""
:param collection A PyMongo collection object
:return the amount of documents in the collection
"""
# YOUR CODE HERE
pass
# -
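# A minimal sketch (illustrative name, not the graded solution): an empty filter matches every document, and `count_documents` is available in PyMongo 3.7+.
# +
def get_amount_of_messages_sketch(collection):
    return collection.count_documents({})
# -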
# ### 3)
#
# Write a function which returns each person who was BCCed on an email. Include each person only once, and display only their name according to the X-To header. [4 points]
#
#
# + nbgrader={"grade": false, "grade_id": "get_bcced_people", "locked": false, "schema_version": 1, "solution": true}
def get_bcced_people(collection):
"""
:param collection A PyMongo collection object
:return the names of the people who have received an email by BCC
"""
# YOUR CODE HERE
pass
# -
# ### 4)
#
# Write a function with parameter subject, which gets all emails in a thread with that subject and orders them by date (ascending). “An email thread is an email message that includes a running list of all the succeeding replies starting with the original email.” See https://www.techopedia.com/definition/1503/email-thread for a detailed description. [4 points]
# + nbgrader={"grade": false, "grade_id": "get_emails_in_thread", "locked": false, "schema_version": 1, "solution": true}
def get_emails_in_thread(collection, subject):
"""
:param collection A PyMongo collection object
:return All emails in the thread with that subject
"""
# YOUR CODE HERE
pass
# -
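# A hedged sketch (illustrative name, not the graded solution). It assumes each message stores its headers in a `headers` sub-document with `Subject` and `Date` fields; if `Date` is stored as a string it may need parsing for a true chronological sort, and a fuller solution would also match "Re:"/"Fwd:" variants of the subject.
# +
def get_emails_in_thread_sketch(collection, subject):
    return list(collection.find({'headers.Subject': subject}).sort('headers.Date', pymongo.ASCENDING))
# -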
# ### 5)
#
# Write a function which returns the percentage of emails sent on a weekend (i.e., Saturday and Sunday) as a `float` between 0 and 1. [6 points]
# + nbgrader={"grade": false, "grade_id": "get_percentage_sent_on_weekend", "locked": false, "schema_version": 1, "solution": true}
def get_percentage_sent_on_weekend(collection):
"""
:param collection A PyMongo collection object
:return A float between 0 and 1
"""
# YOUR CODE HERE
pass
# -
# ### 6)
#
# Write a function with parameter limit. The function should return for each email account: the number of emails sent, the number of emails received, and the total number of emails (sent and received). Use the following format: [{"contact": "michael.simmons@enron.com", "from": 42, "to": 92, "total": 134}] and the information contained in the To, From, and Cc headers. Sort the output in descending order by the total number of emails. Use the parameter limit to specify the number of results to be returned. If limit is null, the function should return all results. If limit is higher than null, the function should return the number of results specified as limit. limit cannot take negative values. [10 points]
# + nbgrader={"grade": false, "grade_id": "get_emails_between_contacts", "locked": false, "schema_version": 1, "solution": true}
def get_emails_between_contacts(collection, limit):
"""
Shows the communications between contacts
Sort by the descending order of total emails using the To, From, and Cc headers.
:param `collection` A PyMongo collection object
:param `limit` An integer specifying the amount to display, or
if null will display all outputs
:return A list of objects of the form:
[{
'contact': <<Another email address>>
'from':
'to':
'total':
},{.....}]
"""
# YOUR CODE HERE
pass
# -
# ### 7)
# Write a function to find out the number of senders who were also direct receivers. Direct receiver means the email is sent to the person directly, not via cc or bcc. [4 points]
def get_from_to_people(collection):
"""
:param collection A PyMongo collection object
:return the NUMBER of the people who have sent emails and received emails as direct receivers.
"""
# YOUR CODE HERE
pass
# ### 8)
# Write a function with parameters start_date and end_date, which returns the number of email messages that have been sent between those specified dates, including start_date and end_date [4 points]
def get_emails_between_dates(collection, start_date, end_date):
"""
:param collection A PyMongo collection object
:return All emails between the specified start_date and end_date
"""
# YOUR CODE HERE
pass
# ## Task 2
# This task will assess your ability to use the Hadoop Streaming API and MapReduce to process data. For each of the questions below, you are expected to write two python scripts, one for the Map phase and one for the Reduce phase. You are also expected to provide the correct parameters to the `hadoop` command to run the MapReduce process. Write down your answers in the specified cells below.
#
# To get started, you need to download and unzip the YouTube dataset (available at http://edshare.soton.ac.uk/19547/) onto the machine where you have Hadoop installed (this should be the virtual machine provided).
#
# To help you, `%%writefile` has been added to the top of the cells, automatically writing them to "mapper.py" and "reducer.py" respectively when the cells are run.
# ### 1)
# Using Youtube01-Psy.csv, find the hourly interval in which most spam was sent. The output should be in the form of a single key-value pair, where the value is a datetime at the start of the hour with the highest number of spam comments. [9 points]
from datetime import datetime
import csv
import sys
# +
# DEBUGGING SCRIPT FOR MAPPER
dates = [
'2013-11-07T06:20:48',
'2013-11-07T12:37:15',
'2014-01-19T04:27:18',
'2014-01-19T08:55:53',
'2014-01-19T20:31:10'
]
spam_class = [1,1,0,0,1]
for x in range(len(dates)):
if spam_class[x] == 1:
date = dates[x].strip()
date_as_date = datetime.strptime(date, '%Y-%m-%dT%H:%M:%S')
day = date_as_date.date().day
month = date_as_date.date().month
year = date_as_date.date().year
hour = date_as_date.hour
print (str(day) + '|' + str(month) + '|' + str(year) + '|' + str(hour) + '\t' + '1')
# +
test = [1,2,3]
test = test[1:]
# +
# %%writefile mapper.py
# #!/usr/bin/env python
# MAPPER
import csv
import sys
from datetime import datetime
lines = sys.stdin.readlines()
csvreader = csv.reader(lines)
dates = []
spam_class = []
input_for_reducer = []
counter = 0
for row in csvreader:
if counter > 0:
dates.append(row[2])
spam_class.append(row[4])
counter += 1
if (len(dates) != len(spam_class)):
print ('Unequal number of entries in Date and Class columns... Aborting...')
sys.exit()
for x in range(len(dates)):
if spam_class[x] == '1':
date = dates[x].strip()
date_as_date = datetime.strptime(date, '%Y-%m-%dT%H:%M:%S')
day = date_as_date.date().day
month = date_as_date.date().month
year = date_as_date.date().year
hour = date_as_date.hour
print (str(day) + '|' + str(month) + '|' + str(year) + '|' + str(hour) + '\t' + '1')
# -
# If the dates in our input file are arranged such that the dates (at an hourly interval) occur in groups, we can perform the Reduce operation in linear time.
#
# It is observed in the data that the column 'Date' is indeed sorted in ascending order
#
# So the dates (at an hourly interval) are in groups
#
#
#
# +
# DEBUGGING SCRIPT FOR REDUCER
input_pairs = [
'7|11|2013|6 1',
'7|11|2013|6 1',
'7|11|2013|12 1',
'7|11|2013|12 1',
'7|11|2013|12 1',
'19|1|2014|20 1'
]
dates_list = []
date_count_dict = dict()
final_dict = {
'hour_with_most_spam': None,
'value_of_max_spam_count': 0
}
for input_pair in input_pairs:
input_list = input_pair.split('\t', 1)
if (len(input_list) != 2):
continue
dates_list.append(input_list[0])
dates_list
for date in dates_list:
if date in date_count_dict.keys():
date_count_dict[date] += 1
else:
date_count_dict[date] = 1
date_count_dict_sorted = sorted(date_count_dict.items(), key=lambda date_count_value: date_count_value[1],
reverse=True)
final_dict['hour_with_most_spam'] = date_count_dict_sorted[0][0]
final_dict['value_of_max_spam_count'] = date_count_dict_sorted[0][1]
final_dict
# +
# %%writefile reducer.py
# #!/usr/bin/env python
# REDUCER
import sys
from datetime import datetime
input_pairs = sys.stdin.readlines()
dates_list = []
date_count_dict = dict()
final_dict = {
'hour_with_most_spam': None,
'value_of_max_spam_count': 0
}
for input_pair in input_pairs:
input_list = input_pair.split('\t', 1)
if (len(input_list) != 2):
continue
dates_list.append(input_list[0])
dates_list
for date in dates_list:
if date in date_count_dict.keys():
date_count_dict[date] += 1
else:
date_count_dict[date] = 1
date_count_dict_sorted = sorted(date_count_dict.items(), key=lambda date_count_value: date_count_value[1],
reverse=True)
final_dict['hour_with_most_spam'] = date_count_dict_sorted[0][0]
final_dict['value_of_max_spam_count'] = date_count_dict_sorted[0][1]
for key, value in final_dict.items():
print (key + "\t" + str(value))
# +
myList = [1,1,1,2,2,2,2,3,3]
max_count = 1
max_elem = myList[0]
curr_count = 1
for x in range(1, len(myList)):
if (myList[x] == myList[x-1]):
# same elem, inc counter
curr_count += 1
else:
# diff elem
if curr_count > max_count:
max_count = curr_count
max_elem = myList[x - 1]
curr_count = 1
# last element check
if curr_count > max_count:
max_count = curr_count
max_elem = myList[x - 1]
print (max_elem)
# + language="bash"
# cat ./Youtube01-Psy.csv | ./mapper.py | ./reducer.py
# + language="bash"
#
# # Clear output
# rm -rf output1
#
# # Make sure hadoop is in standalone mode
# hadoop-standalone-mode.sh
#
# # Main pipeline command
# hadoop jar $HADOOP_HOME/share/hadoop/tools/lib/hadoop-streaming-*.jar \
# -files mapper.py,reducer.py \
# -input Youtube01-Psy.csv \
# -mapper ./mapper.py \
# -reducer ./reducer.py \
# -output output1
# + language="bash"
# #Hadoop command to run the map reduce.
#
# hadoop jar $HADOOP_HOME/share/hadoop/tools/lib/hadoop-streaming-*.jar \
# -files mapper.py,reducer.py \
# -input Youtube01-Psy.csv \
# -mapper ./mapper.py \
# -reducer ./reducer.py \
# -output output
# +
#Expected key-value output format:
#hour_with_most_spam "2013-11-10T10:00:00"
#Additional key-value pairs are acceptable, as long as the hour_with_most_spam pair is correct.
# -
# ### 2)
# Find all comments associated with a username (the AUTHOR field). Return a JSON array of all comments associated with that username. (This should use the data from all 5 data files: Psy, KatyPerry, LMFAO, Eminem, Shakira) [11 points]
# +
# %%writefile mapper1.py
# #!/usr/bin/env python
#Answer for mapper.py
# importing the libraries
import csv
import sys
def mapper_function(required_username):
# function that accepts an username as input
# counter keeps track of number of rows left, so that we can skip the first row (headers)
counter = 0
for row in csvreader:
if counter > 0:
usernames.append(row[1])
comments.append(row[3])
counter += 1
if (len(usernames) != len(comments)):
print ('Unequal number of entries in Author and Content... Aborting...')
sys.exit()
# pass the required username and the comments for that username to reducer stage
for x in range(len(usernames)):
if required_username == usernames[x]:
print (str(usernames[x]) + '\t' + str(comments[x]))
lines = sys.stdin.readlines()
# read from csv
csvreader = csv.reader(lines)
usernames = []
comments = []
# get username from command line argument
required_username = str(sys.argv[1])
mapper_function(required_username)
# +
# %%writefile reducer1.py
# #!/usr/bin/env python
#Answer for reducer.py
import sys
final_dict = {
'username': None,
'comments': []
}
# get input from mapper job
input_pairs = sys.stdin.readlines()
for input_pair in input_pairs:
# split the tab separated input (username\tcomment)
input_list = input_pair.split('\t', 1)
if (len(input_list) != 2):
continue
# append each comment
final_dict['comments'].append(input_list[1])
# set the username if it is not set
if final_dict['username'] is None:
final_dict['username'] = input_list[0]
# print out the output in desired form: username\t[..comments..]
print (final_dict['username'] + '\t' + str(final_dict['comments']))
# + language="bash"
# cat ./test_files/Youtube02-KatyPerry.csv ./test_files/Youtube01-Psy.csv \
# ./test_files/Youtube03-LMFAO.csv ./test_files/Youtube04-Eminem.csv ./test_files/Youtube05-Shakira.csv | ./mapper1.py 'Mini' | ./reducer1.py
# + language="bash"
#
# # Clear output
# rm -rf output2
#
# # Make sure hadoop is in standalone mode
# hadoop-standalone-mode.sh
#
# # Main pipeline command
# hadoop jar $HADOOP_HOME/share/hadoop/tools/lib/hadoop-streaming-*.jar \
# -files mapper1.py,reducer1.py \
# -input ./test_files/Youtube01-Psy.csv ./test_files/Youtube02-KatyPerry.csv ./test_files/Youtube03-LMFAO.csv \
# -mapper 'mapper1.py Mini' -file ./mapper1.py \
# -reducer ./reducer1.py \
# -output output2
# +
#Expected key-value output format:
#John Smith ["Comment 1", "Comment 2", "Comment 3", "etc."]
#Jane Doe ["Comment 1", "Comment 2", "Comment 3", "etc."]
# -
cel(writer,"audience_by_group_both",index=False)
df_output_audience_tireonly.to_excel(writer,"audience_by_group_tireonly",index=False)
df_both_by_trans.to_excel(writer,"trans_detail",index=False)
df_output_by_store_period.to_excel(writer,"store_by_each_period",index=False)
writer.save()
# -
df_both_by_trans.shape
| 15,859 |
/Jupyter/02. Python para ciencia de datos intermedio/Python para ciencia de datos intermedio.ipynb | d86c329ce86a16fff6ee4355e0d700457fd55cb7 | [] | no_license | juanesoc/Curso-python | https://github.com/juanesoc/Curso-python | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 291,183 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Controlling a treasure-hunting robot in a maze
#
# In this project you will use what you have just learned and, following the requirements, write code to control a robot that walks around a simulated environment and finds the target treasure.
#
# The simulated environment the robot lives in contains a few elements: the robot's starting point, obstacles, and the treasure chest. Your tasks include:
#
# 1. Analyze the data of the simulated environment
# 2. Control the robot to move randomly
# 3. (Optional) Control the robot to reach the goal
#
#
# * Well-commented code makes your program much more readable; try to add appropriate comments to your own code.
# ---
#
# ---
#
# ## Part 1: Analyzing the simulated-environment data
#
# Our robot can find the target treasure only if it knows its environment well enough, so we start by analyzing the data that describes the environment the robot lives in. This part tests your understanding of data structures and control flow.
#
# ### 1.1 Understanding how the environment data is stored
#
# First, think about this question: how should the data of the simulated environment be stored?
#
# We abstract the simulated environment into a grid world in which every cell is labeled by its coordinates; each cell can be in one of four states: an ordinary cell (passable), the robot's starting point (passable), an obstacle (impassable), or the treasure chest (the goal). For example, a simulated environment can be abstracted into a grid world of 3 rows and 4 columns and stored like this:
# ```
# environment = [[0,0,0,2],
#                [1,2,0,0],
#                [0,2,3,2]]
# ```
# We use a list to store the data of the virtual world. Every element of the outer list is itself a list that represents one row of the environment, and every element of those inner lists is a number whose meaning is:
# - 0: ordinary cell (passable)
# - 1: the robot's starting point (passable)
# - 2: obstacle (impassable)
# - 3: treasure chest (the goal)
#
# So, according to the data above, the cell in the second row, first column of this maze is our robot's starting point.
#
# __Note: the maze positions we describe (e.g. first row, first column) are not the same as the index values of the maze (such as `(0,0)`); be careful with the indexing.__
#
#
# The code below uses a helper function to read the data of the simulated environment and store it in the `env_data` variable.
#
# +
import helper
env_data = helper.fetch_maze()
# -
# ---
#
#
# **Task 1:** In the code below, write code to obtain these values:
#
# 1. The number of rows and columns of the simulated environment
# 2. The element in row 3, column 6 of the simulated environment
# +
#DONE 1 number of rows in the simulated environment
rows = len(env_data)
#DONE 2 number of columns in the simulated environment
columns = len(env_data[0])
#DONE 3 get the element in row 3, column 6 of the simulated environment
row_3_col_6 = env_data[2][5]
print("The maze has", rows, "rows and", columns, "columns; the element at row 3, column 6 is", row_3_col_6)
# -
# ---
#
# ## 1.2 Analyzing the environment data
#
# Next we need to analyze the data of the simulated environment. Follow the instructions below and compute the corresponding values.
#
# ---
#
# **Task 2:** In the code below, count the number of obstacles in the first row and in the third column of the simulated environment.
#
# Hint: *this can be done with a loop.*
# +
#DONE 4 count the obstacles in the first row of the simulated environment.
def count_row(list, num):
total = 0
for i in list:
if i == num:
total += 1
return total
number_of_barriers_row1 = count_row(env_data[0], 2)
#DONE in half 5 count the obstacles in the third column of the simulated environment.
def count_col(list, num, col):
total = 0
tem_col = []
for i in range(len(list)):
tem_col.append(list[i][col - 1])
for num in tem_col:
if num == 2:
total += 1
return total
number_of_barriers_col3 = count_col(env_data, 2, 3)
print("迷宫中,第一行共有", number_of_barriers_row1, "个障碍物,第三列共有", number_of_barriers_col3, "个障碍物。")
# -
# %run -i -e test.py RobotControllortTestCase.test_cal_barriers
# ---
#
# **Task 3:** In the code below:
#
# 1. Create a dictionary named `loc_map` with two keys, `start` and `destination`, whose values are the coordinates of the starting point and of the goal, stored as tuples such as `(0,0)`.
# 2. Take the value stored under `start` and save it in the variable `robot_current_loc`, which represents the robot's current position.
# +
loc_map = {'start':(0, 8), 'destination':(0, 0)} #Done 6 create the dictionary as required above
robot_current_loc = loc_map['start'] #done 7 store the robot's current position
# -
# %run -i -e test.py RobotControllortTestCase.test_cal_loc_map
#
# ---
#
# ---
#
# ## Part 2: Making the robot take a random walk
#
# In this step you will issue commands to make the robot move randomly around the environment. It tests your knowledge of control flow and of calling functions.
#
#
# ## 2.1 Controlling the robot's moves
#
# Our robot can perform four actions: move up `u`, move down `d`, move left `l`, move right `r`. However, because of the obstacles, a move often cannot be carried out. So here you need to implement a function that decides whether the robot, at a given position, can execute a given move.
#
# ---
#
# **Task 4:** In the code below, implement a function named `is_move_valid_special` with two inputs: the robot's position `loc` and the intended action `act`, e.g. `(1,1)` and `u`. It returns a boolean value indicating whether the robot at position `loc` can execute the action `act`.
#
#
# Hint 1: *you can read the `env_data` variable defined above to access the data of the simulated environment.*
#
# Hint 2: *after implementing the function, delete the `pass` statement below.*
#
# Hint 3: *handle the boundary case: when the robot is at the edge of the virtual environment, it cannot step outside of it.*
# +
def is_move_valid_special(loc, act):
"""
    Judge whether the robot can take action act
    at location loc.
    Keyword arguments:
    loc -- tuple, the robot's current location
    act -- string, the robot's intended action
"""
#DONE IN HALF
row = loc[0]
col = loc[1]
if act == 'u':
if row == 0:
return False
elif env_data[row - 1][col] != 2:
return True
elif act == 'd':
if row == len(env_data) - 1:
return False
elif env_data[row + 1][col] != 2:
return True
elif act == 'l':
if col == 0:
return False
elif env_data[row][col - 1] != 2:
return True
elif act == 'r':
if col == len(env_data[0]) - 1:
return False
elif env_data[row][col + 1] != 2:
return True
# -
# %run -i -e test.py RobotControllortTestCase.test_is_move_valid_special
# ---
# **Task 5:** In the code below, re-implement a function named `is_move_valid` with three inputs: the data of the simulated environment `env_data`, the robot's position `loc`, and the intended action `act`. As before, its return value is a boolean indicating whether the robot at position `loc` in the given virtual environment can execute the action `act`. (A hedged sketch of one possible implementation appears after the stub below.)
def is_move_valid(env_data, loc, act):
"""
    Judge whether the robot can take action act
    at location loc.
    Keyword arguments:
    env -- list, the environment data
    loc -- tuple, the robot's current location
    act -- string, the robot's intended action
"""
#TODO 9
pass
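# A hedged sketch of one possible implementation (illustrative only; the name `is_move_valid_sketch` is not part of the assignment template). It mirrors `is_move_valid_special` above but reads the maze from the `env_data` argument instead of the global variable.
# +
def is_move_valid_sketch(env_data, loc, act):
    row, col = loc
    deltas = {'u': (-1, 0), 'd': (1, 0), 'l': (0, -1), 'r': (0, 1)}
    new_row, new_col = row + deltas[act][0], col + deltas[act][1]
    if not (0 <= new_row < len(env_data) and 0 <= new_col < len(env_data[0])):
        return False  # the move would leave the grid
    return env_data[new_row][new_col] != 2  # 2 marks an obstacle
# -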
# %run -i -e test.py RobotControllortTestCase.test_is_move_valid
# ---
#
# **Task 6:** Please answer:
# 1. In the two functions implemented in Task 4 and Task 5, what is different about the variable `env_data`?
# 2. If you call the ``is_move_valid`` function with arguments ``env_data_``, ``loc_``, ``act_``, will modifying ``env_data`` inside the function change the value of ``env_data_``? Why?
#
# Hint: _try answering question 1 from the perspective of variable scope._
#
#
# Hint: _try answering question 2 from the perspective of mutable and immutable types._
#
#
# **Answer:** (write your answer here)
# ---
#
# ## 2.2 The robot's feasible actions
#
# ---
#
# **Task 7:** Write a function named `valid_actions`. It has two inputs, the data of the virtual environment `env_data` and the robot's position `loc`, and its output is a list of all the actions the robot can take at that position.
#
# Hint: *try calling the `is_move_valid` function defined above.* (A hedged sketch of one possible implementation appears after the TODO cell below.)
#
# +
## TODO 10 define and implement your function from scratch
# -
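# A hedged sketch (illustrative only): collect every action that the validity check accepts at `loc`. It calls the `is_move_valid_sketch` helper sketched above; your own `valid_actions` should call `is_move_valid` instead.
# +
def valid_actions_sketch(env_data, loc):
    return [act for act in ['u', 'd', 'l', 'r'] if is_move_valid_sketch(env_data, loc, act)]
# -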
# %run -i -e test.py RobotControllortTestCase.test_valid_actions
# ---
#
# ## 2.3 Moving the robot
#
# When the robot receives an action, its position should change accordingly.
#
# **Task 8:** Write a function named `move_robot` with two inputs, the robot's current position `loc` and the action `act` to execute. It returns the robot's new position `new_loc` after performing the action. (A hedged sketch appears after the TODO cell below.)
# +
##TODO 11 define and implement your function from scratch
# -
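# A hedged sketch (illustrative only): apply the action's row/column offset to the current location and return the new tuple.
# +
def move_robot_sketch(loc, act):
    deltas = {'u': (-1, 0), 'd': (1, 0), 'l': (0, -1), 'r': (0, 1)}
    return (loc[0] + deltas[act][0], loc[1] + deltas[act][1])
# -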
# %run -i -e test.py RobotControllortTestCase.test_move_robot
# ---
#
# ## 2.4 Moving the robot randomly
#
# Next, let's try moving the robot around the virtual environment at random and see what happens.
#
# **Task 9:** Write a function named `random_choose_actions` with two inputs, the data of the virtual environment `env_data` and the robot's position `loc`. The robot runs a loop of 300 iterations; in each iteration it:
#
# 1. uses the `valid_actions` function defined above to find the feasible actions at the current position;
# 2. uses the `choice` function from the `random` library to pick one of the feasible actions at random;
# 3. then moves the robot with the `move_robot` function defined above and updates the robot's position;
# 4. when the robot reaches the goal, prints "Treasure found in round n!".
#
# Hint: if the robot cannot find the treasure within 300 rounds, try increasing that number; it may work out :P (A hedged sketch of one possible implementation appears after the TODO cell below.)
# +
##TODO 12 implement your function from scratch
# -
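# A hedged sketch (illustrative only) built from the sketch helpers above; it prints a message when the treasure cell (value 3) is reached.
# +
import random

def random_choose_actions_sketch(env_data, loc):
    for step in range(1, 301):
        actions = valid_actions_sketch(env_data, loc)
        if not actions:
            break  # nowhere to go
        loc = move_robot_sketch(loc, random.choice(actions))
        if env_data[loc[0]][loc[1]] == 3:
            print("Treasure found in round {}!".format(step))
            return
    print("Treasure not found within 300 rounds")
# -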
# run
random_choose_actions(env_data, robot_current_loc)
#
# ---
#
# ---
#
# ## (Optional) Part 3: Making the robot reach the goal
#
# ## 3.1 Making the robot reach the goal
#
# Here you will combine all of the knowledge above and write code that drives the robot to the goal. This task may be a bit challenging for a beginner, so it is optional.
#
# **Task 10**: Try to implement an algorithm that, for a given simulated environment, outputs an action strategy that takes the robot to the goal.
#
# Hint: _you may want to look at:_
# * depth-first / breadth-first search,
# with the following references:
# 1. https://blog.csdn.net/raphealguo/article/details/7523411
# 2. https://www.cnblogs.com/yupeng/p/3414736.html
# * the A* algorithm,
# with the following references:
# 1. https://baike.baidu.com/item/A%2A算法
# 2. https://blog.csdn.net/hitwhylz/article/details/23089415
# +
##TODO 13 implement your algorithm
# -
# > Note: once you have written all the code and answered all the questions, you can export your iPython Notebook as an HTML file. In the menu bar, export it via **File -> Download as -> HTML (.html)** and submit this HTML together with the iPython notebook as your assignment.
| 6,789 |
/hw2/Alexander_Telepov_hw2_p2.ipynb | 05bf7d0796fb7a35670f223717f0f8d1a4c0d287 | [] | no_license | alexander-telepov/ml-course-skoltech | https://github.com/alexander-telepov/ml-course-skoltech | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 431,082 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="bWEEBnVC-Irv"
# # Home Assignment No. 2 - part two
#
# To solve this task, you will write a lot of code to try several machine learning methods for classification and regression.
# * You are **HIGHLY RECOMMENDED** to read relevant documentation, e.g. for [python](https://docs.python.org/3/), [numpy](https://docs.scipy.org/doc/numpy/reference/), [matplotlib](https://matplotlib.org/) and [sklearn](https://scikit-learn.org/stable/). Also remember that seminars, lecture slides, [Google](http://google.com) and [StackOverflow](https://stackoverflow.com/) are your close friends during this course (and, probably, your whole life?).
#
# * If you want an easy life, you have to use **BUILT-IN METHODS** of `sklearn` library instead of writing tons of your own code. There exists a class/method for almost everything you can imagine (related to this homework).
#
# * You have to write **CODE** directly inside specified places marked by comments: **BEGIN/END Solution**. Do not create new cells.
#
# * In some problems you are asked to provide a short discussion of the results. For that find the specific place marked via **Your text answer: \<write your answer\>**.
#
# * For every separate problem or subproblem (if specified) you can get only 0 points or maximal points for this problem. There are **NO INTERMEDIATE scores**. So make sure that you did everything required in the task.
#
# * Your **SOLUTION** notebook **MUST BE REPRODUCIBLE**, i.e., if the reviewer decides to restart the notebook and run all cells, after all the computation he will obtain exactly the same solution (with all the corresponding plots) as in your uploaded notebook. For this purpose, we suggest fixing random `seed` or (better) define `random_state=` inside every algorithm that uses some pseudorandomness.
#
# * Your code must be clear to the reviewer. For this purpose, try to include necessary comments inside the code. But remember: **GOOD CODE MUST BE SELF-EXPLANATORY** without any additional comments.
#
# * Many `sklearn` algorithms support multithreading (Ensemble Methods, Cross-Validation, etc.). Check if the particular algorithm has `n_jobs` parameters and set it to `-1` to use all the cores.
#
# + [markdown] id="ddR3sf3P82Ht"
# ## Task 6. Deep ANNs. (3 points)
#
# - **(1 pt.)** Activation functions; **(sub tasks 6.1)**
# - **(2 pt.)** MNIST classification. **(sub tasks 6.2)**
#
#
#
# ### Task 6.1 Activation functions.
# Plot the following [activation functions](https://pytorch.org/docs/master/nn.html#non-linear-activation-functions) using their PyTorch implementation and their derivatives using [autograd](https://pytorch.org/docs/stable/autograd.html) functionality `grad()`:
#
# 1. Plot `ReLU`, `ELU` ($\alpha = 1$), `Softplus` ($\beta = 1$) and `Sign`, `Sigmoid`, `Softsign`, `Tanh`.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 369} id="hcF-2GHz8wMz" outputId="150fa83f-feb5-4fbd-9aa0-b9d02f47707f"
# %matplotlib inline
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch
x = torch.arange(-2, 2, .01, requires_grad=True)
x_np = x.detach().numpy()
x.sum().backward() # to create x.grad
f, axes = plt.subplots(2, 2, sharex=True, figsize=(14, 5))
axes[0, 0].set_title('Values')
axes[0, 1].set_title('Derivatives')
for i, function_set in (0, (('ReLU', F.relu), ('ELU', F.elu), ('Softplus', F.softplus))), \
(1, (('Sign', torch.sign), ('Sigmoid', torch.sigmoid), ('Softsign', F.softsign), ('Tanh', torch.tanh))):
for function_name, activation in function_set:
### BEGIN Solution
axes[i, 0].plot(x_np, activation(x).detach().numpy(), label=function_name)
x.grad.zero_()
activation(x).sum().backward()
axes[i, 1].plot(x_np, x.grad.detach().numpy(), label=function_name)
### END Solution
axes[i, 0].legend()
axes[i, 1].legend()
plt.tight_layout()
plt.show()
# + [markdown] id="_misNcjO8wXF"
# Which of these functions may be, and which - definitely, are a poor choise as an activation function in a neural network? Why? Do not forget that output of the current layer servers as an input for the following one. Imagine a situation where we have many layers, what happens with the activation values?
#
#
#
#
#
#
# + id="ribXsHDSmlYN"
# BEGIN SOLUTION (do not delete this comment!)
# * ReLU: a good choice, though its gradient is exactly zero over a large range
# * ELU: a good choice
# * Softplus: a good choice
# * Sign: definitely a poor choice, since its derivative is zero almost everywhere
# * Sigmoid: a poor choice, it saturates quickly, so the derivative is non-zero only in a small range
# * Softsign: a borderline choice, it also saturates, but more slowly than Sigmoid and Tanh
# * Tanh: a poor choice, it saturates quickly, so the derivative is non-zero only in a small range
#END SOLUTION (do not delete this comment!)
# + [markdown] id="sW9OYyIw8wz4"
# ### Task 6.2 MNIST classification.
#
# At one of the seminars we have discussed an MLP (Multilayer perceptron) with one hidden layer, logistic activation functions and softmax. In this task, you are to:
#
# 1. Implement the MLP modules, including the Softmax cross entropy between `logits` and `labels` using numpy.
#
# 2. Train your numpy realization of MLP to classify the MNIST from `sklearn.datasets()`. The required accuracy on validation is `> 90%`.
#
# 3. Compare the accuracy of classification to your scores from `Part 1` with and without dimensionality reduction. Is this comparison fair?:) Derive the confusion matrix for all digit classes. Which digits are predicted better or worse than others, and why?
# + id="RKxe88YT_p9P"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score
# + id="E7_2lHue_r0x"
# fetch the dataset.
digits, targets = load_digits(return_X_y=True)
digits = digits.astype(np.float32) / 255
digits_train, digits_test, targets_train, targets_test = train_test_split(digits, targets, random_state=0)
train_size = digits_train.shape[0]
test_size = digits_test.shape[0]
input_size = 8*8
classes_n = 10
# + colab={"base_uri": "https://localhost:8080/", "height": 111} id="feWQtoKtn1vV" outputId="47edde13-7736-4ef6-f45a-8a315638e00e"
N = 10
sample_idx = np.random.choice(1797, N, replace=False)
digits_sample = digits[sample_idx]
targets_sample = targets[sample_idx]
f, ax = plt.subplots(1,10, figsize=(10, 5))
for i in range(N):
ax[i].imshow(digits_sample[i].reshape(8,8))
ax[i].set_title('label: '+str(targets_sample[i]))
# + [markdown] id="Pj6EctS6yTJK"
# A short recap on what we are going to achieve here.
# <br>
# 1. Forward pass:
# $$
# h_1 = X\theta_1+\beta_1
# $$
#
# $$
# O_1 = sig(h_1)
# $$
#
# $$
# h_2 = O_1\theta_2+\beta_2
# $$
# $$
# O_2 = softmax(h_2)
# $$
# $$
# Loss = CrossEntropy(O_2, true \space labels)
# $$
#
# 2. Compute gradients:
#
# To update weights first we need to compute loss gradients with respect to $\theta_1$ and $\theta_2$ and then update both $\theta$ and $\beta$.
#
# $$
# \frac{ \partial{loss} }{\partial{\theta_2}} = \frac{ \partial{loss} }{\partial{O_2}}\frac{ \partial{O_2} }{\partial{h_2}}\frac{ \partial{h_2} }{\partial{\theta_2}}
# $$
# Note, that $\frac{ \partial{h_2} }{\partial{\theta_2}}=O_1$, so we can cache this value during forward pass to speed up our computation.
# $$
# \frac{ \partial{loss} }{\partial{\theta_1}} = \frac{ \partial{loss} }{\partial{O_2}}\frac{ \partial{O_2} }{\partial{h_2}}\frac{ \partial{h_2} }{\partial{O_1}}\frac{ \partial{O_1} }{\partial{h_1}}\frac{ \partial{h_1} }{\partial{\theta_1}}
# $$
# Note, that $\frac{ \partial{h_1} }{\partial{\theta_1}}=X$.
#
# Since we are using sigmoid avtivation function here and
# $$
# \frac{ \partial{sig} }{\partial{h}} = sig(h)(1 - sig(h))
# $$
# It also makes sense to cache sig(h) during forward pass.
#
# 3. Update weights:
#
# $\theta:= \theta - \frac{ \partial{loss} }{\partial{\theta}}\alpha$, where $\alpha$ is some learning rate.
#
# Note, it was not shown here how to update and compute $\beta$ but you can do it!
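#
# A short addition (it follows from the same chain rule as above): since $\frac{ \partial{h_2} }{\partial{\beta_2}} = 1$ and $\frac{ \partial{h_1} }{\partial{\beta_1}} = 1$,
#
# $$
# \frac{ \partial{loss} }{\partial{\beta_2}} = \frac{ \partial{loss} }{\partial{O_2}}\frac{ \partial{O_2} }{\partial{h_2}}, \qquad
# \frac{ \partial{loss} }{\partial{\beta_1}} = \frac{ \partial{loss} }{\partial{O_2}}\frac{ \partial{O_2} }{\partial{h_2}}\frac{ \partial{h_2} }{\partial{O_1}}\frac{ \partial{O_1} }{\partial{h_1}},
# $$
#
# and the update rule is the same: $\beta := \beta - \frac{ \partial{loss} }{\partial{\beta}}\alpha$.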
# + [markdown] id="CaBenjDI_x6k"
# ### Implement the MLP with backprop
# + id="ffpXAKqQ_vfg"
### YOUR TASK STARTS HERE ###
#Here you should implement by yourself MLP class and its constituents including forward and backward propagation methods
class Linear:
def __init__(self, input_size, output_size):
# Trainable parameters of the layer and their gradients
self.thetas = np.random.randn(input_size, output_size) # the weight matrix of the layer (W)
self.thetas_grads = np.empty_like(self.thetas) # gradient w.r.t. the weight matrix of the layer
self.bias = np.random.randn(output_size) # bias terms of the layer (b)
self.bias_grads = np.empty_like(self.bias) # gradient w.r.t. bias terms of the linear layer
def forward(self, x):
# keep x for backward computation
self.x = x
output = np.matmul(x, self.thetas) + self.bias
return output
def backward(self, output_grad, learning_rate):
"""
Calculate and return gradient of the loss w.r.t. the input of linear layer given the input x and the gradient
w.r.t output of linear layer. You should also calculate and update gradients of layer parameters.
:param x: np.array, input tensor for linear layer;
:param output_grad: np.array, grad tensor w.r.t output of linear layer;
:return: np.array, grad w.r.t input of linear layer
"""
# BEGIN SOLUTION (do not delete this comment!)
input_grad = output_grad @ self.thetas.T
# calculate mean of gradients across batch w.r.t weights, bias
n = output_grad.shape[0]
self.thetas_grads = self.x.T @ output_grad / n
self.bias_grads = output_grad.mean(axis=0)
self.step(learning_rate)
# END Solution (do not delete this comment!)
return input_grad
def step(self, learning_rate):
self.thetas -= self.thetas_grads * learning_rate
self.bias -= self.bias_grads * learning_rate
class LogisticActivation:
def __init__(self):
# the layer has no parameters
pass
def sig(self, x):
return 1/(1 + np.exp(-x))
def forward(self, x):
# keep o for backward computation
self.o = self.sig(x)
return self.o
def backward(self, output_grad, learning_rate):
"""
Calculate and return the gradient of the loss w.r.t. the input
of logistic non-linearity (given input x and the gradient
w.r.t output of logistic non-linearity).
:param x: np.array, input tensor for logistic non-linearity;
:param output_grad: np.array, grad tensor w.r.t output of logistic non-linearity;
:return: np.array, grad w.r.t input of logistic non-linearity
"""
# BEGIN SOLUTION (do not delete this comment!)
o = self.o
input_grad = o * (1 - o) * output_grad
### END Solution (do not delete this comment!)
return input_grad
class MLP:
def __init__(self, input_size, hidden_layer_size, output_size):
self.linear1 = Linear(input_size, hidden_layer_size)
self.activation1 = LogisticActivation()
self.linear2 = Linear(hidden_layer_size, output_size)
def forward(self, x):
h1 = self.linear1.forward(x)
h1a = self.activation1.forward(h1)
out = self.linear2.forward(h1a)
return out
def backward(self, output_grad, learning_rate):
"""
Calculate and return the gradient of the loss w.r.t. the input of MLP given the input and the gradient
w.r.t output of MLP. You should also update gradients of paramerters of MLP layers.
Hint - you should chain backward operations of modules you have already implemented. You may also
need to calculate intermediate forward results.
:param x: np.array, input tensor for MLP;
:param output_grad: np.array, grad tensor w.r.t output of MLP;
:return: np.array, grad w.r.t input of MLP
"""
# BEGIN SOLUTION (do not delete this comment!)
linear2_input_grad = self.linear2.backward(output_grad, learning_rate)
activation1_input_grad = self.activation1.backward(linear2_input_grad, learning_rate)
out = self.linear1.backward(activation1_input_grad, learning_rate)
# END Solution (do not delete this comment!)
return out
# + id="07DUqp86_0To"
# BEGIN SOLUTION (do not delete this comment!)
def softmax_crossentropy_with_logits(logits, reference_answers):
reference_answers_ = np.zeros_like(logits)
I = np.arange(logits.shape[0])
reference_answers_[I, reference_answers] = 1
loss = np.sum(reference_answers_ * (-logits + np.log(np.sum(np.exp(logits)))))
return loss
def grad_softmax_crossentropy_with_logits(logits, reference_answers):
reference_answers_ = np.zeros_like(logits)
I = np.arange(logits.shape[0])
reference_answers_[I, reference_answers] = 1
    # the gradient of softmax cross-entropy w.r.t. the logits is softmax(logits) - one_hot(labels)
    exp_logits = np.exp(logits - logits.max(axis=1, keepdims=True))
    grad = exp_logits / exp_logits.sum(axis=1, keepdims=True) - reference_answers_
return grad
# END Solution (do not delete this comment!)
# + colab={"base_uri": "https://localhost:8080/"} id="DWkD2V1y_4QU" outputId="d544a97a-41f5-4e64-afae-00b2737e4167"
np.random.seed(42)
mlp = MLP(input_size=input_size, hidden_layer_size=100, output_size=classes_n)
epochs_n = 100
learning_curve = [0] * epochs_n
test_curve = [0] * epochs_n
x_train = digits_train
x_test = digits_test
y_train = targets_train
y_test = targets_test
learning_rate = 1e-2
for epoch in range(epochs_n):
y_pred = []
for sample_i in range(train_size):
x = x_train[sample_i].reshape((1, -1))
target = np.array([y_train[sample_i]])
### BEGIN Solution
# ... perform forward pass and compute the loss
# ... compute the gradients w.r.t. the input of softmax layer
# ... perform backward pass
# ... and update the weights with weight -= grad * learning_rate
logits = mlp.forward(x)
loss = softmax_crossentropy_with_logits(logits, target)
logits_grad = grad_softmax_crossentropy_with_logits(logits, target)
mlp.backward(logits_grad, learning_rate)
### END Solution
y_pred.extend(logits.argmax(1))
if epoch % 10 == 0:
y_pred_test = []
for sample_i in range(test_size):
x = x_test[sample_i].reshape((1, -1))
target = np.array([y_test[sample_i]])
logits = mlp.forward(x)
y_pred_test.extend(logits.argmax(1))
print('Starting epoch {}'.format(epoch), \
', Loss : {:.3}'.format(loss), \
', Accuracy on train: {:.3}'.format(accuracy_score(y_train, y_pred)), \
', Accuracy on test: {:.3}'.format(accuracy_score(y_test, y_pred_test)) )
# + colab={"base_uri": "https://localhost:8080/"} id="0DNQhxXaARCy" outputId="df7b0f28-cb53-4e7b-d1a3-dfcd222636eb"
# BEGIN SOLUTION (do not delete this comment!)
# confusion matrix
from sklearn.metrics import confusion_matrix
y_pred = np.empty_like(y_test)
for sample_i in range(test_size):
x = x_test[sample_i].reshape((1, -1))
target = np.array([y_test[sample_i]])
logits = mlp.forward(x)
y_pred[sample_i] = logits.argmax(1)
confusion_matrix(y_test.astype(np.int), y_pred.astype(np.int))
# END Solution (do not delete this comment!)
# + [markdown] id="MkSdyrpn8xdE"
# ## Task 7. Autoencoders on tabular data (2 points)
# **From now on we will be using pytorch for all the tasks.**
#
# We will build a latent representation for tabular data with simple Autoencoder (AE). We are going to work with the cancer dataset from scikit-learn package. You are to follow the instructions.
#
# 1. **(1 pt.)** Implement AE modules for tabular data. Train AE to get latent representation of the cancer dataset from `sklearn.datasets()`. Use `MSE` loss and get < $0.3$ on validation, with AE "bottleneck" = $2$; **(sub tasks 7.1 - 7.5)**
#
# 2. **(1 pt.)** Plot the latent representation of whole dataset in 2D, use colors to show object of different classes. **(sub tasks: 7.6)**
#
# + id="Sg5fX833AX9q"
# imports
import torch
import torch.nn as nn
import torch.utils.data as torch_data
import sklearn.datasets as sk_data
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
# + [markdown] id="AYtA62xA8xgB"
# #### 7.1 Fetch the data. Scale it and split on train and test.
# + colab={"base_uri": "https://localhost:8080/"} id="BinFOZc7Abpx" outputId="fa0f0e2d-5114-4343-916a-39bff7ba0722"
cancer_dset = sk_data.load_breast_cancer()
X_train, X_val, y_train, y_val = train_test_split(cancer_dset['data'], cancer_dset['target'], test_size=0.2, random_state=42)
print('\nTrain size: ', len(X_train))
print('Validation size: ', len(X_val))
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_val = scaler.transform(X_val)
print('Features: ', list(cancer_dset['feature_names']))
print('\nShape:', X_train.shape)
# + [markdown] id="x7Dzo8VIAaaf"
# #### 7.2 Let us firstly create the dataset, which we'll be able to use with pytorch dataloader.
# Implement `__len__` and `__getitem__` methods.
# + id="Vi4Cq7DtAl8u"
### BEGIN Solution
class CancerData(torch_data.Dataset):
def __init__(self, X, y):
super(CancerData, self).__init__()
self.X = torch.tensor(X, dtype=torch.float32)
self.y = torch.tensor(y, dtype=torch.float32)
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
return self.X[idx], self.y[idx]
### END Solution
# + colab={"base_uri": "https://localhost:8080/"} id="f4emJDB1ApHh" outputId="d695a094-f426-4cf4-880b-c05037673a1e"
train_dset = CancerData(X_train, y_train)
val_dset = CancerData(X_val, y_val)
print(train_dset[5])
# + [markdown] id="ksiBurhhAapc"
# #### 7.3 Now, we'll make a base class for our autoencoder.
# AE takes as input encoder and decoder (it will be two neural networks). Your task is to implement the forward pass.
# + id="wVlgW3_rAqgu"
class MyFirstAE(nn.Module):
def __init__(self, encoder, decoder):
super(MyFirstAE, self).__init__()
self.encoder = encoder
self.decoder = decoder
def forward(self, x):
"""
Take a mini-batch as an input, encode it to the latent space and decode back to the original space
x_out = decoder(encoder(x))
:param x: torch.tensor, (MB, x_dim)
:return: torch.tensor, (MB, x_dim)
"""
# BEGIN SOLUTION (do not delete this comment!)
x = self.encoder(x)
x = self.decoder(x)
# END Solution (do not delete this comment!)
return x
# + [markdown] id="rp39DZHDAzfQ"
# #### It is high time to create encoder and decoder neural networks!
# Set the hidden size of the network equal to `2`.
#
# **Hint.** You can use `nn.Sequential` to create your own architectures.
# + id="gQ5Zuro3Aqmu"
encoder = lambda hid: nn.Sequential(
nn.Linear(30, 20),
nn.LeakyReLU(inplace=True),
nn.Linear(20, 10),
nn.LeakyReLU(inplace=True),
nn.Linear(10, hid)
)
decoder = lambda hid: nn.Sequential(
nn.Linear(hid, 10),
nn.LeakyReLU(inplace=True),
nn.Linear(10, 20),
nn.LeakyReLU(inplace=True),
nn.Linear(20, 30),
)
# + id="NOiJCm00A2t5"
device = 'cpu'
from torch.optim.lr_scheduler import StepLR
net = MyFirstAE(encoder(2), decoder(2))
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.01, weight_decay=0.001)
scheduler = StepLR(optimizer, 30, gamma=0.5)
train_loader = torch_data.DataLoader(train_dset, batch_size=50, shuffle=True)
val_loader = torch_data.DataLoader(val_dset, batch_size=200, shuffle=False)
# + [markdown] id="hgOC1-iZA6ev"
# #### 7.4 Implement the missing parts of the `train` function
# + id="3SvYM1dxA5kI"
def train(epochs, net, criterion, optimizer, train_loader, val_loader,scheduler=None, verbose=True, save_dir=None):
freq = max(epochs//20,1)
net.to(device)
for epoch in range(1, epochs+1):
net.train()
losses_train = []
for X, _ in train_loader:
### BEGIN Solution
# Perform one step of minibatch stochastic gradient descent
reconstruction = net.forward(X)
optimizer.zero_grad()
loss = criterion(X, reconstruction)
loss.backward()
optimizer.step()
losses_train.append(loss.item())
# define NN evaluation, i.e. turn off dropouts, batchnorms, etc.
net.eval()
        # evaluate on the validation set after each epoch
losses_val = []
for X, _ in val_loader:
# Compute the validation loss
with torch.no_grad():
reconstruction = net.forward(X)
loss = criterion(X, reconstruction)
losses_val.append(loss.item())
### END Solution
if scheduler is not None:
scheduler.step()
if verbose and epoch%freq==0:
mean_val = sum(losses_val)/len(losses_val)
mean_train = sum(losses_train)/len(losses_train)
print('Epoch {}/{} || Loss: Train {:.4f} | Validation {:.4f}'\
.format(epoch, epochs, mean_train, mean_val))
# + [markdown] id="IsrutdnJAasT"
# #### 7.5 Train your AE on breast cancer dataset.
# Your goal is to get validation error <0.3.
#
# Some features that may help you to improve the performance (a sketch using some of them is shown right after this list):
# * `Dropout`
# * `Batchnorm`
# * lr scheduler
# * Batch size increase/decrease
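# + [markdown]
# The cell below (added) sketches one way to combine the ideas from the list above:
# an encoder/decoder pair with `BatchNorm1d` and `Dropout`. The exact layer sizes and the
# dropout probability are illustrative assumptions, not the reference solution; any
# architecture that reaches < 0.3 validation MSE with a bottleneck of 2 is acceptable.
# +
encoder_reg = lambda hid: nn.Sequential(
    nn.Linear(30, 20),
    nn.BatchNorm1d(20),
    nn.LeakyReLU(inplace=True),
    nn.Dropout(p=0.1),
    nn.Linear(20, 10),
    nn.LeakyReLU(inplace=True),
    nn.Linear(10, hid)
)
decoder_reg = lambda hid: nn.Sequential(
    nn.Linear(hid, 10),
    nn.BatchNorm1d(10),
    nn.LeakyReLU(inplace=True),
    nn.Linear(10, 20),
    nn.LeakyReLU(inplace=True),
    nn.Linear(20, 30),
)
# usage: net = MyFirstAE(encoder_reg(2), decoder_reg(2)), then call train() as above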
# + colab={"base_uri": "https://localhost:8080/"} id="Gj9Bk-RQBHcD" outputId="63f79d3b-ff91-4238-e8f3-fe4a32fdcb73"
# for `MSE` loss get < 0.3 on validation, with AE "bottleneck" = 2
train(100, net, criterion, optimizer, train_loader, val_loader, scheduler)
# + [markdown] id="9Tq4AMlDBCjW"
# #### 7.6 Let us take a look at the latent space.
# Encode the whole dataset using your AE, plot it in 2D and use colors to indicate objects of different classes.
# + colab={"base_uri": "https://localhost:8080/", "height": 336} id="_DD8qANbBN1s" outputId="31562596-70df-4719-a1d0-7bf4ba625541"
### BEGIN Solution
plt.figure(figsize=(14, 5))
net.eval()
with torch.no_grad():
enc = net.forward(torch.from_numpy(scaler.transform(cancer_dset['data'])).float()).detach().cpu()
plt.scatter(enc[:,0], enc[:,1], c=cancer_dset['target'], alpha=0.7);
plt.title('Latent space from the autoencoder bottleneck; purple dots correspond to malignant samples');
### END Solution
# + [markdown] id="ufty_3qKBCwD"
# ### Task 8. Autoencoder on kMNIST. (2 points)
#
#
# We will build a latent representation for `kMNIST` dataset by using our AE.
#
# 1. **(1 pt.)** Train AE to get a latent representation of the `kMNIST` dataset (loaded via `torchvision.datasets`). Follow the instructions. Use `MSE` loss and obtain < $0.035$ on validation, with AE "bottleneck" $\leq 40$; **(sub tasks 8.1 - 8.2)**
# 2. **(1 pt.)** Plot 10 images and their reconstructions. **(sub task 8.3)**
# + colab={"base_uri": "https://localhost:8080/", "height": 437, "referenced_widgets": ["9ea812fbae8b49a394df152e81fd359b", "d5177c89f9f64109809b98e3f7d12dcc", "ce378fbbbb4241abb1ccef1dadb77d04", "ff482e7b527e469bb6b56ebeb86f1576", "36ed9e979101449ba503116f3edf275e", "5945a58a17d24d5693703666efa8d714", "ce0259dd657e42d3988c92f5342c77a2", "e5058123f36344a0a6fec92a14751443", "611486a8f5844a6b84cad64a25eddfd7", "9ed16aa24f52411d904293d9c133958c", "15be64c126e1483f97cbb316cdd1f387", "627c9bca8a3c48d8a0b2567060bffd06", "b1bd9def1565419eb45a9ed7791612d6", "3757c84e012f4185a54d142311afaa70", "b14c5c5076e740348e751c58947f309d", "af21568ed3de4580beecc796f2c1574f", "5128c5524e7a43bda2ee14b3ecf90cf2", "412eeabcdd4d44c5b57a6fef085f913a", "f44e9c37697b45be88e25cfaa8756d63", "bce0850f0c9447a3919601d75485fc2b", "6d6d75dc2de249a7b03f3f5395619313", "5f54d80b86044766a7dfe3e49a71b46a", "94b87f82e52c4a27972ed4ba92503310", "5756dd971756457abac2a4a8e717047b", "3efa3908aa3c4eacb1671f176bdff33e", "ee76ea60574d43bbbd2cde6d98bd60f3", "d38d3449e1844015819d76f560c5718e", "d078c6bc68d543d38cf0e56253c6d3d5", "f05bda5aaec34aeb99edfe3e648926d3", "9f78f5369e6644f79de3dd05d20328b4", "8a8dcda652ef41cea608d8537a4ae845", "6cb3a11cf2c24fbc89e14e0c054c2404"]} id="mldP_RZZN7bm" outputId="8124e750-c7ad-45c2-c0a3-50399581e997"
from torchvision.datasets import KMNIST
data_train = KMNIST(train=True, root='./kmnist', download=True)
data_test = KMNIST(train=False, root='./kmnist', download=True)
# + [markdown] id="KZsmSJ3vuQrd"
# #### 8.1 Prepare the data and necessary functions.
# + id="KoRlKg3vOZCW"
x_train = np.array(data_train.data)
y_train = np.array(data_train.targets)
x_test = np.array(data_test.data)
y_test = np.array(data_test.targets)
# + id="fM3bsc0wBWTH"
# Reshape the data and scale
from sklearn.preprocessing import MaxAbsScaler
scaler = MaxAbsScaler()
n_train, n_test = x_train.shape[0], x_test.shape[0]
scaler.fit(x_train.reshape((n_train, -1)))
x_train = scaler.transform(x_train.reshape((n_train, -1))).reshape(n_train, 1, 28, 28)
x_test = scaler.transform(x_test.reshape((n_test, -1))).reshape(n_test, 1, 28, 28)
# + colab={"base_uri": "https://localhost:8080/", "height": 125} id="Tz2892txBYJk" outputId="6473c845-2646-4555-e82f-8a9ccae48a1a"
fig, ax = plt.subplots(ncols=10, figsize=(20, 5))
for i in range(10):
ax[i].imshow(scaler.inverse_transform(x_train[i].reshape(1,-1)).reshape(28,28));
ax[i].axis('off')
# + id="b5sHmYVxBeCV"
# BEGIN SOLUTION (do not delete this comment!)
class kMNISTData(torch_data.Dataset):
def __init__(self, X, y):
super(kMNISTData, self).__init__()
self.X = torch.tensor(X, dtype=torch.float32)
self.y = torch.tensor(y, dtype=torch.float32)
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
return self.X[idx].to('cuda'), self.y[idx].to('cuda')
# END Solution (do not delete this comment!)
# + id="lfla6EKiBgNy"
train_kmnist = kMNISTData(x_train, y_train)
test_kmnist = kMNISTData(x_test, y_test)
# + [markdown] id="J1jIdmh9uI-r"
# #### 8.2 Create encoder and decoder network for kMNIST.
# You can either use convolutions or flatten the images and use linear layers. You can choose hidden size (not larger than 40) and any architecture you like.
# + id="uvECUsVcBkmB"
# BEGIN SOLUTION (do not delete this comment!)
class Reshape(nn.Module):
def __init__(self, *shape):
super(Reshape, self).__init__()
self.shape = shape
def forward(self, x):
return x.view(self.shape)
encoder = lambda hid: nn.Sequential(
nn.Conv2d(1, 12, 2, 2),
nn.BatchNorm2d(12),
nn.LeakyReLU(),
nn.Conv2d(12, 12, 3, 1, 1),
nn.BatchNorm2d(12),
nn.LeakyReLU(),
nn.Conv2d(12, 12, 2, 2),
nn.BatchNorm2d(12),
nn.LeakyReLU(),
nn.Conv2d(12, 6, 3, 1, 1),
nn.BatchNorm2d(6),
nn.Flatten(),
nn.Linear(6*7*7, hid)
)
decoder = lambda hid: nn.Sequential(
nn.Linear(hid, 6 * 7 * 7),
Reshape(-1, 6, 7, 7),
nn.BatchNorm2d(6),
nn.LeakyReLU(),
nn.Conv2d(6, 12, 3, 1, 1),
nn.BatchNorm2d(12),
nn.ConvTranspose2d(12, 12, 2, 2),
nn.BatchNorm2d(12),
nn.LeakyReLU(),
nn.Conv2d(12, 6, 3, 1, 1),
nn.BatchNorm2d(6),
nn.LeakyReLU(),
nn.ConvTranspose2d(6, 6, 2, 2),
nn.BatchNorm2d(6),
nn.LeakyReLU(),
nn.Conv2d(6, 1, 3, 1, 1),
)
# END Solution (do not delete this comment!)
# + id="Id-iNSswBpe9"
# BEGIN SOLUTION (do not delete this comment!)
device = 'cuda'
epochs = 25
net = MyFirstAE(encoder(40), decoder(40))
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.01, weight_decay=0.001)
scheduler = StepLR(optimizer, 10, gamma=0.2)
train_loader = torch_data.DataLoader(train_kmnist, batch_size=100, shuffle=True)
val_loader = torch_data.DataLoader(test_kmnist, batch_size=500, shuffle=False)
# END Solution (do not delete this comment!)
# + colab={"base_uri": "https://localhost:8080/"} id="ytDZI0spBsFl" outputId="62697d23-46b4-4a44-a050-ad5199e96e81"
train(epochs, net, criterion, optimizer, train_loader, val_loader, scheduler)
# + [markdown] id="cXCR-eKBBuRI"
# #### 8.3 Plot any 10 images and their reconstructions.
# + colab={"base_uri": "https://localhost:8080/", "height": 290} id="ggGOxCc4Bvm5" outputId="fbfbc06a-2622-45ac-fd8e-3f99be6b581f"
# BEGIN SOLUTION (do not delete this comment!)
fig, ax = plt.subplots(ncols=10, nrows=2, figsize=(20, 5))
for i in range(10):
im = train_kmnist[i][0]
rec = net.forward(im.reshape(1,1,28,28)).detach().cpu().numpy()
ax[0, i].imshow(scaler.inverse_transform(im.cpu().reshape(1,-1)).reshape(28,28));
ax[1, i].imshow(scaler.inverse_transform(rec.reshape(1,-1)).reshape(28,28))
ax[0, i].set_title('original')
ax[1, i].set_title('reconstruction')
ax[0, i].axis('off')
ax[1, i].axis('off')
# END Solution (do not delete this comment!)
# + [markdown] id="1seXNwq3KoYM"
# ## Task 9. Convolutional NN (4 points)
#
#
# In this task, you will need to answer two questions and train a convolution neural network for a task of sound classification.
#
# - **(1 pt.)** Debug the given convolutional neural network and explain what's wrong with it and how to fix it. You will need to identify at least 4 problems; **(sub-tasks 9.1)**
#
# - **(1 pt.)** Compute the output shapes of each layer by hand; when building a neural network we often need to know the output size of a layer before adding the next one; **(sub-tasks 9.2)**
#
# - **(2 pt.)** Build your own convolutional NN and train it for the task of sound classification. Your goal is to achieve the highest possible accuracy: > 70% gives 1 pt. and > 90% gives 2 pt. **(sub-tasks 9.3 - 9.6)**
# + [markdown] id="4fCPSsn3K22j"
# #### 9.1 Debug this convolutional neural network and write down proposed fixes. There are at least four fixes that can be applied. Explain your answers.
# + id="jQDXJDhFLI6a"
# assuming input shape [batch, 3, 32, 32]
cnn = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=512, kernel_size=(3,3)), # 30
nn.Conv2d(in_channels=512, out_channels=128, kernel_size=(3,3)), # 28
nn.Conv2d(in_channels=128, out_channels=10, kernel_size=(3,3)), # 26
nn.ReLU(),
nn.MaxPool2d((1,1)),
nn.Conv2d(in_channels=10, out_channels=3, kernel_size=(10,10)), # 17
nn.Conv2d(in_channels=3, out_channels=64, kernel_size=(10,10)), # 8
nn.MaxPool2d((15,15)),
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(10,10)),
nn.Softmax(),
Flatten(),
nn.Linear(64, 256),
nn.Softmax(),
nn.Linear(256, 10),
nn.Sigmoid(),
nn.Dropout(0.5)
)
# + id="0qomga4ALpdi"
# BEGIN SOLUTION (do not delete this comment!)
# Your answers:
# 1. MaxPool2d with kernel size 1 does nothing, so the layer is useless.
# 2. After the 5-th convolution the spatial size is 8x8, but MaxPool2d with kernel
#    size 15 is applied, which cannot work; the later sizes are also inconsistent:
#    if the 6-th convolution produced a 1x1 spatial map, the first linear layer
#    should take 128 features instead of 64.
# 3. Softmax should only be used as the last layer (it saturates just like sigmoid);
#    for training it is better to drop the explicit softmax + NLL loss and use
#    CrossEntropyLoss on raw logits, which is more numerically stable and efficient.
# 4. Sigmoid is a poor choice here because it saturates quickly; if a sigmoid output
#    is really needed, BCEWithLogitsLoss is preferable to sigmoid + BCE.
# 5. Dropout should be placed after the intermediate linear layers, not right before the output.
# 6. Shortcut (residual) connections could improve quality, although the network is not very deep.
# 7. There are no normalization layers, which would improve training stability.
# 8. Changing the number of channels very abruptly usually works worse than changing it gradually.
# Also note that the first Conv2d expects 1 input channel while the stated input has 3 channels.
# END Solution (do not delete this comment!)
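# + [markdown]
# Below is one possible corrected version of the network (added for illustration).
# It is a sketch, not the reference answer: the channel counts are assumptions; the point
# is only that the shapes are consistent, the classifier ends with raw logits (to be used
# with `nn.CrossEntropyLoss`), and dropout sits between the hidden linear layers.
# +
# a possible fixed architecture for input [batch, 3, 32, 32] (illustrative)
fixed_cnn = nn.Sequential(
    nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),    # 32x32
    nn.BatchNorm2d(32),
    nn.ReLU(),
    nn.MaxPool2d(2),                                                         # 16x16
    nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),    # 16x16
    nn.BatchNorm2d(64),
    nn.ReLU(),
    nn.MaxPool2d(2),                                                         # 8x8
    nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),   # 8x8
    nn.BatchNorm2d(128),
    nn.ReLU(),
    nn.MaxPool2d(2),                                                         # 4x4
    nn.Flatten(),
    nn.Linear(128 * 4 * 4, 256),
    nn.ReLU(),
    nn.Dropout(0.5),
    nn.Linear(256, 10),   # raw logits, to be paired with nn.CrossEntropyLoss
)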
# + [markdown] id="uh8BusFMQm-2"
# #### 9.2 Convolutional warm-up: compute the output shapes of each layer by hand.
# + id="iGvJl1LxQoKw"
# Compute the output shape of each layer and of the final layer without running the code.
# input size x = [8, 1, 300, 303].
conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=(5, 5), padding=0, stride=2)
conv2 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=(3, 3), padding=2, stride=1)
conv3 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=(5, 5), padding=2, stride=2)
maxpool1 = nn.MaxPool2d((2, 2))
cnn = nn.Sequential(conv1, conv2, conv3, maxpool1)
# + id="2ly8YywpLMKh"
# BEGIN SOLUTION (do not delete this comment!)
# example:
# conv1
# output_h = (300 - 5+0) /2 +1 = 148
# output_w = (303 - 5+0) /2 +1 = 150
# Continue for all the layers:
# conv2
# output_h = (148 - 3 + 2 * 2) / 1 + 1 = 150
# output_w = (150 - 3 + 2 * 2) / 1 + 1 = 152
# conv3
# output_h = (150 - 5 + 2 * 2) / 2 + 1 = 75
# output_w = (152 - 5 + 2 * 2) / 2 + 1 = 76
# maxpool1 = MaxPool2d((2, 2))
# output_h = 75 / 2 = 37
# output_w = 76 / 2 = 38
# final layer output = [8, 16, 37, 38]
# END Solution (do not delete this comment!)
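# + [markdown]
# As a check (added), the hand-computed shapes can be verified by pushing a dummy tensor
# through the layers defined above; this assumes the cell defining `cnn` has been run.
# +
with torch.no_grad():
    dummy = torch.zeros(8, 1, 300, 303)
    print(cnn(dummy).shape)  # expected: torch.Size([8, 16, 37, 38])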
# + [markdown] id="1D1z6WEjfZwT"
# #### 9.3 Convolutional networks for sound classication
#
# - Now your task is to classify sounds using the convolutional network. You can use different network architectures. And your goal is to get the highest score possible.
#
# - First of all, we will preprocess the audio into spectrograms, so that you can treat them as images.
# + colab={"base_uri": "https://localhost:8080/"} id="t7BsAPwYfv6X" outputId="d48bf5f3-53b7-4ed5-fd1a-836473edae21"
# imports
import os
import torch
import numpy as np
import torch.nn as nn
from torch import Tensor
# !pip install torchaudio
import torchaudio
from torchaudio import transforms
from IPython.display import Audio
import torch.nn.functional as F
from torch.utils.data import DataLoader,random_split,Dataset
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, accuracy_score
# + colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["6d4a12e73a344069981447c9543053b9", "29c3287e1d1a476b89e8406f1c6f596a", "28af3e6e18824d7e9dcda0778c573883", "61216fae41b7448e8f724145e50d554e", "de7033ddcff84075be383e61ddb16c83", "c1996ab9c94d45c4aeea099f69441db3", "6b4ed0b03042452fa47a53f0313ddcba", "26aa4453751d4ced87119252e375ca11"]} id="lRCcakoVgFDK" outputId="1f9f1781-4e84-4cfb-dfe6-7dc549253328"
# Get the dataset
dataset = torchaudio.datasets.SPEECHCOMMANDS('./' , url = 'speech_commands_v0.02',
folder_in_archive= 'SpeechCommands', download = True)
# + [markdown] id="kUE5grAGj9SN"
# ### Let's look at the dataset.
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="85jqMauLhNER" outputId="076b5494-2ad8-41c1-d421-1a7caca8c449"
plt.figure()
plt.plot(dataset[0][0].t())
# + colab={"base_uri": "https://localhost:8080/", "height": 92} id="CYGRUGnghIYY" outputId="2ed60715-83b7-4903-d417-580d29ba17d9"
print('Label: ',dataset[11760][2])
Audio(np.array(dataset[11760][0].t()).reshape(-1), rate=16000)
# + [markdown] id="tMBl98nnkRcr"
# #### Actually, we could use really long sequences to classify our samples but it's better to work with them as spectrograms so we can use convolutional layers.
# + colab={"base_uri": "https://localhost:8080/", "height": 339} id="ZysQUugGkWPB" outputId="f56a9252-056e-4a17-be83-83d4a82a21e3"
specgram = torchaudio.transforms.Spectrogram(n_fft=200, normalized=True)(dataset[77][0])
print("Shape of spectrogram: {}".format(specgram.size()))
plt.figure(figsize=(10,5))
plt.imshow(specgram[0,:,:].numpy());
plt.colorbar()
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="yylTFJjPo57s" outputId="7eac223f-3ec0-4c19-a027-9d8f973c2369"
# Some preprocessing routine
# Filter samples only with 16000 sampling rate
# Make labels dictionary
count = 0
wave = []
labels = []
labels_dict = {}
for i in range(0,105829):
if dataset[i][0].shape == (1,16000):
wave.append(dataset[i][0])
labels.append(dataset[i][2])
# + colab={"base_uri": "https://localhost:8080/"} id="YD4Ds2BhqX4T" outputId="146e7799-5433-4f21-c313-d4c6d3512a31"
set_labels = list(set(labels))
labels_dict = {set_labels[i] :i for i in range(len(set_labels))}
labels_dict
# + [markdown] id="W5tkVQDTnMvD"
# #### 9.4 Your task right now is to implement a speech dataloader; it will be almost the same as in the previous tasks.
# + id="kuGvJ4EDm90d"
transformation = torchaudio.transforms.Spectrogram(n_fft=200, normalized=True)
### BEGIN Solution
class SpeechDataLoader(Dataset):
def __init__(self, data, labels, label_dict, transform=None):
self.data = data
self.labels = labels
self.label_dict = label_dict
self.transform = transform
def __len__(self):
return len(self.labels)
def __getitem__(self,idx):
waveform = self.data[idx]
specgram = self.transform(waveform)
if self.labels[idx] in self.label_dict:
label = self.label_dict[self.labels[idx]]
return specgram, label
# END Solution (do not delete this comment!)
# + id="OarKlZWooQbS"
torch.manual_seed(0)
dataset= SpeechDataLoader(wave, labels, labels_dict, transformation)
traindata, testdata = random_split(dataset, [round(len(dataset)*.8), round(len(dataset)*.2)], )
train_loader = DataLoader(traindata, batch_size=100, shuffle=True)
val_loader = DataLoader(testdata, batch_size=100, shuffle=True)
# + [markdown] id="gQxaAGoBuLRR"
# #### 9.5 Your task is to build a convolutional neural network that yields a high score.
# + id="ryDzP0l9s4Pi"
# BEGIN Solution (do not delete this comment!)
class BasicBlock(nn.Module):
def __init__(self, in_channels, out_channels, relu=True, cropw=None, croph=None):
super().__init__()
self.backbone = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=2, stride=2),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels)
)
self.shortcut = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=2)
self.cropw = cropw
self.croph = croph
self.relu = relu
def forward(self, x):
out = self.backbone(x)
if self.cropw is not None:
x = x[:, :, :, :-self.cropw]
if self.croph is not None:
x = x[:, :, :-self.croph, :]
out += self.shortcut(x)
if self.relu:
out = F.relu(out)
return out
class NN2D(nn.Module):
def __init__(self, num_class):
super(NN2D,self).__init__()
self.conv = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, padding=1)
self.backbone = nn.Sequential(
BasicBlock(32, 32, cropw=1, croph=1),
BasicBlock(32, 64),
BasicBlock(64, 128, croph=1),
BasicBlock(128, 256),
BasicBlock(256, 512, relu=False)
)
self.linear1 = nn.Linear(512, 128)
self.dropout = nn.Dropout(inplace=True)
self.linear2 = nn.Linear(128, num_class)
def forward(self, x):
out = self.conv(x)
out = self.backbone(out)
out = F.avg_pool2d(out, (3, 5))
out = out.view(out.shape[0], -1)
out = self.linear1(out)
out = self.dropout(out)
out = self.linear2(out)
return out
# END Solution (do not delete this comment!)
# + id="_Ev8ZHfquAMH"
# BEGIN Solution (do not delete this comment!)
from torch.optim import Adam
from torch.nn.functional import cross_entropy
net = NN2D(len(set_labels))
num_epochs = 10
criterion = cross_entropy
optimizer = Adam(net.parameters(), lr=0.001, weight_decay=0.001)
scheduler = StepLR(optimizer, 7, gamma=0.2)
# END Solution (do not delete this comment!)
# + [markdown] id="NpgUGDXJvIhk"
# #### 9.6 Almost there, now, we need to rewrite our training loop a little bit.
# + id="k7BqVHuSvEhZ"
def train(epochs, net, criterion, optimizer, train_loader, val_loader,scheduler=None, verbose=True, device='cpu'):
net.to(device)
freq = max(epochs//15,1)
for epoch in range(1, epochs+1):
net.train()
losses_train = []
for X, target in train_loader:
X, target = X.to(device), target.to(device)
### BEGIN Solution (do not delete this comment!)
# Perform one step of minibatch stochastic gradient descent
predict = net.forward(X)
optimizer.zero_grad()
loss = criterion(predict, target)
loss.backward()
optimizer.step()
losses_train.append(loss.item())
# END Solution (do not delete this comment!)
if scheduler is not None:
scheduler.step()
if verbose and epoch%freq==0:
y_pred_val = []
y_true_val = []
net.eval()
            # evaluate on the validation set
losses_val = []
for X, target in val_loader:
X, target = X.to(device), target.to(device)
# BEGIN Solution (do not delete this comment!)
# Compute the validation loss
with torch.no_grad():
target_hat_val = net.forward(X)
loss = criterion(target_hat_val, target)
losses_val.append(loss.item())
# END Solution (do not delete this comment!)
y_pred_val.extend(target_hat_val.argmax(1).tolist())
y_true_val.extend(target.tolist())
mean_val = sum(losses_val)/len(losses_val)
mean_train = sum(losses_train)/len(losses_train)
print('Val epoch {}'.format(epoch), \
', Loss : {:.3}'.format(mean_train), \
', Accuracy on test: {:.3}'.format(accuracy_score(y_true_val, y_pred_val)) )
# + colab={"base_uri": "https://localhost:8080/"} id="VjHENdc_uqc7" outputId="92400966-8348-46a9-f907-7f61234c963d"
train(num_epochs, net, criterion, optimizer, train_loader, val_loader, scheduler, device=0)
| 44,017 |
/data-structures/recursion/Staircase.ipynb | 2974220531bba573f441680fa2bd29175e5ba5e5 | [] | no_license | annahra/dsa-nanodegree | https://github.com/annahra/dsa-nanodegree | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 4,151 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] graffitiCellId="id_v5swjqy"
# ### Problem Statement
#
# Suppose there is a staircase that you can climb in either 1 step, 2 steps, or 3 steps. In how many possible ways can you climb the staircase if the staircase has `n` steps? Write a recursive function to solve the problem.
#
# **Example:**
#
# * `n == 1` then `answer = 1`
#
# * `n == 3` then `answer = 4`<br>
# The output is `4` because there are four ways we can climb the staircase:
# - 1 step + 1 step + 1 step
# - 1 step + 2 steps
# - 2 steps + 1 step
# - 3 steps
# * `n == 5` then `answer = 13`
#
# + [markdown] graffitiCellId="id_74s7rzj"
# ### Exercise - Write a recursive function to solve this problem
# + graffitiCellId="id_yv3ymjf"
"""
param: n - number of steps in the staircase
Return number of possible ways in which you can climb the staircase
"""
def staircase(n):
'''Hint'''
    # Base Case - What holds true for the smallest inputs, i.e., n == 0, 1, 2 or 3? Return the number of ways to climb n steps.
    # Recursive Step - For n > 3, express the answer in terms of the smaller subproblems (n - 1, n - 2 and n - 3 steps).
pass
# +
# Solution
## Read input as specified in the question.
## Print output as specified in the question.
def staircase(n):
if n <= 0:
return 1
if n == 1:
return 1
elif n == 2:
return 2
elif n == 3:
return 4
return staircase(n - 1) + staircase(n - 2) + staircase(n - 3)
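# + [markdown]
# The plain recursion above recomputes the same subproblems many times, so its running time
# grows exponentially with `n`. A memoized variant (added for illustration, not part of the
# original exercise) keeps the same recurrence but caches results, making it linear in `n`.
# +
def staircase_memo(n, memo=None):
    if memo is None:
        memo = {}
    if n <= 0:
        return 1
    if n == 1:
        return 1
    if n == 2:
        return 2
    if n == 3:
        return 4
    if n not in memo:
        memo[n] = staircase_memo(n - 1, memo) + staircase_memo(n - 2, memo) + staircase_memo(n - 3, memo)
    return memo[n]

print(staircase_memo(5))   # 13, matching the example in the problem statement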
# + [markdown] graffitiCellId="id_w7lklez"
# <span class="graffiti-highlight graffiti-id_w7lklez-id_brqvnra"><i></i><button>Show Solution</button></span>
# + graffitiCellId="id_qnr80ry"
def test_function(test_case):
n = test_case[0]
solution = test_case[1]
output = staircase(n)
if output == solution:
print("Pass")
else:
print("Fail")
# + graffitiCellId="id_6g7yxbj"
n = 3
solution = 4
test_case = [n, solution]
test_function(test_case)
# + graffitiCellId="id_1q0pz7y"
n = 4
solution = 7
test_case = [n, solution]
test_function(test_case)
# + graffitiCellId="id_p3uxb7h"
n = 7
solution = 44
test_case = [n, solution]
test_function(test_case)
| 2,403 |
/Visual Genome - Regions.ipynb | aaca619c4e7f2a2edbc64ced4d5207349578bf36 | ["MIT"] | permissive | MeMAD-project/statistical-tools | https://github.com/MeMAD-project/statistical-tools | 1 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 2,475,773 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to Python: Lists, Iterations and Strings
#
# <img style="float: right; margin: 0px 0px 15px 15px;" src="https://www.python.org/static/community_logos/python-logo.png" width="200px" height="200px" />
#
# > We already know a bit more of Python's syntax, such as how to write functions and how to use conditionals. It is time to look at other variable types (arrays) and at how to write lines of code that perform repetitive operations.
#
# References:
# - https://www.kaggle.com/learn/python
# ___
# # 1. Lists
#
# Lists are Python objects that represent ordered sequences of values.
#
# Let's look at a couple of examples of how to create them:
# Primeros números primos
primos = [2, 5, 3, 7]
# Planetas del sistema solar
planetas = ['Mercurio', 'Venus', 'Tierra', 'Marte',
'Jupiter', 'Saturno', 'Urano', 'Neptuno']
primos
planetas
# We can see that lists are not only for numbers.
#
# We have already seen lists of numbers, but also lists of strings.
#
# We can even make lists of lists:
lista_primos_planetas = [primos, planetas]
lista_primos_planetas
# Moreover, we can make lists of objects of different types:
lista_diferentes_tipos = [2, 0., 'Hola', help, primos]
lista_diferentes_tipos
# No doubt it will often be more useful to keep several results in a single list than to keep many results in separate individual objects.
#
# But, once they are in the list, how do we access the individual objects?
# ## 1.1 Indexing
#
# We can access the individual elements of a list using brackets ([]).
#
# For example, which planet is closest to the sun in our solar system?
#
# - An important note here: Python uses indices starting at zero (0):
# Planet closest to the sun
planetas[0]
# Next planet
planetas[1]
# Todo bien...
#
# Ahora, ¿cuál es el planeta más alejado del sol?
#
# - Los elementos de una lista pueden tambien ser accedidos de atrás para adelante, utilizando números negativos:
# Planeta más alejado del sol
planetas[-1]
# Segundo planeta más alejado
planetas[-2]
# Muy bien...
#
# Y si quisiéramos averiguar, por ejemplo, ¿cuáles son los tres planetas más cercanos al sol?
# Tres primeros planetas
planetas[0:3]
# Entonces `lista[a:b]` es nuestra manera de preguntar por todos los elementos de la lista con índice comenzando en `a` y continuando hasta `b` sin incluir (es decir, hasta `b-1`).
#
# Los índices de comienzo y de término son opcionales:
# - Si no ponemos el índice de inicio, se asume que es cero (0): `lista[:b] == lista[0:b]`
# Reescribir la expresión anterior
planetas[:3]
planetas[-3:]
# - Equivalentemente, si no ponemos el índice de fin, se asume que este equivale a la longitud de la lista:
# Lista de todos los planetas comenzando desde el planeta tierra
planetas[2:]
# También podemos usar índices negativos cuando accedemos a varios objetos.
#
# Por ejemplo, ¿qué obtenemos con las siguientes expresión?
planetas[-1]
# ```python
# lista[n:n + N] = [lista[n], lista[n + 1], ..., lista[n + N - 1]]
# ```
planetas[1:-1]
planetas[-3:]
planetas[:4]
planetas[5:]
planetas[:4] + planetas[5:]
# Slice:
#
# ```python
# lista[n:n+N:s] = [lista[n], lista[n + s], lista[n + 2 * s], ..., ]
# ```
primos
primos[::2]
# Elementos de la lista en reverso (al revés)
primos[::-1]
# ## 1.2 Modifying lists
#
# Las listas son objetos "mutables", es decir, sus objetos pueden ser modificados directamente en la lista.
#
# Una manera de modificar una lista es asignar a un índice.
#
# Por ejemplo, supongamos que la comunidad científica, con argumentos basados en la composición del planeta, decidió modificar el nombre de "Planeta Tierra" a "Planeta Agua".
planetas
planetas[2] = 'Agua'
planetas
# También podemos cambiar varios elementos de la lista a la vez:
planetas[:3] = ['mer', 'ven', 'tie']
planetas
# ## 1.3 Functions on lists
#
# Python posee varias funciones supremamente útiles para trabajar con listas.
#
# `len()` nos proporciona la longitud (número de elementos) de una lista:
# función len()
len(planetas)
len(primos)
# `sorted()` nos regresa una versión ordenada de una lista:
# Ayuda en la función sorted
help(sorted)
primos
# Llamar la función sorted sobre primos
sorted(primos)
sorted(primos, reverse=True)
planetas = ['Mercurio', 'Venus', 'Tierra', 'Marte',
'Jupiter', 'Saturno', 'Urano', 'Neptuno']
# Llamar la función sorted sobre planetas
sorted(planetas)
len('Jupiter')
def long_str(s):
return len(s)
long_str2 = lambda s: len(s)
long_str("Jupiter"), long_str2("Jupiter")
# **Paréntesis: Funciones anónimas**
#
# Las funciones anónimas comienzan con la palabra clave `lambda` seguidas por el (los) argumento(s) de la función. Después de `:` se escribe lo que retorna la función.
sorted(planetas, key=long_str)
sorted(planetas, key=lambda s: len(s))
# `sum()`, ya se imaginarán que hace:
primos
# Ayuda en la función sum
help(sum)
# sum
sum(primos)
# En la clase pasada utilizamos las funciones `min()` y `max()` sobre varios argumentos.
#
# También le podemos pasar un solo argumento tipo lista.
# min
min(primos)
# max
max(primos)
# ___
# ## Pause: Objects
#
# Hasta ahora he venido utilizando la palabra **objeto** sin darle mucha importancia. ¿Qué significa en realidad?
#
# - si han visto algo de Python, pueden haber escuchado que todo en Python es un objeto.
#
# En la siguiente semana estudiaremos a nivel muy básico qué es la programación orientada a objetos.
#
# Por ahora, nos basta con saber que los objetos cargan varias "cosas" con ellos, y podemos acceder a estas "cosas" utilizando la "sintaxis punto (.)" de Python.
#
# Por ejemplo, los números en Python tienen una variable asociada llamada `imag`, la cual representa su parte imaginaria:
# Atributos real e imag
a = 7
a.imag, a.real
dir(a)
a.denominator, a.numerator
b = (6 + 5j) / 3
b.real, b.imag
dir(b.imag)
c = 5 / 3
c.as_integer_ratio()
7505999378950827 / 4503599627370496
from fractions import Fraction
Fraction(c).limit_denominator(10)
help(Fraction().limit_denominator)
# Entre las "cosas" que los objetos cargan, también pueden haber funciones.
#
# Una función asociada a un objeto se llama **método**.
#
# Las "cosas" asociadas a los objetos, que no son funciones, son llamados **atributos** (ejemplo: imag).
# Método conjugate()
b.conjugate()
# Y si no sabemos qué hace un método determinado en un objeto, también podemos pasar métodos a la función `help()`, de la misma manera en que le pasamos funciones:
# help(objeto.metodo)
help(b.conjugate)
# Bueno, ¿y esto de que nos sirve?
#
# Pues las listas tienen una infinidad de métodos útiles que estaremos usando...
# ___
# ## 1.4 List methods
#
# `list.append()` modifica una lista añadiéndole un elemento en el final:
planetas = ['Mercurio',
'Venus',
'Tierra',
'Marte',
'Jupiter',
'Saturno',
'Urano',
'Neptuno']
# Plutón también es un planeta
variable = planetas.append("Pluton")
print(variable)
planetas
# ¿Porqué no obtuvumos una salida en la celda de arriba?
#
# Verifiquemos la documentación del método append:
help(planetas.append)
help(list.append)
help(append)
# **Comentario:** append es un método de todos los objetos tipo `list`, de manera que habríamos podido llamar `help(list.append)`. Sin embargo, si intentamos llamar `help(append)`, Python nos dirá que no existe nada con el nombre "append", pues `append` solo existe en el contexto de listas.
# `list.pop()` remueve y devuelve el último elemento de una lista:
# Que Plutón siempre no es un planeta
planetas.pop()
planetas
help(planetas.pop)
planetas.pop(1)
planetas
# ### 1.4.1 Searching in lists
#
# ¿En qué lugar de los planetas se encuentra la Tierra? Podemos obtener su índice usando el método `list.index()`:
planetas = ['Mercurio',
'Venus',
'Tierra',
'Marte',
'Jupiter',
'Saturno',
'Urano',
'Neptuno']
planetas
# índice del planeta tierra
planetas.index("Tierra")
planetas[2]
planetas[planetas.index('Tierra'):]
# Está en el tercer lugar (recordar que el indizado en Python comienza en cero)
#
# ¿En qué lugar está Plutón?
# índice del planeta plutón
planetas.index('Pluton')
# <font color=red> Error ... </font> ¡como debe ser!
#
# Para evitar este tipo de errores, existe el operador `in` para determinar si un elemento particular pertenece a una a una lista:
planetas
# ¿Es la Tierra un planeta?
'Tierra' in planetas
# ¿Es Plutón un planeta?
'Pluton' in planetas
# Usar esto para evitar el error de arriba
if 'Pluton' in planetas:
planetas.index("Pluton")
# Hay otros métodos interesantes de las listas que no veremos. Si quieren aprende más acerca de todos los métodos y atributos de un objeto particular, podemos llamar la función `help()` sobre el objeto.
#
# Por ejemplo:
dir(list)
help(list)
primos
primos.extend([11, 13])
primos
# ## 1.5 Tuples
#
# También son arreglos de objetos similares a las listas. Se diferencian en dos maneras:
#
# - La sintaxis para crear tuplas usa paréntesis (o nada) en vez de brackets:
t = (1, 2, 3)
t
# O equivalentemente
t = 1, 2, 3
t
t[1:]
# - Las tuplas, a diferencia de las listas, no pueden ser modificadas (son objetos inmutables):
# Intentar modificar una tupla
t[1] = 5
# Las tuplas son usadas comúnmente para funciones que devuelven más de un valor.
#
# Por ejemplo, el método `as_integer_ratio()` de los objetos `float`, devuelve el numerador y el denominador en la forma de una tupla:
# as_integer_ratio
0.25.as_integer_ratio()
num, den = 0.25.as_integer_ratio()
num
den
# Ayuda en el método float.as_integer_ratio
help(float.as_integer_ratio)
# También pueden ser usadas como un atajo:
a = (1, 2)
b = (0, 'A')
a, b = b, a
print(a, b)
# # 2. Loops (iterations)
#
# ## 2.1 `for` loops
#
# Iterations are a way of executing a certain block of code repeatedly:
# Planetas, de nuevo
planetas = ['Mercurio', 'Venus', 'Tierra', 'Marte', 'Jupiter', 'Saturno', 'Urano', 'Neptuno']
# Imprimir todos los planetas en la misma línea
for planeta in planetas:
print(planeta, end=', ')
# Para construir un ciclo `for`, se debe especificar:
#
# - el nombre de la variable que va a iterar (planeta),
#
# - el conjunto de valores sobre los que va a iterar la variable (planetas).
#
# Se usa la palabra `in`, en este caso, para hacerle entender a Python que *planeta* va a iterar sobre *planetas*.
#
# El objeto a la derecha de la palabra `in` puede ser cualquier objeto **iterable**. Básicamente, un iterable es cualquier arreglo (listas, tuplas, conjuntos, arreglos de numpy, series de pandas...).
#
# Por ejemplo, queremos hallar la multiplicación de todos los elementos de la siguiente tupla.
multiplicandos = (2, 2, 2, 3, 3, 5)
# +
# Multiplicación como ciclo
producto = 1
for number in multiplicandos:
producto *= number
producto
# -
# Incluso, podemos iterar sobre los caracteres de un string:
s = 'steganograpHy is the practicE of conceaLing a file, message, image, or video within another fiLe, message, image, Or video.'
# Imprimir solo los caracteres en mayúscula, sin espacios, uno seguido de otro
for char in s:
print(char if char.isupper() else '', end='')
# ### 2.1.1 The `range()` function
#
# The `range()` function returns a sequence of numbers. It is extremely useful for writing for loops.
#
# For example, if we want to repeat an action 5 times:
# A for loop with 5 iterations
for i in range(5):
print('Hola, ¡Mundo!')
help(range)
range(4, 8)
list(range(4, 8)), list(range(4, 8, 2))
# **Ejercicio:**
#
# 1. Escribir una función que devuelva los primeros $n$ elementos de la sucesión de Fibonacci, usando un ciclo `for`.
def fibonacci_for(n):
if n == 1:
fibonacci = [0]
elif n == 2:
fibonacci = [0, 1]
elif n >= 3:
fibonacci = [0, 1]
for i in range(n - 2):
fibonacci.append(fibonacci[-2] + fibonacci[-1])
return fibonacci
fibonacci_for(10)
# ## 2.2 `while` loops
#
# They are another kind of loop in Python, which keeps iterating until a certain condition stops being met.
#
# For example:
i = 0
while i >= 0:
print(i, end=' ')
# i = i + 1 es equivalente a i += 1
i += 1
# El argumento de un ciclo `while` se evalúa como una condición lógica, y el ciclo se ejecuta hasta que dicha condición sea **False**.
# **Ejercicio:**
#
# 1. Escribir una función que devuelva los primeros $n$ elementos de la sucesión de Fibonacci, usando un ciclo `while`.
#
# 2. Escribir una función que devuelva los elementos menores a cierto número $x$ de la sucesión de Fibonacci, usando un ciclo `while`.
def fibonacci_while(n):
if n == 1:
fibonacci = [0]
elif n == 2:
fibonacci = [0, 1]
elif n >= 3:
i = 2
fibonacci = [0, 1]
while i < n:
fibonacci.append(fibonacci[-2] + fibonacci[-1])
i += 1
return fibonacci
fibonacci_while(10)
# ## Pause: Recursion
#
# An additional way of iterating is known as *recursion*, and it happens when we define a function in terms of itself.
#
# For example, the $n$-th number of the Fibonacci sequence, recursively, would be:
def fibonacci_recursive(n):
if n == 1:
return 0
elif n == 2:
return 1
else:
return fibonacci_recursive(n - 1) + fibonacci_recursive(n - 2)
fibonacci_recursive(10)
# ## 2.3 List comprehensions
#
# List comprehensions are one of the coolest features of Python. The easiest way to understand them, as with many things, is to look at examples:
# First, with a for loop: list the squares of the 10 digits
cuadrados = []
for n in range(10):
    cuadrados.append(n ** 2)
cuadrados
# Now with a list comprehension
[n ** 2 for n in range(10)]
# We can even add conditionals:
planetas
# Example with the planets
[planeta for planeta in planetas if len(planeta) < 7]
# It can also be used for formatting:
# str.upper()
[planeta.upper() for planeta in planetas]
# It is very important to learn this, since it is widely used and helps to save a lot of lines of code.
#
# Example: write the following function using a for loop.
def cuantos_negativos(iterable):
    """
    Return the number of negative numbers in the given iterable.
    >>> cuantos_negativos([5, -1, -2, 0, 3])
    2
    """
    contador = 0
    for x in iterable:
        if x < 0:
            contador += 1
    return contador
cuantos_negativos([5, -1, -2, 0, 3])
# Now, with a list comprehension:
def cuantos_negativos(iterable):
    """
    Return the number of negative numbers in the given iterable.
    >>> cuantos_negativos([5, -1, -2, 0, 3])
    2
    """
    return len([x for x in iterable if x < 0])
# Test the function
cuantos_negativos([5, -1, -2, 0, 3])
# # 3. Strings and dictionaries
#
# ## 3.1 Strings
#
# If there is one thing Python excels at, it is manipulating strings. In this section we will look at some of the methods of string objects and at formatting operations (very useful for cleaning databases, by the way).
# ### 3.1.1 String syntax
#
# We have already seen several examples involving strings. Just as a reminder:
x = 'Pluton es un planeta'
y = "Pluton es un planeta"
x == y
# There are particular cases in which you may prefer one or the other:
#
# - Double quotes are convenient if your string contains an apostrophe.
#
# - Similarly, you can easily create a string that contains double quotes by wrapping it in single quotes.
#
# Examples:
print("Pluto's a planet!")
print('My dog is named "Pluto"')
print('Pluto\'s a planet!')
print("My dog is named \"Pluto\"")
# ### 3.1.2 Strings are iterable
#
# String objects are sequences of characters. Almost everything we saw that we could do with a list can also be done with a string.
# example string
# Indexing
# Multiple indexing (slicing)
# How many characters does it have?
# We can also iterate over them
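# + [markdown]
# A worked example (added) of the indexing, slicing, `len()` and iteration ideas above;
# the sample string is an illustrative assumption.
# +
ejemplo = 'Mercurio'
print(ejemplo[0])        # indexing -> 'M'
print(ejemplo[1:4])      # slicing -> 'erc'
print(len(ejemplo))      # number of characters -> 8
for letra in ejemplo:    # strings are iterable
    print(letra, end=' ')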
# However, one major difference with lists is that strings are immutable (we cannot modify them).
# ### 3.1.3 String methods
#
# Like lists, `str` objects have a large number of useful methods.
#
# Let's look at a few:
# example string
# UPPERCASE
# lowercase
# question: does it start with?
# question: does it end with?
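# + [markdown]
# A worked example (added) of the methods listed above, using an illustrative string:
# +
ejemplo = 'Pluton es un planeta'
print(ejemplo.upper())                 # UPPERCASE
print(ejemplo.lower())                 # lowercase
print(ejemplo.startswith('Pluton'))    # does it start with? -> True
print(ejemplo.endswith('planeta'))     # does it end with? -> True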
# #### Between lists and strings: the `split()` and `join()` methods
#
# The `str.split()` method turns a string into a list of smaller strings.
#
# This is extremely useful for getting the individual words of a string:
# Words of a sentence
# Or for extracting specific information:
# Year, month and day from a date given as a string
# `str.join()` lets us go back the other way.
#
# Given a list of small strings, we can turn it into a single string, using the string it is called on as the separator:
# With the date...
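# + [markdown]
# A worked example (added) of `split()` and `join()`; the sentence and the date are
# illustrative assumptions.
# +
frase = 'Pluton es un planeta'
palabras = frase.split()            # words of a sentence -> ['Pluton', 'es', 'un', 'planeta']
fecha = '2020-03-15'
anio, mes, dia = fecha.split('-')   # year, month and day from a date string
print(palabras, anio, mes, dia)
print('/'.join([dia, mes, anio]))   # back to a single string -> '15/03/2020'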
# ### 3.1.4 String concatenation
#
# Python lets us concatenate strings with the `+` operator:
# Example
# However, we have to be careful:
# Concatenate a string with a number
# how can we concatenate the expression above?
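# + [markdown]
# A worked example (added) of string concatenation, including the care needed when mixing
# strings and numbers:
# +
planeta = 'Pluton'
posicion = 9
# planeta + ' es el planeta ' + posicion          # TypeError: can only concatenate str to str
print(planeta + ' es el planeta ' + str(posicion))  # convert the number to a string first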
# ## 3.2 Dictionaries
#
# Dictionaries are another kind of Python object; they map keys to values:
numeros = {'uno': 1, 'dos': 2, 'tres': 3}
# In this case, the strings "uno", "dos" and "tres" are the keys, and the numbers 1, 2 and 3 are their corresponding values.
#
# Los valores son accesados con brackets, similarmente a las listas:
numeros['uno']
# Usamos una sintaxis similar para añadir otro par llave, valor
numeros['cuatro'] = 4
numeros
# O cambiar el valor asociado a una llave existente
numeros['uno'] = '1'
numeros
# ### Moving between lists, tuples and dictionaries: `zip`
# Suppose we have two lists whose elements correspond to each other:
key_list = ['name', 'age', 'height', 'weight', 'hair', 'eyes', 'has dog']
value_list = ['Esteban', 30, 1.81, 75, 'black', 'brown', True]
# How can I associate these values in a dictionary? With `zip`:
# First, get the list of pairs
# Then, build the dictionary of relations
# Since dictionaries are iterable, we can iterate over them
# Iterate over the dictionary (its keys)
# Iterate over the values
# Iterate over key-value pairs
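# + [markdown]
# A worked example (added) for the `zip` and dictionary-iteration cells above:
# +
pares = list(zip(key_list, value_list))       # list of (key, value) pairs
diccionario = dict(zip(key_list, value_list))
for llave in diccionario:                     # iterating over a dict yields its keys
    print(llave, end=' ')
print()
for valor in diccionario.values():            # iterate over the values
    print(valor, end=' ')
print()
for llave, valor in diccionario.items():      # iterate over key-value pairs
    print(llave, ':', valor)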
# ___
# - Quiz 1 at the beginning of the next class. It covers classes 1 and 2.
#
# <footer id="attribution" style="float:right; color:#808080; background:#fff;">
# Created with Jupyter by Esteban Jiménez Rodríguez.
# </footer>
| 18,911 |
/Data_Structures/arrays/Duplicate-Number.ipynb | b14d3fc53738515f2f5384dd6594c4a844eec897 | [] | no_license | Data-Semi/DataStructure-LessonNotes | https://github.com/Data-Semi/DataStructure-LessonNotes | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 3,810 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] graffitiCellId="id_jjzm8pq"
# ### Problem Statement
#
# You have been given an array of `length = n`. The array contains integers from `0` to `n - 2`. Each number in the array is present exactly once except for one number which is present twice. Find and return this duplicate number present in the array
#
# **Example:**
# * `arr = [0, 2, 3, 1, 4, 5, 3]`
# * `output = 3` (because `3` is present twice)
#
# The expected time complexity for this problem is `O(n)` and the expected space-complexity is `O(1)`.
# + graffitiCellId="id_hjobo20"
def duplicate_number(arr):
"""
:param - array containing numbers in the range [0, len(arr) - 2]
return - the number that is duplicate in the arr
"""
pass
# + [markdown] graffitiCellId="id_t54gljc"
# <span class="graffiti-highlight graffiti-id_t54gljc-id_6q2yj6n"><i></i><button>Show Solution</button></span>
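# + [markdown]
# One possible solution (added as a sketch, not the official one behind the button above):
# since the array holds each number 0..n-2 exactly once plus one duplicate, the duplicate
# equals the difference between the actual sum and the sum of 0..n-2. This uses O(1) extra
# space and a single pass, matching the stated complexity goals.
# +
def duplicate_number(arr):
    n = len(arr)
    expected = (n - 1) * (n - 2) // 2   # sum of 0 .. n-2
    return sum(arr) - expected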
# + graffitiCellId="id_32apeg6"
def test_function(test_case):
arr = test_case[0]
solution = test_case[1]
output = duplicate_number(arr)
if output == solution:
print("Pass")
else:
print("Fail")
# + graffitiCellId="id_5b4ou9d"
arr = [0, 0]
solution = 0
test_case = [arr, solution]
test_function(test_case)
# + graffitiCellId="id_kvkeije"
arr = [0, 2, 3, 1, 4, 5, 3]
solution = 3
test_case = [arr, solution]
test_function(test_case)
# + graffitiCellId="id_vfijgc0"
arr = [0, 1, 5, 4, 3, 2, 0]
solution = 0
test_case = [arr, solution]
test_function(test_case)
# + graffitiCellId="id_w6gda6p"
arr = [0, 1, 5, 5, 3, 2, 4]
solution = 5
test_case = [arr, solution]
test_function(test_case)
| 1,881 |
/6_function.ipynb | f0f001ee2af6ead7a3055d9b4b0236603c7d6f9c | [] | no_license | wssunn/Python-Language | https://github.com/wssunn/Python-Language | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 7,201 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# ### Iterators
#
# ```python
# x = [1, 2, 3]; it = iter(x)
# print(it.__next__()) #输出1
# print(it.__next__()) #输出2
# print(it.__next__()) #输出3
# print(it.__next__()) #返回 StopIteration
# ```
# ## function
def myfunc():
'''the doc string'''
pass
print(myfunc.__doc__) #输出:the doc string
# ### 1. Ways of calling a function
#
# +
#星号(*)后面的形参 必须 要指定参数名称
def recorder(name, *, age):
print(name, ' ', age)
#recorder('Gary', 32) #错误,没有指定形参age
recorder('Gary', age=32) #正确,指定形参age
#有默认值的形参必须放在没有默认值的后面
def recorder(name, age=32): #正确
pass
#def recorder(age=32, name): #错误
#func(*参数) 解包tuple或者list
#使用解包列表或元组,则解包参数不可修改
def recorder(*person):
for a in person:
if not isinstance(a, (int, str)):
raise TypeError('bad operand type')
#注:参数不可修改
print(person[0], person[1])
mylist = ['Gary', 32]; recorder(*mylist) #输出:Gary 32
mytuple = ['Gary', 32]; recorder(*mytuple) #输出:Gary 32
#func(**dict) 解包字典
def recorder(**person):
for a in person.values():
if not isinstance(a, (int, str)):
raise TypeError('bad operand type')
print(person['name'], person['age'])
mydict = {'age':32, 'name':'gary'}
recorder(**mydict)
recorder(age=32, name='gary')
# -
# #### 1.1 Using positional arguments, *args and **kwargs together
# +
# 单个形参在前,列表元组字典在后,调用不需要指定单个形参名字
def recorder(ttt, *person1, **person2):
if len(person1) != 0:
print(person1[0], person1[1])
if len(person2) != 0:
print(person2['name'], person2['age'])
recorder('abc', 'Gary', 32) #传入不指定形参的实参,由person1接收
recorder('abc', name='Gary', age=32) #传入指定形参的实参,由person2接收
recorder(ttt='abc') #不需要指定
# -
# ### 2. Generator functions (generator) — see section 5
#
# A generator object can only be iterated over once, so its results can only be consumed once; to traverse the data again, a new iterator/generator has to be created.
# +
#匿名函数
myfunc = lambda x,y: x+y
print(myfunc(1, 2)) #输出3
# reduce函数:按照sequence的顺序,依次调用function,每次调用传入两个参数
# 一个是sequence当前元素,一个是上一个元素在function的返回值
from functools import reduce
a = reduce(lambda x,y: x+y, range(1, 101)); print(a) #输出5050
b = map(lambda x: x**2, [1, 2, 3, 4, 5]); print(list(b)) #输出[1, 4, 9, 16, 25]
#map函数: 可以处理多个函数,lambda函数的参数个数要和列表(序列)数据个数相同
# 当两个序列长度不相等,以最小长度对所有序列进行提取
c = map(lambda x,y: x+y, [1, 2, 3], [4, 5, 6, 7]); print(list(c)) #输出[5, 7, 9]
#filter函数: 把序列对象中的元素依次放到处理函数中,True则留下
t = filter(lambda x: x%2==0, range(10))
print(list(t)) #输出[0, 2, 4, 6, 8]
#生成器对象只能迭代一次,结果只能取一次
print(list(t)) #输出[]
# -
# ### 3. Partial functions
# A partial function freezes part of an existing function's arguments; a single original function can be wrapped into several partial functions.
# +
from functools import partial
def recorder(name, age):
print(name, ' ', age)
partial_recorder = partial(recorder, name='Gary')
partial_recorder(age=32)
# -
# ### 4. The eval and exec functions
# eval evaluates an expression and returns its result, so it suits statements that produce a value; eval() can also turn a string such as '[1, 2, 3]' into the corresponding container object.
#
# exec does not return a result, so it suits statements that are executed only for their side effects.
a = exec('2+3'); print(a) #返回None
a = eval('2+3'); print(a) #返回5
#eval()用于将'[]'(包含组合对象的字符串)转换为[]组合对象
b = '[1, 2, 3]'; print(eval(b)) #返回[1, 2, 3]
c = '"hello"'; print(eval(c)) #返回hello
# ### 5. Generator functions (generator)
#
# 1. An iterator over an existing container keeps all of the contents in memory and is traversed with next(), which saves computation.
# 2. A generator function does not keep its contents in memory; each value is computed when next() is called and discarded right afterwards, which saves memory.
# +
#使用yield语句返回
def print_list(a):
for i in a:
yield i
for i in print_list([1, 2, 3]):
print(i) #输出1, 2, 3
#使用()
a = (x**2 for x in range(3))
for i in a:
print(i) #输出0, 1, 4
# -
# ### 6. Variable scope
# 1. L: the local scope, i.e. the current function
# 2. E: the enclosing scope, i.e. the local scope of the enclosing def (for nested functions)
# 3. G: the global scope, not enclosed by any function
# 4. B: the built-in scope, Python's internal namespace
#
# When the code uses some variable a, Python looks it up in the LEGB order across these scopes and stops as soon as it is found; otherwise an error is raised.
# +
# the global statement
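# A minimal sketch (added) of how `global` (and `nonlocal`) interact with the LEGB rule
# described above; the variable names are illustrative.
x = 10                     # G: global scope
def outer():
    y = 20                 # E: enclosing scope for inner()
    def inner():
        global x           # rebind the global x instead of creating a new local one
        nonlocal y         # rebind outer()'s y
        x += 1
        y += 1
    inner()
    return y
print(outer(), x)          # 21 11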
| 3,684 |
/2. SKKU/2.DeepLearningBasic/Practice2_Softmax_Classifier/.ipynb_checkpoints/Samsung_SDS_Practice2_Softmax_Classifier-checkpoint.ipynb | 759b6d3e793c77c17de695744cc5f6cc470097c4 | [] | no_license | LeeKeon/AI | https://github.com/LeeKeon/AI | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 125,936 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Practice2. Softmax classifier
# +
import numpy as np
import random
import os
import matplotlib.pyplot as plt
import _pickle as pickle
import time
# set default plot options
# %matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# -
# ## Data preprocessing
from utils import get_CIFAR10_data
X_tr, Y_tr, X_val, Y_val, X_te, Y_te, mean_img = get_CIFAR10_data()
print ('Train data shape : %s, Train labels shape : %s' % (X_tr.shape, Y_tr.shape))
print ('Val data shape : %s, Val labels shape : %s' % (X_val.shape, Y_val.shape))
print ('Test data shape : %s, Test labels shape : %s' % (X_te.shape, Y_te.shape))
# ## Visualize training images
# +
class_names = ['airplane','automobile','bird','cat','deer',
'dog','frog','horse','ship','truck']
images_index = np.int32(np.round(np.random.rand(18,)*10000,0))
fig, axes = plt.subplots(3, 6, figsize=(18, 6),
subplot_kw={'xticks': [], 'yticks': []})
fig.subplots_adjust(hspace=0.3, wspace=0.05)
for ax, idx in zip(axes.flat, images_index):
img = (X_tr[idx,:3072].reshape(32, 32, 3) + mean_img.reshape(32, 32, 3))/255.
ax.imshow(img)
ax.set_title(class_names[Y_tr[idx]])
# -
# # 1. Softmax Classifier
# We will implement two versions of the loss function for the softmax classifier, and test them out on the CIFAR10 dataset.
#
# First, implement the naive softmax loss function with nested loops.
def naive_softmax_loss(Weights,X_data,Y_data,reg):
"""
Inputs have D dimension, there are C classes, and we operate on minibatches of N examples.
Inputs :
- Weights : A numpy array of shape (D,C) containing weights.
- X_data : A numpy array of shape (N,D) contatining a minibatch of data.
- Y_data : A numpy array of shape (N,) containing training labels;
Y[i]=c means that X[i] has label c, where 0<=c<C.
- reg : Regularization strength. (float)
Returns :
- loss as single float
- gradient with respect to Weights; an array of sample shape as Weights
"""
# Initialize the loss and gradient to zero
softmax_loss = 0.0
dWeights = np.zeros_like(Weights)
#print(dWeights.shape)
####################################################################################################
# TODO : Compute the softmax loss and its gradient using explicit loops. #
# Store the loss in loss and the gradient in dW. #
# If you are not careful here, it is easy to run into numeric instability. #
# Don't forget the regularization. #
#---------------------------------------WRITE YOUR CODE--------------------------------------------#
#length1 = len(X_data)
#length2 = len(X_data[0])
traing_times = X_data.shape[0]
dim,num_class = Weights.shape
print('traing_times',traing_times)
print('dimen, num_class',dim,num_class)
for i in range(traing_times):
#Weight Matmul
score_i = X_data[i].dot(Weights)
        # Normalization - subtract the max so that the exponentials do not overflow
score_i = score_i - np.max(score_i)
prob_i = np.exp(score_i) / np.sum(np.exp(score_i))
#Calculate Loss
softmax_loss += -np.log(prob_i[Y_data[i]])
        # Gradient of the loss w.r.t. the scores
prob_i[Y_data[i]] -= 1
dWeights += np.dot(X_data[i].reshape(dim,1),prob_i.reshape(1,num_class))
    # Regularization: average over the training examples and add the L2 penalty
softmax_loss /= traing_times
softmax_loss += 0.5 * reg * np.sum(Weights*Weights)
    # average the gradient over the examples and add the gradient of the regularization term
dWeights = (1.0/traing_times)*dWeights + reg*Weights
#--------------------------------------END OF YOUR CODE--------------------------------------------#
####################################################################################################
return softmax_loss, dWeights
# Generate a random softmax weight matrix and use it to compute the loss. As a rough sanity check, our loss should be something close to -log(0.1).
W = np.random.randn(3073, 10) * 0.0001
print(W.shape)
print(W.shape[0] )
# +
loss, grad = naive_softmax_loss(W, X_tr, Y_tr, 0.0)
print ('loss :', loss)
print ('sanity check : ', -np.log(0.1))
# -
# The next thing is the vectorized softmax loss function.
def vectorized_softmax_loss(Weights, X_data, Y_data, reg):
softmax_loss = 0.0
dWeights = np.zeros_like(Weights)
####################################################################################################
# TODO : Compute the softmax loss and its gradient using no explicit loops. #
# Store the loss in loss and the gradient in dW. #
# If you are not careful here, it is easy to run into numeric instability. #
# Don't forget the regularization. #
#---------------------------------------WRITE YOUR CODE--------------------------------------------#
tr_length = X_data.shape[0]
#Weight Matmul
score = X_data.dot(Weights)
#print(np.mean(score,axis=0))
#print(np.mean(score,axis=1))
    # Normalization: subtract the row-wise max for numerical stability
score -= np.max(score,axis=1).reshape(tr_length,1)
prob = np.exp(score) / np.sum(np.exp(score),axis=1).reshape(tr_length,1)
#Calculate Loss
softmax_loss = -np.sum(np.log(prob[range(tr_length), Y_data]))
    # Gradient of the loss w.r.t. the scores
prob[range(tr_length), Y_data] -= 1
dWeights = X_data.T.dot(prob)
#Regularization
softmax_loss /= tr_length
softmax_loss += 0.5*reg*np.sum(Weights*Weights)
dWeights = (1.0/tr_length)*dWeights + reg*Weights
#--------------------------------------END OF YOUR CODE--------------------------------------------#
####################################################################################################
return softmax_loss, dWeights
# Compare two versions. The two versions should compute the same results, but the vectorized version should be much faster.
# +
s_time = time.time()
loss_naive, grad_naive = naive_softmax_loss(W, X_tr, Y_tr, 0.00001)
print ('naive loss : %e with %fs' % (loss_naive, time.time()-s_time))
s_time = time.time()
loss_vectorized, grad_vectorized = vectorized_softmax_loss(W, X_tr, Y_tr, 0.00001)
print ('vectorized loss : %e with %fs' % (loss_vectorized, time.time()-s_time))
print ('loss difference : %f' % np.abs(loss_naive - loss_vectorized))
print ('gradient difference : %f' % np.linalg.norm(grad_naive-grad_vectorized, ord='fro'))
# -
# Now implement the softmax classifier below, following the comments and using the softmax loss function you implemented above.
class Softmax(object):
def __init__(self):
#self.Weights = None
return
def train(self, X_tr_data, Y_tr_data, X_val_data, Y_val_data, lr=1e-3, reg=1e-5, iterations=100, bs=128, verbose=False):
"""
Train this Softmax classifier using stochastic gradient descent.
Inputs have D dimensions, and we operate on N examples.
Inputs :
- X_data : A numpy array of shape (N,D) containing training data.
- Y_data : A numpy array of shape (N,) containing training labels;
Y[i]=c means that X[i] has label 0<=c<C for C classes.
- lr : (float) Learning rate for optimization.
- reg : (float) Regularization strength.
- iterations : (integer) Number of steps to take when optimizing.
- bs : (integer) Number of training examples to use at each step.
- verbose : (boolean) If true, print progress during optimization.
        Returns :
- A list containing the value of the loss function at each training iteration.
"""
num_train, dim = X_tr_data.shape
num_classes = np.max(Y_tr_data)+1
self.Weights = 0.001*np.random.randn(dim, num_classes)
for it in range(iterations):
#X_batch = None
#Y_batch = None
####################################################################################################
# TODO : Sample batch_size elements from the training data and their corresponding labels #
# to use in this round of gradient descent. #
# Store the data in X_batch and their corresponding labels in Y_batch; After sampling #
            # X_batch should have shape (batch_size, dim) and Y_batch should have shape (batch_size,)     #
# #
            # Hint : Use np.random.choice to generate indices.                                            #
# Sampling with replacement is faster than sampling without replacement. #
#---------------------------------------WRITE YOUR CODE--------------------------------------------#
#--------------------------------------END OF YOUR CODE--------------------------------------------#
####################################################################################################
# Evaluate loss and gradient
tr_loss, tr_grad = self.loss(X_batch, Y_batch, reg)
# Perform parameter update
####################################################################################################
# TODO : Update the weights using the gradient and the learning rate #
#---------------------------------------WRITE YOUR CODE--------------------------------------------#
#--------------------------------------END OF YOUR CODE--------------------------------------------#
####################################################################################################
            if verbose and it % 100 == 0:
                print ('Iteration %d / %d : loss %f ' % (it, iterations, tr_loss))
def predict(self, X_data):
"""
Use the trained weights of this softmax classifier to predict labels for data points.
Inputs :
- X : A numpy array of shape (N,D) containing training data.
Returns :
- Y_pred : Predicted labels for the data in X. Y_pred is a 1-dimensional array of length N,
and each element is an integer giving the predicted class.
"""
Y_pred = np.zeros(X_data.shape[0])
####################################################################################################
# TODO : Implement this method. Store the predicted labels in Y_pred #
#---------------------------------------WRITE YOUR CODE--------------------------------------------#
#--------------------------------------END OF YOUR CODE--------------------------------------------#
####################################################################################################
return Y_pred
def get_accuracy(self, X_data, Y_data):
"""
Use X_data and Y_data to get an accuracy of the model.
Inputs :
- X_data : A numpy array of shape (N,D) containing input data.
- Y_data : A numpy array of shape (N,) containing a true label.
Returns :
- Accuracy : Accuracy of input data pair [X_data, Y_data].
"""
####################################################################################################
# TODO : Implement this method. Calculate an accuracy of X_data using Y_data and predict Func #
#---------------------------------------WRITE YOUR CODE--------------------------------------------#
#--------------------------------------END OF YOUR CODE--------------------------------------------#
####################################################################################################
return accuracy
def loss(self, X_batch, Y_batch, reg):
return vectorized_softmax_loss(self.Weights, X_batch, Y_batch, reg)
# Use the validation set to tune hyperparameters (regularization strength and learning rate).
# You should experiment with different ranges for the learning rates and regularization strengths;
# if you are careful you should be able to get a classification accuracy of over 0.35 on the validation set.
# A sketch of one possible way to structure the search is given after the cell below.
# +
# results is a dictionary mapping tuples of the form (learning_rate, regularization_strength)
# to tuples of the form (training_accuracy, validation_accuracy).
# The accuracy is simply the fraction of data points that are correctly classified.
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-8, 1e-7, 5e-7, 1e-6]
regularization_strengths = [5e2, 1e3, 1e4, 5e4]
#########################################################################################################
# TODO : Write code that chooses the best hyperparameters by tuning on the validation set.             #
#        For each combination of hyperparameters, train a Softmax on the training set,                 #
#        compute its accuracy on the training and validation sets, and store these numbers in the      #
#        results dictionary. In addition, store the best validation accuracy in best_val               #
#        and the Softmax object that achieves this accuracy in best_softmax.                           #
#                                                                                                       #
# Hint : You should use a small value for iterations as you develop your validation code so that the   #
#        Softmax doesn't take much time to train; once you are confident that your validation code     #
#        works, you should rerun the validation code with a larger value for iterations.               #
#------------------------------------------WRITE YOUR CODE----------------------------------------------#
#softmax = Softmax()
#-----------------------------------------END OF YOUR CODE----------------------------------------------#
#########################################################################################################
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print ('lr %e reg %e train accuracy : %f, val accuracy : %f ' % (lr, reg, train_accuracy, val_accuracy))
print ('best validation accuracy achieved during cross-validation :', best_val)
# -
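# For reference, one possible way to structure the search in the TODO block above is sketched below as a
# function; it assumes the `train` and `get_accuracy` methods of the `Softmax` class have been completed,
# and is an illustrative sketch rather than a tuned solution.
# +
def tune_softmax(X_tr_data, Y_tr_data, X_val_data, Y_val_data,
                 learning_rates, regularization_strengths, iterations=500, bs=128):
    """Illustrative grid search over (learning rate, regularization strength) pairs."""
    results, best_val, best_softmax = {}, -1.0, None
    for lr in learning_rates:
        for reg in regularization_strengths:
            softmax = Softmax()
            softmax.train(X_tr_data, Y_tr_data, X_val_data, Y_val_data,
                          lr=lr, reg=reg, iterations=iterations, bs=bs)
            train_acc = softmax.get_accuracy(X_tr_data, Y_tr_data)
            val_acc = softmax.get_accuracy(X_val_data, Y_val_data)
            results[(lr, reg)] = (train_acc, val_acc)
            if val_acc > best_val:
                best_val, best_softmax = val_acc, softmax
    return results, best_val, best_softmax
# -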
# Evaluate the best softmax on the test set.
# +
Y_te_pred = best_softmax.predict(X_te)
test_accuracy = np.mean(Y_te == Y_te_pred)
print ('softmax on raw pixels final test set accuracy : ', test_accuracy)
# -
# ## Visualize test results
# Visualize (image, predicted label) pairs from the best softmax model. The results may not look great, because we only train a simple softmax classifier.
# +
class_names = ['airplane','automobile','bird','cat','deer',
'dog','frog','horse','ship','truck']
images_index = np.int32(np.round(np.random.rand(18,)*1000,0))
fig, axes = plt.subplots(3, 6, figsize=(18, 6),
subplot_kw={'xticks': [], 'yticks': []})
fig.subplots_adjust(hspace=0.3, wspace=0.05)
for ax, idx in zip(axes.flat, images_index):
img = (X_te[idx,:3072].reshape(32, 32, 3) + mean_img.reshape(32, 32, 3))/255.
ax.imshow(img)
ax.set_title(class_names[Y_te_pred[idx]])
# -
# ## Visualize learned weights
# Visualize the learned weights for each class. Depending on your choice of learning rate and regularization strength, these may or may not be nice to look at.
# +
w = best_softmax.Weights[:-1, :]
w = w.reshape(32,32,3,10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in range(10):
plt.subplot(2,5,i+1)
wimg=255.0*(w[:,:,:,i].squeeze() - w_min)/(w_max-w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
# -
| 16,856 |
/13_sql.ipynb | 72fd7d34098246695262ad7fb85aa4ad1c4d829b | [] | no_license | kisslitsyn/ya.practicum | https://github.com/kisslitsyn/ya.practicum | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 43,457 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab 2: Inference in Graphical Models
#
# ### Machine Learning 2 (2016/2017)
#
# * The lab exercises should be made in groups of two people or individually.
# * The hand-in deadline is Wednesday, May 10, 23:59.
# * Assignment should be sent to p.j.j.p.versteeg@uva.nl. The subject line of your email should be "[ML2_2017] lab#_lastname1\_lastname2".
# * Put your and your teammates' names in the body of the email
# * Attach the .IPYNB (IPython Notebook) file containing your code and answers. Naming of the file follows the same rule as the subject line. For example, if the subject line is "[ML2_2017] lab02\_Bongers\_Blom", the attached file should be "lab02\_Bongers\_Blom.ipynb". Only use underscores ("\_") to connect names, otherwise the files cannot be parsed.
#
# Notes on implementation:
#
# * You should write your code and answers in an IPython Notebook: http://ipython.org/notebook.html. If you have problems, please ask or e-mail Philip.
# * For some of the questions, you can write the code directly in the first code cell that provides the class structure.
# * Among the first lines of your notebook should be "%pylab inline". This imports all required modules, and your plots will appear inline.
# * NOTE: test your code and make sure we can run your notebook / scripts!
# ### Introduction
# In this assignment, we will implement the sum-product and max-sum algorithms for factor graphs over discrete variables. The relevant theory is covered in chapter 8 of Bishop's PRML book, in particular section 8.4. Read this chapter carefully before continuing!
#
# We will first implement sum-product and max-sum and apply it to a simple poly-tree structured factor graph for medical diagnosis. Then, we will implement a loopy version of the algorithms and use it for image denoising.
#
# For this assignment we recommend you stick to numpy ndarrays (constructed with np.array, np.zeros, np.ones, etc.) as opposed to numpy matrices, because arrays can store n-dimensional arrays whereas matrices only work for 2d arrays. We need n-dimensional arrays in order to store conditional distributions with more than 1 conditioning variable. If you want to perform matrix multiplication on arrays, use the np.dot function; all infix operators, including *, +, and -, work element-wise on arrays.
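# As a small standalone illustration of the point above:
# +
import numpy as np
A = np.array([[1., 2.], [3., 4.]])
B = np.array([[10., 0.], [0., 10.]])
print(A * B)          # element-wise product on ndarrays
print(np.dot(A, B))   # matrix product
print(np.zeros((2, 3, 4)).shape)  # ndarrays can be n-dimensional, unlike np.matrix
# -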
# ## Part 1: The sum-product algorithm
#
# We will implement a datastructure to store a factor graph and to facilitate computations on this graph. Recall that a factor graph consists of two types of nodes, factors and variables. Below you will find some classes for these node types to get you started. Carefully inspect this code and make sure you understand what it does; you will have to build on it later.
# +
# %pylab inline
class Node(object):
"""
Base-class for Nodes in a factor graph. Only instantiate sub-classes of Node.
"""
def __init__(self, name):
# A name for this Node, for printing purposes
self.name = name
# Neighbours in the graph, identified with their index in this list.
# i.e. self.neighbours contains neighbour 0 through len(self.neighbours) - 1.
self.neighbours = []
# Reset the node-state (not the graph topology)
self.reset()
def reset(self):
# Incoming messages; a dictionary mapping neighbours to messages.
# That is, it maps Node -> np.ndarray.
self.in_msgs = {}
# A set of neighbours for which this node has pending messages.
# We use a python set object so we don't have to worry about duplicates.
self.pending = set([])
def add_neighbour(self, nb):
self.neighbours.append(nb)
def send_sp_msg(self, other):
# To be implemented in subclass.
raise Exception('Method send_sp_msg not implemented in base-class Node')
def send_ms_msg(self, other):
# To be implemented in subclass.
raise Exception('Method send_ms_msg not implemented in base-class Node')
def receive_msg(self, other, msg):
        # Store the incoming message, replacing previous messages from the same node
self.in_msgs[other] = msg
# TODO: add pending messages
# self.pending.update(...)
def __str__(self):
# This is printed when using 'print node_instance'
return self.name
class Variable(Node):
def __init__(self, name, num_states):
"""
Variable node constructor.
Args:
name: a name string for this node. Used for printing.
num_states: the number of states this variable can take.
Allowable states run from 0 through (num_states - 1).
For example, for a binary variable num_states=2,
and the allowable states are 0, 1.
"""
self.num_states = num_states
# Call the base-class constructor
super(Variable, self).__init__(name)
def set_observed(self, observed_state):
"""
Set this variable to an observed state.
Args:
observed_state: an integer value in [0, self.num_states - 1].
"""
# Observed state is represented as a 1-of-N variable
# Could be 0.0 for sum-product, but log(0.0) = -inf so a tiny value is preferable for max-sum
self.observed_state[:] = 0.000001
self.observed_state[observed_state] = 1.0
def set_latent(self):
"""
Erase an observed state for this variable and consider it latent again.
"""
# No state is preferred, so set all entries of observed_state to 1.0
# Using this representation we need not differentiate between observed and latent
# variables when sending messages.
self.observed_state[:] = 1.0
def reset(self):
super(Variable, self).reset()
self.observed_state = np.ones(self.num_states)
def marginal(self, Z=None):
"""
Compute the marginal distribution of this Variable.
It is assumed that message passing has completed when this function is called.
Args:
Z: an optional normalization constant can be passed in. If None is passed, Z is computed.
Returns: marginal, Z. The first is a numpy array containing the normalized marginal distribution.
Z is either equal to the input Z, or computed in this function (if Z=None was passed).
"""
# TODO: compute marginal
return None, Z
def send_sp_msg(self, other):
# TODO: implement Variable -> Factor message for sum-product
pass
def send_ms_msg(self, other):
# TODO: implement Variable -> Factor message for max-sum
pass
class Factor(Node):
def __init__(self, name, f, neighbours):
"""
Factor node constructor.
Args:
name: a name string for this node. Used for printing
f: a numpy.ndarray with N axes, where N is the number of neighbours.
That is, the axes of f correspond to variables, and the index along that axes corresponds to a value of that variable.
Each axis of the array should have as many entries as the corresponding neighbour variable has states.
neighbours: a list of neighbouring Variables. Bi-directional connections are created.
"""
# Call the base-class constructor
super(Factor, self).__init__(name)
assert len(neighbours) == f.ndim, 'Factor function f should accept as many arguments as this Factor node has neighbours'
for nb_ind in range(len(neighbours)):
nb = neighbours[nb_ind]
assert f.shape[nb_ind] == nb.num_states, 'The range of the factor function f is invalid for input %i %s' % (nb_ind, nb.name)
self.add_neighbour(nb)
nb.add_neighbour(self)
self.f = f
def send_sp_msg(self, other):
# TODO: implement Factor -> Variable message for sum-product
pass
def send_ms_msg(self, other):
# TODO: implement Factor -> Variable message for max-sum
pass
# -
# ### 1.1 Instantiate network (10 points)
# Convert the directed graphical model ("Bayesian Network") shown below to a factor graph. Instantiate this graph by creating Variable and Factor instances and linking them according to the graph structure.
# To instantiate the factor graph, first create the Variable nodes and then create Factor nodes, passing a list of neighbour Variables to each Factor.
# Use the following prior and conditional probabilities.
#
# $$
# p(\verb+Influenza+) = 0.05 \\\\
# p(\verb+Smokes+) = 0.2 \\\\
# $$
#
# $$
# p(\verb+SoreThroat+ = 1 | \verb+Influenza+ = 1) = 0.3 \\\\
# p(\verb+SoreThroat+ = 1 | \verb+Influenza+ = 0) = 0.001 \\\\
# p(\verb+Fever+ = 1| \verb+Influenza+ = 1) = 0.9 \\\\
# p(\verb+Fever+ = 1| \verb+Influenza+ = 0) = 0.05 \\\\
# p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 1, \verb+Smokes+ = 1) = 0.99 \\\\
# p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 1, \verb+Smokes+ = 0) = 0.9 \\\\
# p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 0, \verb+Smokes+ = 1) = 0.7 \\\\
# p(\verb+Bronchitis+ = 1 | \verb+Influenza+ = 0, \verb+Smokes+ = 0) = 0.0001 \\\\
# p(\verb+Coughing+ = 1| \verb+Bronchitis+ = 1) = 0.8 \\\\
# p(\verb+Coughing+ = 1| \verb+Bronchitis+ = 0) = 0.07 \\\\
# p(\verb+Wheezing+ = 1| \verb+Bronchitis+ = 1) = 0.6 \\\\
# p(\verb+Wheezing+ = 1| \verb+Bronchitis+ = 0) = 0.001 \\\\
# $$
from IPython.core.display import Image
Image(filename='bn.png')
# YOUR ANSWER HERE
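# Purely as an illustration of the API above (not the full answer), a single binary variable and its prior
# factor could be instantiated as follows; the complete network additionally needs the remaining variables
# and the conditional-probability factors listed above.
# +
# API illustration only: one variable plus its prior factor (names suffixed with _demo to avoid clashes).
influenza_demo = Variable('Influenza', 2)
# f[state] = p(Influenza = state), with state 1 meaning "has influenza"
prior_demo = Factor('p(Influenza)', np.array([0.95, 0.05]), [influenza_demo])
print(influenza_demo.neighbours[0], prior_demo.neighbours[0])
# -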
# ### 1.2 Factor to variable messages (20 points)
# Write a method `send_sp_msg(self, other)` for the Factor class, that checks if all the information required to pass a message to Variable `other` is present, computes the message and sends it to `other`. "Sending" here simply means calling the `receive_msg` function of the receiving node (we will implement this later). The message itself should be represented as a numpy array (np.array) whose length is equal to the number of states of the variable.
#
# An elegant and efficient solution can be obtained using the n-way outer product of vectors. This product takes n vectors $\mathbf{x}^{(1)}, \ldots, \mathbf{x}^{(n)}$ and computes a $n$-dimensional tensor (ndarray) whose element $i_0,i_1,...,i_n$ is given by $\prod_j \mathbf{x}^{(j)}_{i_j}$. In python, this is realized as `np.multiply.reduce(np.ix_(*vectors))` for a python list `vectors` of 1D numpy arrays. Try to figure out how this statement works -- it contains some useful functional programming techniques. Another function that you may find useful in computing the message is `np.tensordot`.
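# Before implementing the message, the following small standalone example (not part of the required
# implementation) shows what these building blocks do: `np.ix_` reshapes 1-D vectors into broadcastable
# views, so multiplying them yields their outer product, and `np.tensordot` contracts a chosen axis of a
# factor table with a message vector.
# +
# Standalone illustration of np.ix_ broadcasting and np.tensordot (toy numbers).
x = np.array([1., 2.])          # a message over a 2-state variable
y = np.array([10., 20., 30.])   # a message over a 3-state variable
xg, yg = np.ix_(x, y)           # shapes (2, 1) and (1, 3)
print((xg * yg).shape)          # broadcasting gives the (2, 3) outer product
# The np.multiply.reduce(np.ix_(*vectors)) expression from the text relies on this same broadcasting.
f = np.arange(6.).reshape(2, 3)     # a toy factor table f(x, y)
msg_x = np.array([0.3, 0.7])        # incoming message over x
print(np.tensordot(f, msg_x, axes=([0], [0])))  # sums out x, leaving a length-3 vector over y
# -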
# ### 1.3 Variable to factor messages (10 points)
#
# Write a method `send_sp_message(self, other)` for the Variable class, that checks if all the information required to pass a message to Variable var is present, computes the message and sends it to factor.
# ### 1.4 Compute marginal (10 points)
# Later in this assignment, we will implement message passing schemes to do inference. Once the message passing has completed, we will want to compute local marginals for each variable.
# Write the method `marginal` for the Variable class, that computes a marginal distribution over that node.
# ### 1.5 Receiving messages (10 points)
# In order to implement the loopy and non-loopy message passing algorithms, we need some way to determine which nodes are ready to send messages to which neighbours. To do this in a way that works for both loopy and non-loopy algorithms, we make use of the concept of "pending messages", which is explained in Bishop (8.4.7):
# "we will say that a (variable or factor)
# node a has a message pending on its link to a node b if node a has received any
# message on any of its other links since the last time it send (sic) a message to b. Thus,
# when a node receives a message on one of its links, this creates pending messages
# on all of its other links."
#
# Keep in mind that for the non-loopy algorithm, nodes may not have received any messages on some or all of their links. Therefore, before we say node a has a pending message for node b, we must check that node a has received all messages needed to compute the message that is to be sent to b.
#
# Modify the function `receive_msg`, so that it updates the self.pending variable as described above. The member self.pending is a set that is to be filled with Nodes to which self has pending messages. Modify the `send_msg` functions to remove pending messages as they are sent.
# ### 1.6 Inference Engine (10 points)
# Write a function `sum_product(node_list)` that runs the sum-product message passing algorithm on a tree-structured factor graph with given nodes. The input parameter `node_list` is a list of all Node instances in the graph, which is assumed to be ordered correctly. That is, the list starts with a leaf node, which can always send a message. Subsequent nodes in `node_list` should be capable of sending a message when the pending messages of preceding nodes in the list have been sent. The sum-product algorithm then proceeds by passing over the list from beginning to end, sending all pending messages at the nodes it encounters. Then, in reverse order, the algorithm traverses the list again and again sends all pending messages at each node as it is encountered. For this to work, you must initialize pending messages for all the leaf nodes, e.g. `influenza_prior.pending.add(influenza)`, where `influenza_prior` is a Factor node corresponding the the prior, `influenza` is a Variable node and the only connection of `influenza_prior` goes to `influenza`.
#
#
#
# +
# YOUR ANSWER HERE
# -
# ### 1.7 Observed variables and probabilistic queries (15 points)
# We will now use the inference engine to answer probabilistic queries. That is, we will set certain variables to observed values, and obtain the marginals over latent variables. We have already provided functions `set_observed` and `set_latent` that manage a member of Variable called `observed_state`. Modify the `Variable.send_msg` and `Variable.marginal` routines that you wrote before, to use `observed_state` so as to get the required marginals when some nodes are observed.
# ### 1.8 Sum-product and MAP states (5 points)
# A maximum a posteriori state (MAP-state) is an assignment of all latent variables that maximizes the probability of latent variables given observed variables:
# $$
# \mathbf{x}_{\verb+MAP+} = \arg\max _{\mathbf{x}} p(\mathbf{x} | \mathbf{y})
# $$
# Could we use the sum-product algorithm to obtain a MAP state? If yes, how? If no, why not?
#
# __YOUR ANSWER HERE__
# ## Part 2: The max-sum algorithm
# Next, we implement the max-sum algorithm as described in section 8.4.5 of Bishop.
# ### 2.1 Factor to variable messages (10 points)
# Implement the function `Factor.send_ms_msg` that sends Factor -> Variable messages for the max-sum algorithm. It is analogous to the `Factor.send_sp_msg` function you implemented before.
# ### 2.2 Variable to factor messages (10 points)
# Implement the `Variable.send_ms_msg` function that sends Variable -> Factor messages for the max-sum algorithm.
# ### 2.3 Find a MAP state (10 points)
#
# Using the same message passing schedule we used for sum-product, implement the max-sum algorithm. For simplicity, we will ignore issues relating to non-unique maxima. So there is no need to implement backtracking; the MAP state is obtained by a per-node maximization (eq. 8.98 in Bishop). Make sure your algorithm works with both latent and observed variables.
# +
# YOUR ANSWER HERE
# -
# ## Part 3: Image Denoising and Loopy BP
#
# Next, we will use a loopy version of max-sum to perform denoising on a binary image. The model itself is discussed in Bishop 8.3.3, but we will use loopy max-sum instead of Iterative Conditional Modes as Bishop does.
#
# The following code creates some toy data: `im` is a quite large binary image and `test_im` is a smaller synthetic binary image. Noisy versions are also provided.
# +
from pylab import imread, gray
# Load the image and binarize
im = np.mean(imread('dalmatian1.png'), axis=2) > 0.5
imshow(im)
gray()
# Add some noise
noise = np.random.rand(*im.shape) > 0.9
noise_im = np.logical_xor(noise, im)
figure()
imshow(noise_im)
test_im = np.zeros((10,10))
#test_im[5:8, 3:8] = 1.0
#test_im[5,5] = 1.0
figure()
imshow(test_im)
# Add some noise
noise = np.random.rand(*test_im.shape) > 0.9
noise_test_im = np.logical_xor(noise, test_im)
figure()
imshow(noise_test_im)
# -
# ### 3.1 Construct factor graph (10 points)
# Convert the Markov Random Field (Bishop, fig. 8.31) to a factor graph and instantiate it.
# +
# YOUR ANSWER HERE
# -
# ### 3.2 Loopy max-sum (10 points)
# Implement the loopy max-sum algorithm, by passing messages from randomly chosen nodes iteratively until no more pending messages are created or a maximum number of iterations is reached.
#
# Think of a good way to initialize the messages in the graph.
# +
# YOUR ANSWER HERE
| 17,297 |
/reproductions/Example/Example_05_04.ipynb | c4c2b66bffc7496da634cfb7e89f9f33acaf7ac2 | [] | no_license | Sikhu-Ntaka/Skogestad-Python | https://github.com/Sikhu-Ntaka/Skogestad-Python | 0 | 0 | null | 2020-01-29T13:15:55 | 2020-01-29T13:05:41 | null | Jupyter Notebook | false | false | .py | 1,731 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linear Regression [35pts (+5 bonus)]
#
# ## Introduction
# One of the most widespread regression tools is the simple but powerful linear regression. In this notebook, you will engineer the Pittsburgh bus data into numerical features and use them to predict the number of minutes until the bus reaches the bus stop at Forbes and Morewood.
#
# Notebook restriction: you may not use scikit-learn for this notebook.
#
# ## Q1: Labeling the Dataset [8pts]
#
# You may have noticed that the Pittsburgh bus data has a predictions table with the TrueTime predictions on arrival time, however it does not have the true label: the actual number of minutes until a bus reaches Forbes and Morewood. You will have to generate this yourself.
#
# Using the `all_trips` function that you implemented in homework 2, you can split the dataframe into separate trips. You will first process each trip into a form more natural for the regression setting. For each trip, you will need to locate the point at which a bus passes the bus stop to get the time at which the bus passes the bus stop. From here, you can calculate the true label for all prior datapoints, and throw out the rest.
#
# ### Importing functions from homework 2
#
# Using the menu in Jupyter, you can import code from your notebook as a Python script using the following steps:
# 1. Click File -> Download as -> Python (.py)
# 2. Save file (time_series.py) in the same directory as this notebook
# 3. (optional) Remove all test code (i.e. lines between AUTOLAB_IGNORE macros) from the script for faster loading time
# 4. Import from the notebook with `from time_series import function_name`
#
# ### Specifications
#
# 1. To determine when the bus passes Morewood, we will use the Euclidean distance as a metric to determine how close the bus is to the bus stop.
# 2. We will assume that the row entry with the smallest Euclidean distance to the bus stop is when the bus reaches the bus stop, and that you should truncate all rows that occur **after** this entry. In the case where there are multiple entries with the exact same minimal distance, you should just consider the first one that occurs in the trip (so truncate everything after the first occurrence of minimal distance).
# 3. Assume that the row with the smallest Euclidean distance to the bus stop is also the true time at which the bus passes the bus stop. Using this, create a new column called `eta` that contains for each row, the number of minutes until the bus passes the bus stop (so the last row of every trip will have an `eta` of 0).
# 4. Make sure your `eta` is numerical and not a python timedelta object.
import pandas as pd
import numpy as np
import scipy.linalg as la
from collections import Counter
import datetime
# AUTOLAB_IGNORE_START
from time_series import load_data, split_trips
vdf, _ = load_data('bus_train.db')
all_trips = split_trips(vdf)
# AUTOLAB_IGNORE_STOP
#https://stackoverflow.com/questions/4983258/python-how-to-check-list-monotonicity
pd.options.mode.chained_assignment = None
def label_and_truncate(trip, bus_stop_coordinates):
""" Given a dataframe of a trip following the specification in the previous homework assignment,
generate the labels and throw away irrelevant rows.
Args:
trip (dataframe): a dataframe from the list outputted by split_trips from homework 2
stop_coordinates ((float, float)): a pair of floats indicating the (latitude, longitude)
coordinates of the target bus stop.
Return:
(dataframe): a labeled trip that is truncated at Forbes and Morewood and contains a new column
called `eta` which contains the number of minutes until it reaches the bus stop.
"""
    # Squared Euclidean distance from each row to the bus stop (the square root is unnecessary,
    # since we only need the argmin)
    lat = (trip["lat"] - bus_stop_coordinates[0])**2
    lon = (trip["lon"] - bus_stop_coordinates[1])**2
    dist = np.array(lat + lon)
    # Keep everything up to (and including) the first closest approach to the stop
    result = trip[:np.argmin(dist) + 1]
    # The timestamp of the last remaining row is taken as the time the bus passes the stop
    passing_time = np.array(result.tail(1).index)
    ongoing_time = np.array(result.index)
    etas = pd.to_numeric((passing_time - ongoing_time).astype('timedelta64[m]'))
    result["eta"] = etas
    return result
# AUTOLAB_IGNORE_START
morewood_coordinates = (40.444671114203, -79.94356058465502) # (lat, lon)
labeled_trips = [label_and_truncate(trip, morewood_coordinates) for trip in all_trips]
# print(len(labeled_trips))
labeled_vdf = pd.concat(labeled_trips).reset_index()
# We remove datapoints that make no sense (ETA more than 10 hours)
labeled_vdf = labeled_vdf[labeled_vdf["eta"] < 10*60].reset_index(drop=True)
print(Counter([len(t) for t in labeled_trips]))
print(labeled_vdf.head())
# AUTOLAB_IGNORE_STOP
# For our implementation, this returns the following output
# ```python
# >>> Counter([len(t) for t in labeled_trips])
# Counter({1: 506, 21: 200, 18: 190, 20: 184, 19: 163, 16: 162, 22: 159, 17: 151, 23: 139, 31: 132, 15: 128, 2: 125, 34: 112, 32: 111, 33: 101, 28: 98, 14: 97, 30: 95, 35: 95, 29: 93, 24: 90, 25: 89, 37: 86, 27: 83, 39: 83, 38: 82, 36: 77, 26: 75, 40: 70, 13: 62, 41: 53, 44: 52, 42: 47, 6: 44, 5: 39, 12: 39, 46: 39, 7: 38, 3: 36, 45: 33, 47: 33, 43: 31, 48: 27, 4: 26, 49: 26, 11: 25, 50: 25, 10: 23, 51: 23, 8: 19, 9: 18, 53: 16, 54: 15, 52: 14, 55: 14, 56: 8, 57: 3, 58: 3, 59: 3, 60: 3, 61: 1, 62: 1, 67: 1})
# >>> labeled_vdf.head()
# tmstmp vid lat lon hdg pid rt des \
# 0 2016-08-11 10:56:00 5549 40.439504 -79.996981 114 4521 61A Swissvale
# 1 2016-08-11 10:57:00 5549 40.439504 -79.996981 114 4521 61A Swissvale
# 2 2016-08-11 10:58:00 5549 40.438842 -79.994733 124 4521 61A Swissvale
# 3 2016-08-11 10:59:00 5549 40.437938 -79.991213 94 4521 61A Swissvale
# 4 2016-08-11 10:59:00 5549 40.437938 -79.991213 94 4521 61A Swissvale
#
# pdist spd tablockid tatripid eta
# 0 1106 0 061A-164 6691 16
# 1 1106 0 061A-164 6691 15
# 2 1778 8 061A-164 6691 14
# 3 2934 7 061A-164 6691 13
# 4 2934 7 061A-164 6691 13
# ```
# ## Q2: Generating Basic Features [8pts]
# In order to perform linear regression, we need to have numerical features. However, not everything in the bus database is a number, and not all of the numbers even make sense as numerical features. If you use the data as is, it is highly unlikely that you'll achieve anything meaningful.
#
# Consequently, you will perform some basic feature engineering. Feature engineering is extracting "features" or statistics from your data, which hopefully improves the performance of your learning algorithm (in this case, linear regression). Good features can often make up for poor model selection and improve your overall predictive ability on unseen data. In essence, you want to turn your data into something your algorithm understands.
#
# ### Specifications
# 1. The input to your function will be a concatenation of the trip dataframes generated in Q1 with the index dropped (so same structure as the original dataframe, but with an extra column and less rows).
# 2. Linear models typically have a constant bias term. We will encode this as a column of 1s in the dataframe. Call this column 'bias'.
# 2. We will keep the following columns as is, since they are already numerical: pdist, spd, lat, lon, and eta
# 3. Time is a cyclic variable. To encode this as a numerical feature, we can use a sine/cosine transformation. Suppose we have a feature of value f that ranges from 0 to N. Then, the sine and cosine transformation would be $\sin\left(2\pi \frac{f}{N}\right)$ and $\cos\left(2\pi \frac{f}{N}\right)$. For example, the sine transformation of 6 hours would be $\sin\left(2\pi \frac{6}{24}\right)$, since there are 24 hours in a cycle. You should create sine/cosine features for the following:
# * day of week (cycles every week, 0=Monday)
# * hour of day (cycles every 24 hours, 0=midnight)
# * time of day represented by total number of minutes elapsed in the day (cycles every 60*24 minutes, 0=midnight).
# 4. Heading is also a cyclic variable, as it is the ordinal direction in degrees (so cycles every 360 degrees).
# 4. Buses run on different schedules on the weekday as opposed to the weekend. Create a binary indicator feature `weekday` that is 1 if the day is a weekday, and 0 otherwise.
# 5. Route and destination are both categorical variables. We can encode these as indicator vectors, where each column represents a possible category and a 1 in the column indicates that the row belongs to that category. This is also known as a one hot encoding. Make a set of indicator features for the route, and another set of indicator features for the destination.
# 6. The names of your indicator columns for your categorical variables should be exactly the values of the categorical variable. The pandas function `pd.get_dummies` will be useful (see the short example below).
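# A small standalone illustration of the cyclic and one-hot encodings described above (toy data, not the bus dataframe):
# +
toy = pd.DataFrame({'hour': [0, 6, 12, 18], 'rt': ['61A', '61B', '61A', '61C']})
toy['sin_hour'] = np.sin(2*np.pi*toy['hour']/24)
toy['cos_hour'] = np.cos(2*np.pi*toy['hour']/24)
print(pd.concat([toy, pd.get_dummies(toy['rt'])], axis=1))
# -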
# +
def create_features(vdf):
""" Given a dataframe of labeled and truncated bus data, generate features for linear regression.
Args:
df (dataframe) : dataframe of bus data with the eta column and truncated rows
Return:
(dataframe) : dataframe of features for each example
"""
df = pd.DataFrame()
df['pdist'] =vdf["pdist"]
df['spd']= vdf['spd']
df['lat']= vdf['lat']
df['lon']= vdf['lon']
df['eta']= vdf["eta"]
df['sin_hdg']=np.sin(2*np.pi*vdf["hdg"]/360)
df['cos_hdg']=np.cos(2*np.pi*vdf["hdg"]/360)
df['sin_day_of_week']=np.sin((2*np.pi*vdf["tmstmp"].dt.dayofweek)/7)
df['cos_day_of_week']=np.cos((2*np.pi*vdf["tmstmp"].dt.dayofweek)/7)
df['sin_hour_of_day']=np.sin((2*np.pi*vdf["tmstmp"].dt.hour)/24)
df['cos_hour_of_day']=np.cos((2*np.pi*vdf["tmstmp"].dt.hour)/24)
    # Minutes elapsed since midnight (timestamps are at minute resolution)
    mins = vdf["tmstmp"].dt.hour * 60 + vdf["tmstmp"].dt.minute
    df['sin_time_of_day'] = np.sin((2*np.pi*mins)/(60*24))
    df['cos_time_of_day'] = np.cos((2*np.pi*mins)/(60*24))
    # Weekdays are Monday (0) through Friday (4)
    df["weekday"] = [1 if i < 5 else 0 for i in vdf["tmstmp"].dt.dayofweek]
    df['bias'] = 1
set_des=set(vdf["des"])
set_rt =set(vdf["rt"])
for i in set_des:
df[i]=[1 if i==j else 0 for j in vdf["des"]]
for i in set_rt:
df[i]=[1 if i==j else 0 for j in vdf["rt"]]
return df
# AUTOLAB_IGNORE_START
#print(labeled_vdf["des"])
vdf_features = create_features(labeled_vdf)
vdf_features
# AUTOLAB_IGNORE_STOP
# -
# AUTOLAB_IGNORE_START
with pd.option_context('display.max_columns', 26):
print(vdf_features.columns)
print(vdf_features.head())
# AUTOLAB_IGNORE_STOP
# Our implementation has the following output. Verify that your code has the following columns (order doesn't matter):
# ```python
# >>> vdf_features.columns
# Index([ u'bias', u'pdist', u'spd',
# u'lat', u'lon', u'eta',
# u'sin_hdg', u'cos_hdg', u'sin_day_of_week',
# u'cos_day_of_week', u'sin_hour_of_day', u'cos_hour_of_day',
# u'sin_time_of_day', u'cos_time_of_day', u'weekday',
# u'Braddock ', u'Downtown', u'Greenfield Only',
# u'McKeesport ', u'Murray-Waterfront', u'Swissvale',
# u'61A', u'61B', u'61C',
# u'61D'],
# dtype='object')
# bias pdist spd lat lon eta sin_hdg cos_hdg \
# 0 1.0 1106 0 40.439504 -79.996981 16 0.913545 -0.406737
# 1 1.0 1106 0 40.439504 -79.996981 15 0.913545 -0.406737
# 2 1.0 1778 8 40.438842 -79.994733 14 0.829038 -0.559193
# 3 1.0 2934 7 40.437938 -79.991213 13 0.997564 -0.069756
# 4 1.0 2934 7 40.437938 -79.991213 13 0.997564 -0.069756
#
# sin_day_of_week cos_day_of_week ... Braddock Downtown \
# 0 0.433884 -0.900969 ... 0.0 0.0
# 1 0.433884 -0.900969 ... 0.0 0.0
# 2 0.433884 -0.900969 ... 0.0 0.0
# 3 0.433884 -0.900969 ... 0.0 0.0
# 4 0.433884 -0.900969 ... 0.0 0.0
#
# Greenfield Only McKeesport Murray-Waterfront Swissvale 61A 61B 61C \
# 0 0.0 0.0 0.0 1.0 1.0 0.0 0.0
# 1 0.0 0.0 0.0 1.0 1.0 0.0 0.0
# 2 0.0 0.0 0.0 1.0 1.0 0.0 0.0
# 3 0.0 0.0 0.0 1.0 1.0 0.0 0.0
# 4 0.0 0.0 0.0 1.0 1.0 0.0 0.0
#
# 61D
# 0 0.0
# 1 0.0
# 2 0.0
# 3 0.0
# 4 0.0
#
# [5 rows x 25 columns]
# ```
# ## Q3 Linear Regression using Ordinary Least Squares [10 + 4pts]
# Now you will finally implement a linear regression. As a reminder, linear regression models the data as
#
# $$\mathbf y = \mathbf X\mathbf \beta + \mathbf \epsilon$$
#
# where $\mathbf y$ is a vector of outputs, $\mathbf X$ is also known as the design matrix, $\mathbf \beta$ is a vector of parameters, and $\mathbf \epsilon$ is noise. We will be estimating $\mathbf \beta$ using Ordinary Least Squares, and we recommending following the matrix notation for this problem (https://en.wikipedia.org/wiki/Ordinary_least_squares).
#
# ### Specification
# 1. We use the numpy term array-like to refer to array like types that numpy can operate on (like Pandas DataFrames).
# 1. Regress the output (eta) on all other features
# 2. Return the predicted output for the inputs in X_test
# 3. Calculating the inverse $(X^TX)^{-1}$ is unstable and prone to numerical inaccuracies. Furthermore, the assumptions of Ordinary Least Squares require it to be positive definite and invertible, which may not be true if you have redundant features. Thus, you should instead use $(X^TX + \lambda*I)^{-1}$ for identity matrix $I$ and $\lambda = 10^{-4}$, which for now acts as a numerical "hack" to ensure this is always invertible. Furthermore, instead of computing the direct inverse, you should utilize the Cholesky decomposition which is much more stable when solving linear systems.
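# Concretely, the estimate below solves the regularized normal equations
#
# $$\hat{\beta} = (X^T X + \lambda I)^{-1} X^T \mathbf{y}$$
#
# A minimal standalone sketch of solving such a system with a Cholesky factorization, on synthetic data and
# independent of the class below, is:
# +
# Standalone sketch on synthetic data: solve (X^T X + lambda*I) beta = X^T y via Cholesky.
rng = np.random.RandomState(0)
X_demo = rng.randn(50, 3)
y_demo = X_demo @ np.array([1.0, -2.0, 0.5]) + 0.01 * rng.randn(50)
lam = 1e-4
A = X_demo.T @ X_demo + lam * np.eye(3)
b = X_demo.T @ y_demo
beta_demo = la.cho_solve(la.cho_factor(A), b)  # la is scipy.linalg, imported at the top of this notebook
print(beta_demo)
# -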
class LR_model():
""" Perform linear regression and predict the output on unseen examples.
Attributes:
beta (array_like) : vector containing parameters for the features """
def __init__(self, X, y):
""" Initialize the linear regression model by computing the estimate of the weights parameter
Args:
X (array-like) : feature matrix of training data where each row corresponds to an example
y (array like) : vector of training data outputs
"""
        x = np.array(X)
        y = np.array(y)
        lam = 10**-4
        # Regularized normal equations: (X^T X + lambda*I) beta = X^T y,
        # solved via a Cholesky factorization as required by the specification
        A = (x.T @ x) + lam * np.identity(x.shape[1])
        b = x.T @ y
        self.beta = la.cho_solve(la.cho_factor(A), b)
def predict(self, X_p):
""" Predict the output of X_p using this linear model.
Args:
X_p (array_like) feature matrix of predictive data where each row corresponds to an example
Return:
(array_like) vector of predicted outputs for the X_p
"""
        x_arr = np.array(X_p)
        return x_arr @ self.beta
# We have provided some validation data for you, which is another scrape of the Pittsburgh bus data (but for a different time span). You will need to do the same processing to generate labels and features to your validation dataset. Calculate the mean squared error of the output of your linear regression on both this dataset and the original training dataset.
#
# How does it perform? One simple baseline is to make sure that it at least predicts as well as predicting the mean of what you have seen so far. Does it do better than predicting the mean? Compare the mean squared error of a predictor that predicts the mean vs your linear classifier.
#
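# For reference, the quantity being compared in both cases is simply the mean squared error over the $N$ examples,
#
# $$\text{MSE} = \frac{1}{N}\sum_{i=1}^{N}\left(y_i - \hat{y}_i\right)^2,$$
#
# where $\hat{y}_i$ is either the linear model's prediction or the constant mean of the training outputs.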
# ### Specifications
# 1. Build your linear model using only the training data
# 2. Compute the mean squared error of the predictions on both the training and validation data.
# 3. Compute the mean squared error of predicting the mean of the **training outputs** for all inputs.
# 4. You will need to process the validation dataset in the same way you processed the training dataset.
# 5. You will need to split your features from your output (eta) prior to calling compute_mse
# +
# Calculate mean squared error on both the training and validation set
def compute_mse(LR, X, y, X_v, y_v):
""" Given a linear regression model, calculate the mean squared error for the
training dataset, the validation dataset, and for a mean prediction
Args:
LR (LR_model) : Linear model
X (array-like) : feature matrix of training data where each row corresponds to an example
y (array like) : vector of training data outputs
X_v (array-like) : feature matrix of validation data where each row corresponds to an example
y_v (array like) : vector of validation data outputs
Return:
(train_mse, train_mean_mse,
valid_mse, valid_mean_mse) : a 4-tuple of mean squared errors
1. MSE of linear regression on the training set
2. MSE of predicting the mean on the training set
3. MSE of linear regression on the validation set
4. MSE of predicting the mean on the validation set
"""
    # MSE of the linear model and of the mean-of-training predictor, on the training data
    yhat = LR.predict(X)
    mse_lr_tr = np.mean((y - yhat)**2)
    mse_me_tr = np.mean((y - np.mean(y))**2)
    # The same two errors on the validation data (the mean is still the *training* mean)
    yhat_v = LR.predict(X_v)
    mse_lr_v = np.mean((y_v - yhat_v)**2)
    mse_me_v = np.mean((y_v - np.mean(y))**2)
    return (mse_lr_tr, mse_me_tr, mse_lr_v, mse_me_v)
# +
# AUTOLAB_IGNORE_START
# First you should replicate the same processing pipeline as we did to the training set
vdf_valid, pdf_valid = load_data('bus_valid.db')
all_trips_valid =split_trips(vdf_valid)
labeled_trips_valid = [label_and_truncate(trip, morewood_coordinates) for trip in all_trips_valid]
labeled_vdf_valid = pd.concat(labeled_trips_valid).reset_index()
vdf_features_valid = create_features(labeled_vdf_valid)
# Separate the features from the output and pass it into your linear regression model.
y_df =vdf_features.eta
X_df = vdf_features.drop("eta",axis=1)
y_valid_df = vdf_features_valid.eta
X_valid_df =vdf_features_valid.drop("eta",axis=1)
LR = LR_model(X_df, y_df)
print(compute_mse(LR,
X_df,
y_df,
X_valid_df,
y_valid_df))
# AUTOLAB_IGNORE_STOP
# -
# As a quick check, our training data MSE is approximately 38.99.
# ## Q4 TrueTime Predictions [5pts]
# How do you fare against the Pittsburgh Truetime predictions? In this last problem, you will match predictions to their corresponding vehicles to build a dataset that is labeled by TrueTime. Remember that we only evaluate performance on the validation set (never the training set). How did you do?
#
# ### Specification
# 1. You should use the pd.DataFrame.merge function to combine your vehicle dataframe and predictions dataframe into a single dataframe. You should drop any rows that have no predictions (see the how parameter). (http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.merge.html)
# 2. You can compute the TrueTime ETA by taking their predicted arrival time and subtracting the timestamp, and converting that into an integer representing the number of minutes.
# 3. Compute the mean squared error for linear regression only on the rows that have predictions (so only the rows that remain after the merge).
# +
def compare_truetime(LR, labeled_vdf, pdf):
""" Compute the mse of the truetime predictions and the linear regression mse on entries that have predictions.
Args:
LR (LR_model) : an already trained linear model
labeled_vdf (pd.DataFrame): a dataframe of the truncated and labeled bus data (same as the input to create_features)
pdf (pd.DataFrame): a dataframe of TrueTime predictions
Return:
(tt_mse, lr_mse): a tuple of the TrueTime MSE, and the linear regression MSE
"""
    # Linear-regression predictions for every labeled row
    featured_vdf = create_features(labeled_vdf)
    x = featured_vdf.drop("eta", axis=1)
    labeled_vdf["eta_lr"] = LR.predict(x)
    # Keep only the rows that have a TrueTime prediction
    merged_df = pd.merge(labeled_vdf, pdf, how="inner")
    # TrueTime ETA in minutes: predicted arrival time minus the timestamp
    merged_df["eta_tt"] = np.array((merged_df["prdtm"] - merged_df["tmstmp"]).dt.seconds / 60)
    mse_lr = np.mean((merged_df["eta_lr"] - merged_df["eta"])**2)
    mse_tt = np.mean((merged_df["eta_tt"] - merged_df["eta"])**2)
    return (mse_tt, mse_lr)
# AUTOLAB_IGNORE_START
compare_truetime(LR, labeled_vdf_valid, pdf_valid)
# AUTOLAB_IGNORE_STOP
#50.20239900730732, 60.40782041336532
# -
# As a sanity check, your linear regression MSE should be approximately 50.20.
# ## Q5 Feature Engineering contest (bonus)
#
# You may be wondering "why did we pick the above features?" Some of the above features may be entirely useless, or you may have ideas on how to construct better features. Sometimes, choosing good features can be the entirety of a data science problem.
#
# In this question, you are given complete freedom to choose what and how many features you want to generate. Upon submission to Autolab, we will run linear regression on your generated features and maintain a scoreboard of best regression accuracy (measured by mean squared error).
#
# The top scoring students will receive a bonus of 5 points.
#
# ### Tips:
# * Test your features locally by building your model using the training data, and predicting on the validation data. Compute the mean squared error on the **validation dataset** as a metric for how well your features generalize. This helps avoid overfitting to the training dataset, and you'll have faster turnaround time than resubmitting to autolab.
# * The linear regression model will be trained on your chosen features of the same training examples we provide in this notebook.
# * We test your regression on a different dataset from the training and validation set that we provide for you, so the MSE you get locally may not match how your features work on the Autolab dataset.
# * We will solve the linear regression using Ordinary Least Squares with regularization $\lambda=10^{-4}$ and a Cholesky factorization, exactly as done earlier in this notebook.
# * Note that the argument contains **UNlabeled** data: you cannot build features off the output labels (there is no ETA column). This is in contrast to before, where we kept everything inside the same dataframe for convenience. You can produce the sample input by removing the "eta" column, which we provide code for below.
# * Make sure your features are all numeric. Try everything!
# +
def contest_features(vdf, vdf_train):
""" Given a dataframe of UNlabeled and truncated bus data, generate ANY features you'd like for linear regression.
Args:
vdf (dataframe) : dataframe of bus data with truncated rows but unlabeled (no eta column )
for which you should produce features
vdf_train (dataframe) : dataframe of training bus data, truncated and labeled
Return:
(dataframe) : dataframe of features for each example in vdf
"""
# create your own engineered features
pass
# AUTOLAB_IGNORE_START
# contest_cols = list(labeled_vdf.columns)
# contest_cols.remove("eta")
# contest_features(labeled_vdf_valid[contest_cols], labeled_vdf).head()
# AUTOLAB_IGNORE_STOP
# -
| 24,644 |
/experimentation/jiaxin_experiment/.ipynb_checkpoints/OOP_format-checkpoint.ipynb | eafe04a384f0c3bdb07deb7ef176ea052e9ceea3 | [] | no_license | mtaziz/STB_social_media | https://github.com/mtaziz/STB_social_media | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 21,460 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Amend to set wd to STB_social_media_analytics.
# %cd /home/zyf0717/git_environment/STB_social_media_analytics/
import pandas as pd
import mysql.connector
import re
import yaml
import utils
from random import random
from selenium import webdriver
from datetime import datetime, date, timedelta
from time import sleep
# +
with open('config_file.yml') as file:
configs = yaml.load(file, Loader=yaml.FullLoader)
chromedriver_path = configs['General']['chromedriver_path']
db_in_flag = configs['TripAdvisor']['db_in_flag']
db_out_flag = configs['TripAdvisor']['db_out_flag']
if db_in_flag == 'csv':
### FOR POC ONLY ###
poi_index = [1, 2]
poi_name = ['Gardens by the Bay',
'Marina Bay Sands Skypark'
]
poi_url = ['https://www.tripadvisor.com.sg/Attraction_Review-g294265-d2149128-Reviews-Gardens_by_the_Bay-Singapore.html',
'https://www.tripadvisor.com.sg/Attraction_Review-g294265-d1837767-Reviews-Marina_Bay_Sands_Skypark-Singapore.html'
]
poi_df = pd.DataFrame({'poi_index':poi_index,
'poi_name':poi_name,
'poi_url':poi_url}
)
####################
if db_in_flag in ['sqlite', 'mysql']:
poi_df = pd.DataFrame()
if db_in_flag in ['sqlite', 'mysql'] or db_out_flag in ['sqlite', 'mysql']:
cnx = mysql.connector.connect(host=configs['General']['host'],
database=configs['General']['database'],
user=configs['General']['user'],
password=configs['General']['password']
)
else:
cnx = None
# -
class CrawlTripAdvisor:
attributes_col_names = ['POI_INDEX',
'TOTAL_REVIEWS',
'RANKING',
'AVERAGE_RATING',
'RATING_5_COUNT',
'RATING_4_COUNT',
'RATING_3_COUNT',
'RATING_2_COUNT',
'RATING_1_COUNT',
'ABOUT',
'ADDRESS',
'ATTRIBUTES_CRAWLED_TIME'
]
reviews_col_names = ['REVIEW_INDEX',
'WEBSITE_INDEX',
'POI_INDEX',
'REVIEWER_URL',
'REVIEW_ID',
'REVIEW_DATE',
'REVIEW_RATING',
'REVIEW_TITLE',
'REVIEW_BODY',
'DATE_OF_EXPERIENCE',
'TRIP_TYPE',
'REVIEW_CRAWLED_TIME'
]
reviewers_col_names = ['REVIEWER_URL',
'REVIEWER_NAME',
'RAW_HOME_LOCATION',
'CLEANED_HOME_LOCATION',
'NUMBER_OF_CONTRIBUTIONS',
'HELPFUL_VOTES',
'REVIEWER_UPDATED_TIME'
]
def __init__(self, chromedriver_path, poi_df, cnx, db_out_flag):
self.driver = webdriver.Chrome(chromedriver_path)
if cnx is not None:
self.cursor = cnx.cursor()
self.poi_df = poi_df
self.db_out_flag = db_out_flag
self.number_of_pages = None
self.earliest_date = None
self.attributes_df = pd.DataFrame(columns=self.attributes_col_names)
self.reviews_df = pd.DataFrame(columns=self.reviews_col_names)
self.reviewers_df = pd.DataFrame(columns=self.reviewers_col_names)
# Create unique CSVs.
self.datetime_string = datetime.now().strftime('%y%m%d_%H%M%S')
self.attributes_df.to_csv('./tripadvisor/output/attributes_{}.csv'.format(self.datetime_string), mode='a', index=False)
self.reviews_df.to_csv('./tripadvisor/output/reviews_{}.csv'.format(self.datetime_string), mode='a', index=False)
self.reviewers_df.to_csv('./tripadvisor/output/reviewers_{}.csv'.format(self.datetime_string), mode='a', index=False)
def add_to_database(self):
# Read csv, add to database, then cnx.commit().
return
def crawl_pois(self, number_of_pages=None, earliest_date=None):
self.number_of_pages = number_of_pages
self.earliest_date = earliest_date
        for _, row in self.poi_df.iterrows():
self.driver.get(row['poi_url'])
self.crawl_attributes(row['poi_index'])
self.crawl_reviews(row['poi_index'])
self.attributes_df.to_csv('./tripadvisor/output/attributes_{}.csv'.format(self.datetime_string), mode='a', header=False, index=False)
self.attributes_df = pd.DataFrame(columns=self.attributes_col_names)
def crawl_reviews(self, poi_index):
if self.earliest_date is not None:
# Crawl up till earliest_date.
pass
elif self.number_of_pages is not None:
for i in range(self.number_of_pages):
self.crawl_reviews_1_page(poi_index)
self.reviews_df.to_csv('./tripadvisor/output/reviews_{}.csv'.format(self.datetime_string), mode='a', header=False, index=False)
self.reviewers_df.to_csv('./tripadvisor/output/reviewers_{}.csv'.format(self.datetime_string), mode='a', header=False, index=False)
self.reviews_df = pd.DataFrame(columns=self.reviews_col_names)
self.reviewers_df = pd.DataFrame(columns=self.reviewers_col_names)
else:
# Crawl all pages.
pass
def crawl_attributes(self, poi_index):
driver = self.driver
# Crawling attributes elements.
ranking_text = driver.find_element_by_xpath('//span[@class="header_popularity popIndexValidation "]').text
rating_breakdown_elements = driver.find_elements_by_xpath('//span[@class="location-review-review-list-parts-ReviewRatingFilter__row_num--3cSP7"]')
address_text_1 = driver.find_element_by_xpath('//span[@class="street-address"]').text
address_text_2 = driver.find_element_by_xpath('//span[@class="extended-address"]').text
address_text_3 = driver.find_element_by_xpath('//span[@class="locality"]').text
address_text_4 = driver.find_element_by_xpath('//span[@class="country-name"]').text
about_more_button = driver.find_elements_by_xpath('//span[@class="attractions-attraction-detail-about-card-Description__readMore--2pd33"]')
if about_more_button != []:
about_more_button[0].click()
sleep(0.5)
about_text = driver.find_element_by_xpath('//div[@class="attractions-attraction-detail-about-card-Description__modalText--1oJCY"]').text
about_more_close_button = driver.find_element_by_xpath('//div[@class="_2EFRp_bb"]')
about_more_close_button.click()
sleep(0.5)
else:
about_text = driver.find_element_by_xpath('//div[@class="attractions-attraction-detail-about-card-AttractionDetailAboutCard__section--1_Efg"]').text
# Parsing attributes.
rating_breakdown = self.parse_rating_breakdown_elements(rating_breakdown_elements)
total_reviews = self.calculate_total_reviews(rating_breakdown)
ranking = self.parse_ranking_text(ranking_text)
average_rating = self.calculate_average_rating(rating_breakdown)
about = about_text
address = self.parse_address_text(address_text_1,
address_text_2,
address_text_3,
address_text_4
)
poi_attributes = [poi_index,
total_reviews,
ranking,
average_rating,
rating_breakdown[0],
rating_breakdown[1],
rating_breakdown[2],
rating_breakdown[3],
rating_breakdown[4],
about,
address,
datetime.now()
]
# Inserting attributes into dataframe
poi_attributes_dict = dict(zip(self.attributes_col_names, poi_attributes))
self.attributes_df = self.attributes_df.append(poi_attributes_dict, ignore_index=True)
def crawl_reviews_1_page(self, poi_index, earliest_date=None):
driver = self.driver
# If crawl all languages, uncomment the follwing 3 lines.
# all_languages_button = driver.find_element_by_xpath('//span[@class="location-review-review-list-parts-LanguageFilter__no_wrap--2Dckv"]')
# all_languages_button.click()
# sleep(1)
read_more_button = driver.find_element_by_xpath('//span[@class="location-review-review-list-parts-ExpandableReview__cta--2mR2g"]')
read_more_button.click()
sleep(1)
# Crawling review elements.
reviewer_url_elements = driver.find_elements_by_xpath('//a[@class="ui_header_link social-member-event-MemberEventOnObjectBlock__member--35-jC"]')
reviewer_details_elements = driver.find_elements_by_xpath('//div[@class="social-member-event-MemberEventOnObjectBlock__event_wrap--1YkeG"]')
review_id_elements = driver.find_elements_by_xpath('//div[@class="location-review-review-list-parts-SingleReview__mainCol--1hApa"]')
review_rating_elements = driver.find_elements_by_xpath('//div[@class="location-review-review-list-parts-RatingLine__bubbles--GcJvM"]/span')
review_title_elements = driver.find_elements_by_xpath('//a[@class="location-review-review-list-parts-ReviewTitle__reviewTitleText--2tFRT"]')
review_body_elements = driver.find_elements_by_xpath('//div[@class="location-review-review-list-parts-ExpandableReview__containerStyles--1G0AE"]')
date_of_experience_elements = driver.find_elements_by_xpath('//span[@class="location-review-review-list-parts-EventDate__event_date--1epHa"]')
for i in range(len(reviewer_url_elements)):
# Parsing review and reviewer details
reviewer_url = reviewer_url_elements[i].get_attribute('href')
reviewer_name = reviewer_url_elements[i].text
review_id = self.parse_review_id_elements(review_id_elements[i].get_attribute('data-reviewid'))
review_date = self.parse_review_date(reviewer_details_elements[i].text)
location_contribution_votes = self.parse_location_contributions_votes(reviewer_details_elements[i].text)
review_rating = self.parse_review_rating(review_rating_elements[i].get_attribute('class'))
review_title = review_title_elements[i].text
review_body = self.parse_review_body(review_body_elements[i].text)
date_of_experience = self.parse_date_of_experience(review_body_elements[i].text)
trip_type = self.parse_trip_type(review_body_elements[i].text)
review_details = [None, # REVIEW_INDEX
1, # WEBSITE_INDEX (TripAdvisor is '1')
poi_index,
reviewer_url,
review_id,
review_date,
review_rating,
review_title,
review_body,
date_of_experience,
trip_type,
datetime.now()
]
reviewer_details = [reviewer_url,
reviewer_name,
location_contribution_votes[0],
None, # CLEANED_HOME_LOCATION
location_contribution_votes[1],
location_contribution_votes[2],
datetime.now()
]
# Inserting reviews into dataframe.
review_details_dict = dict(zip(self.reviews_col_names, review_details))
self.reviews_df = self.reviews_df.append(review_details_dict, ignore_index=True)
# Inserting reviewers into dataframe.
reviewer_details_dict = dict(zip(self.reviewers_col_names, reviewer_details))
self.reviewers_df = self.reviewers_df.append(reviewer_details_dict, ignore_index=True)
        # find_elements (plural) returns an empty list instead of raising when there is no next page.
        next_buttons = driver.find_elements_by_xpath('//a[@class="ui_button nav next primary "]')
        if next_buttons:
            next_buttons[0].click()
            sleep(1)
# Methods below are all utility functions.
def calculate_total_reviews(self, rating_breakdown):
return sum(rating_breakdown)
def parse_ranking_text(self, text):
return int(text[1:text.find(' of')].replace(',', ''))
def calculate_average_rating(self, rating_breakdown):
total = sum(rating_breakdown)
average = 0
for i, j in enumerate(rating_breakdown[::-1]):
average += (i+1)*j/total
return average
def parse_rating_breakdown_elements(self, elements):
rating_breakdown = []
for element in elements:
text = element.text
rating_breakdown.append(int(text.replace(",", "")))
return rating_breakdown
def parse_address_text(self, text_1, text_2, text_3, text_4):
return ('{}, {}, {} {}'.format(text_1, text_2, text_3, text_4))
def parse_review_date(self, text):
date_string = text[text.find('wrote a review ')+15:text.find('\n')]
if date_string == 'Today':
return datetime.now().strftime('%d-%m-%Y')
elif date_string == 'Yesterday':
return (datetime.now() - timedelta(1)).strftime('%d-%m-%Y')
re_search = re.search('(\d+) (\w+)', date_string)
current_year = datetime.now().strftime('%Y')
if re_search is not None:
if len(re_search.group(1)) == 1:
return datetime.strptime('0' + date_string + ' ' + current_year, '%d %b %Y').strftime('%d-%m-%Y')
else:
return datetime.strptime(date_string + ' ' + current_year, '%d %b %Y').strftime('%d-%m-%Y')
return datetime.strptime(date_string, '%b %Y').strftime('%m-%Y')
def parse_location_contributions_votes(self, text):
location, contributions, votes = None, None, None
votes_search = re.search('(\d+) helpful votes?', text)
if votes_search is not None:
votes = int(votes_search.group(1))
contributions_search = re.search('(\d+) contributions?', text)
if contributions_search is not None:
contributions = int(contributions_search.group(1))
location_search = re.search('(.+?){} contributions?'.format(contributions), text)
if location_search is not None:
location = location_search.group(1)
return location, contributions, votes
def parse_review_id_elements(self, text):
return int(text)
def parse_review_rating(self, text):
return int(text[-2:])//10
def parse_review_body(self, text):
return text[:text.find('Read less')-1]
def parse_date_of_experience(self, text):
substring = re.search('Date of experience: (.+)\n', text).group(1)
return datetime.strptime(substring, '%B %Y').strftime('%m-%Y')
def parse_trip_type(self, text):
if text.find('Trip type: ') == -1:
return None
substring = text[text.find('Trip type: ')+11:]
return substring[:substring.find('\n')]
CrawlTripAdvisor(chromedriver_path, poi_df, cnx, db_out_flag).crawl_pois(number_of_pages=2)
datetime.now().strftime('%y%m%d_%H%M%S')
'12 Jan'.split()
| 16,636 |
/Python_advanced/multiprocessing.ipynb | b5a17c2a95cc436dfc73680b386204cd5a1e373b | [] | no_license | Artia-Inspirenet/education | https://github.com/Artia-Inspirenet/education | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 1,923 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Defining the network's input/output streams and the loss computation
# - by 刘道会
# - 2020-08 于重庆大学
# # 1. What should be fed into the network?
# 输入的肯定是图像信息,但是需要原始的图像信息需要经过一些处理以及格式变换才能作为输出,其实这个过程中维度的变化倒不是很大(可能会resize),主要是经过处理后的图像所包含的信息的一些处理。
#
# 因为可能会对图像进行很多操作,添加一些功能之类的,而且我们当然希望这些操作是可拓展的,所以说最好定义一个输入图像的类,然后需要什么功能就在类中为它定义函数,将最终的输出图像作为这个类一个属性,这样操作起来会非常方便。
#
# 那么,就开始进行这个类的定义吧
# 肯定要建立在之前工作的基础上,所以把前面的代码中要用到的部分copy一份过来
# +
import os
import glob
cornell_path = '../cornell'
graspf = glob.glob(os.path.join(cornell_path,'*','pcd*cpos.txt'))
graspf.sort()
rgbf = [filename.replace('cpos.txt','r.png') for filename in graspf]
depthf = [filename.replace('cpos.txt','d.tiff') for filename in graspf]
# +
import cv2
import numpy as np
class Image:
'''定义一个图像类,主要功能是将原始的图像输入转化为适合网络训练的格式并根据图像处理需求完成一些其他功能'''
def __init__(self,img):
'''
:功能 :类初始化函数
:参数 :ndarray,原始图像
'''
self.img = img
@classmethod #注意,使用cls的时候要在该函数前面加装饰器声明
def from_file(cls,file_path):
'''
:功能 : 从原始图片的路径对其进行载入
:参数 file_path : str,原始图像所在的路径
:返回 class : 由指定路径的原始图片实例化的Image类
:备注 : 这里用到的cls方法要学习一下
'''
return cls(cv2.imread(file_path))
# -
# 上面这就完成了这个类的基本定义,下面对它的两种初始化方法进行测试:
# +
#第一种方法,直接传入原始图像来初始化
img = cv2.imread(rgbf[0])
class1 = Image(img)
class2 = Image.from_file(rgbf[0])
print(class1.img.shape)
print(class2.img.shape)
print(class1.img == class2.img)
# -
# 下面为其添加一些需要用到的预处理操作
# +
#import cv2 #这块本来是用cv2.imread来做读取的,后面发现读不了tiff,所以就改用imageio了
from imageio import imread
import numpy as np
class Image:
'''定义一个图像类,主要功能是将原始的图像输入转化为适合网络训练的格式并根据图像处理需求完成一些其他功能'''
def __init__(self,img):
'''
:功能 :类初始化函数
:参数 :ndarray,原始图像
'''
self.img = img
@classmethod
def from_file(cls,file_path):
'''
:功能 : 从原始图片的路径对其进行载入
:参数 file_path : str,原始图像所在的路径
:返回 class : 由指定路径的原始图片实例化的Image类
:备注 : 这里用到的cls方法要学习一下
'''
return cls(imread(file_path))
def img_format(self):
'''
:功能 :将原始图像转换为指定格式
'''
pass
def normalize(self):
'''
:功能 :将图像像素值标准化至[0,1]范围
'''
self.img = self.img.astype('float32')/255.0
self.img = self.img-self.img.mean()
def crop(self):
'''
:功能 :对图像进行裁剪操作
'''
pass
# +
#测试标准化函数的功能:
rgbclass = Image.from_file(rgbf[0])
print('标准化之前均值为%.3f' % rgbclass.img.mean())
print('标准化之前最大值为%d,最小值为%d' % (rgbclass.img.max(),rgbclass.img.min()))
rgbclass.normalize()
print('标准化之后均值为%.3f' % rgbclass.img.mean())
print('标准化之后最大值为%.3f,最小值为%.3f' % (rgbclass.img.max(),rgbclass.img.min()))
# -
print(rgbclass.img.shape)
# 到此预处理结束,图像信息就可以作为网络的输入信息了。
# 然后仿照着这个再给深度图写一个载入的类
class DepthImage(Image):
'''深度图像类,读取,载入,正则等预处理都是一样的,后面可能会添加一些针对深度图的专属处理功能'''
def __init__(self,img):
super(DepthImage,self).__init__(img)
@classmethod
def from_file(cls,file_path):
'''
:功能 : 从原始图片的路径对其进行载入
:参数 file_path : str,原始图像所在的路径
:返回 class : 由指定路径的原始图片实例化的Image类
:备注 : 这里用到的cls方法要学习一下
'''
return cls(imread(file_path))
def normalize(self):
'''
:功能 :将图像像素值标准化至[0,1]范围
'''
self.img = self.img.astype('float32')/255.0
self.img = self.img-self.img.mean()
a = DepthImage.from_file(depthf[0])
a.img.shape
# # 2. How should the network's output be parameterised?
# 网络的输出肯定是根据设置的表征参数来做的,也就是你怎么去定义一个抓取,比如这里的抓取通常使用五维信息参数(x,y,$\theta$,width,h)h这个参数没什么意思,就是夹爪的指宽度,所以关键的信息就是四个(x,y,$\theta$,width),我们选择这四个参数来确定一个抓取,而网络最终的预测输出要是一个抓取,那么网络最终的预测输出值就应该是这几个值的预测结果。所以,需要将现有的标注信息转化为本方法中所选择表征方法的几个参数,同时,对图像进行预处理做数据增强时应该保证这些标注也同步进行处理,因此,最好也定义一个类来对数据进行处理,功能就是将现有的标注转化为所需要的标注,同时,要有一些预处理的操作。
#
# 表征抓取的方法如下图所示:
# ![nihao](../images/5parameters.png)
# 首先还是一个一个地来,定义一个类,输入为一个抓取框的四个角点(每个点有两个坐标,所以共八个数据)信息,然后功能就是从这些点的坐标中提取出想要的信息。
#
# 后面程序中所用到的(x0,y0)即对应图中的右上角点
# 为了方便处理,这个类的输入设置为角点的坐标信息,要经过一步预先的提取,这一步上次的程序已经做过了,这里直接复制过来
# +
def str2num(point):
'''
:功能 :将字符串类型存储的抓取框脚点坐标取整并以元组形式返回
:参数 :point,字符串,以字符串形式存储的一个点的坐标
:返回值 :列表,包含int型抓取点数据的列表[x,y]
'''
x,y = point.split()
x,y = int(round(float(x))),int(round(float(y)))
return np.array([x,y])
def get_rectangles(cornell_grasp_file):
'''
:功能 :从抓取文件中提取抓取框的坐标信息
:参数 :cornell_grap_file:字符串,指向某个抓取文件的路径
:返回值 :列表,包含各个抓取矩形数据的列表
'''
grasp_rectangles = []
with open(cornell_grasp_file,'r') as f:
while True:
grasp_rectangle = []
point0 = f.readline().strip()
if not point0:
break
point1,point2,point3 = f.readline().strip(),f.readline().strip(),f.readline().strip()
grasp_rectangle = np.array([str2num(point0),
str2num(point1),
str2num(point2),
str2num(point3)])
grasp_rectangles.append(grasp_rectangle)
return grasp_rectangles
# +
#测试一下这个程序的功能是否符合预期
rectangles0 = get_rectangles(graspf[0])
print(type(rectangles0[0]))
print(rectangles0[0].shape)
print(rectangles0[0])
#这里可以看出,rectangles0中的每个元素都是我们需要的一个四个点数据
# -
# 下面开始抓取框处理类的定义
# +
import numpy as np
class Grasp:
'''定义一个抓取框处理类,主要功能是将原始的抓取框标注信息转化为训练所定义的表征信息,如中心位置,面积角度等,并根据图像处理需求完成一些相应的其他功能'''
def __init__(self,points):
'''
:功能 : 类初始化函数
:参数 points : 2darry,定义一个抓取框的四个角点坐标信息[[x1,y1],[x2,y2],[x3,y3],[x4,x4]]
'''
self.points = points
# -
gr = Grasp(rectangles0[0])
gr.points
# 首先计算抓取矩形框中心坐标,也就是计算这四个点的x均值和y均值,这个通过一行代码就可以实现
#
center = np.mean(gr.points, axis=0).astype(int)  # np.int is deprecated; plain int gives the same result
print(center)
# 然后计算抓取框的宽度(对应二指夹爪张开的宽度),只需要计算出对边的长度就行了,这里在前面实验的时候发现第一个点(x0,y0)到第二个点(x1,y1),第三个点到第四个点代表的就是我们想要的机械爪张开宽度,所以,直接计算即可
# +
x0,y0 = gr.points[0][0],gr.points[0][1]
x1,y1 = gr.points[1][0],gr.points[1][1]
dx = x0-x1
dy = y0-y1
width = np.sqrt(dx**2+dy**2)
print(width)
# -
# 然后是抓取框的长度,虽然没什么用,但还是可以计算一下,通过第二个点到第三个点之间的距离计算即可
# +
x1,y1 = gr.points[1][0],gr.points[1][1]
x2,y2 = gr.points[2][0],gr.points[2][1]
dx = x2-x1
dy = y2-y1
length = np.sqrt(dx**2+dy**2)
print(length)
# -
# 这里有必要说明一下,我这的width是抓取框的宽度,也就是手指的张开宽度,但ggcnn里面的length才是,我就是觉得用width更合理,所以就这么搞了,这俩是反的,后面应用(比如as_gr里面)的时候注意,其他没啥影响。
# **再次明确一下:在我这里,**
#
# **gr.width表示的是手爪抓取时的张开宽度,也就是标注框的第一个点(x0,y0)与第二个点(x1,y1)之间连线的长度;
# gr.length表示的是手指的,也就是标注框的第二个点(x1,y1)与第三个点(x2,y2)之间连线的长度;**
#
# **对比之下,显然gr.width这个量更加重要,而在ggcnn里面则是length这个量更重要**
# 下面是角度的计算,注意是和水平方向(x轴)的夹角:
#
# 这里使用np.arctan2来计算夹角,这个函数的好处是可以指定最终角度所在的象限,只需要在输入值上给上符号就行了,但注意应该把y放在前面,比如(+y,+x)代表第一象限,(-y,+x)代表第四象限,具体可以参照这个网址:https://numpy.org/doc/stable/reference/generated/numpy.arctan2.html
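
# A tiny added check (not from the original notes) of how np.arctan2 picks the quadrant from the signs of (y, x):

# +
import numpy as np

print(np.arctan2(1, 1))   # ~ +0.785 rad: (+y, +x), first quadrant
print(np.arctan2(-1, 1))  # ~ -0.785 rad: (-y, +x), fourth quadrant
print(np.arctan2(1, -1))  # ~ +2.356 rad: (+y, -x), second quadrant
# -
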
# 由图,计算这个角,还是用第一个点和第二个点的数据,这里我可以确定第一个点和第二个点连成的线是一条代表夹爪张开宽度的线,但具体在那边并不能确定,因为这一点在标注的时候他们就没有注意,如下图所示,这是不画出最后一条线的抓取矩形的效果,可以一定程度上反映出抓取框标注的顺序。
# ![抓取框可视化](../images/uncomplete_label.png)
#
#
# 此外,关于角度,即使是和x轴的夹角,同一个条线也会两个不同的结果,但是这两个角是等价的,所以取其一便好,选择与x轴正方向的夹角即可,下面的这行代码的意思就是,计算出最终的角度并将其角度转换为了与x轴正方向所成的锐角,我直接从gg-cnn中摘抄过来了。
# +
x0,y0 = gr.points[0][0],gr.points[0][1]
x1,y1 = gr.points[1][0],gr.points[1][1]
dx = x0-x1
dy = y0-y1
#我觉得这个地方给y加个负号是没有意义的,因为如上图所示,标注的情况是有很多种的,你也不知道最终dy是正是负,最后只要保证预测和标注处理方法一致就行了,当然,也可能作者有什么高见我没看懂。
#angle = ((np.arctan2(-dy,dx) + np.pi/2) % np.pi - np.pi/2)/np.pi*180 如果要输出角度制的话就用这行代码
angle = ((np.arctan2(-dy,dx) + np.pi/2) % np.pi - np.pi/2)
print(angle)
# -
# 将第一个样本的标注框可视化后,发现角度确实是一个接近0的负角度,所以结果正确
# 到这,我们用于表征一次抓取的四个参数就都提取出来了,把这几个功能集成到类里面,这块的完整代码如下:
# +
import numpy as np
from skimage.draw import polygon
class Grasp:
'''定义一个抓取框处理类,主要功能是将由四个角点坐标定义的原始的抓取框提取转化为训练所定义的表征信息,如中心位置,面积角度等,并根据图像处理需求完成一些相应的其他功能'''
def __init__(self,points):
'''
:功能 : 类初始化函数
:参数 points : 2darry,定义一个抓取框的四个角点坐标信息[[x1,y1],[x2,y2],[x3,y3],[x4,x4]]
'''
self.points = points
@property#类装饰器,可以让一个类的方法以属性的方式被调用
def center(self):
'''
:功能 : 计算本类中所包含的抓取框的中心点
:返回 1darray : 本类所包含抓取框的中心点array[x,y]
'''
        center = np.mean(self.points, axis=0).astype(int)
return center
@property
def width(self):
'''
:功能 : 计算本类中所包含的抓取框手指张开宽度width
:返回 1darray : 本类所包含抓取框的长度[width]
'''
#第二个点和第三个点之间的间距长度
dx = self.points[0][0] - self.points[1][0]
dy = self.points[0][1] - self.points[1][1]
return np.sqrt(dx**2+dy**2)
@property
def length(self):
'''
:功能 : 计算本类中所包含的抓取框长度(手指张开宽度width的邻边)
:返回 1darray : 本类所包含抓取框的长度[length]
'''
#第二个点和第三个点之间的间距长度
dx = self.points[1][0] - self.points[2][0]
dy = self.points[1][1] - self.points[2][1]
return np.sqrt(dx**2+dy**2)
@property
def angle(self):
'''
:功能 : 计算本类中所包含的抓取框相对于x轴正方向的偏转角度
:返回 1darray : 本类所包含抓取框的旋转角度(弧度值)
'''
dx = self.points[0][0] - self.points[1][0]
dy = self.points[0][1] - self.points[1][1]
return (np.arctan2(-dy,dx) + np.pi/2) % np.pi - np.pi/2
def compact_polygon_coords(self,shape):
'''
:功能 : 计算并返回本抓取矩形内部点的坐标
:参数 shape : tuple, optional.Image shape which is used to determine the maximum extent of output pixel coordinates.
:返回 ndarray : rr,cc 本抓取框内部点的行列坐标
'''
return polygon(self.points[:,0],self.points[:,1],shape)
# -
#下面测试定义的模型输出
gr = Grasp(rectangles0[0])
gr.center
gr.width
gr.length
gr.angle
# 有了这些参数之后,如何将其构建成为输出是一个值得考虑的问题,我之前一直想不懂该怎么搞,看了ggcnn之后才明白,是这样的,每个抓取框都是由上面的这几个参数完全定义的,而且抓取框也覆盖了图中的一个矩形范围,那么我就针对每个参数都生成一张映射图,比如一张图就代表角度,没有抓取框的区域,像素值全为0,有抓取框覆盖的区域,像素值就为该抓取框的角度值,这样,就建立了映射关系,前面的工作已经能够读出每个抓取框的各个参数的值,后面要做一个工作,就是把它所在的区域找出来,然后把这些值赋进去,这部分用到了compact_polygon_coords()函数,前面已经定义好了,就是直接使用现有函数,根据四个角点确定一个矩形,该函数(skimage.draw.polygon)可以返回该矩形内部点的坐标,然后直接赋值即可。
print(gr.points[:,0])
print(gr.points[:,1])
# 测试一下这个新定义的compact_polygon_coords()函数是否正常工作
# +
import matplotlib.pyplot as plt
import cv2
img = np.zeros([300,300])#处理之前这个图的大小就是480*640
rr,cc = gr.compact_polygon_coords((300,300))
img[cc,rr] = 1.0#不晓得为什么这里我的rr,cc要反过来才对,而gg-cnn里面却没有反,估计是什么地方做了处理吧
plt.subplot(211)
plt.imshow(img)
plt.subplot(212)
plt.imshow(cv2.imread(rgbf[0]))
plt.show()
# -
# 前面的工作其实已经结束了,做到后面的时候考虑到一个对象肯定是标注了多个抓取框的,而上面的类仅仅能通过四个点去定义一个抓取,出于后续工作方便操作的需求,这里又回来重新定义一个多抓取框类Grasps,能够一次性地从原始标注文件中读出全部的抓取框并生成上面的对象,同时,在这个多抓取框类中写函数的话也方便将同一对象中所包含的多个抓取框的信息融合到一起来作为真实标注。
class Grasps:
'''定义一个多抓取框处理类,主要功能是从原始的标注文件中读出多个抓取框并将其构建成多个单一的抓取框Grasp类,同时能够对这些属于同一对象的多个抓取框对象进行一些数据的统一集成处理'''
def __init__(self,grs = None):
'''
:功能 : 多抓取框类初始化函数,功能是将属于一个对象的多个单独的抓取框集成到一个类里面来。
:参数 grs : list,包含一个对象中多个抓取框类的列表
'''
if grs:
self.grs = grs
else:
self.grs = []
def generate_img(self,pos = True,angle = True,width = True,shape = (480,640)):
'''
:功能 :将本对象的多个的抓取框信息融合并生成指定的映射图,以这种方式返回定义一个抓取的多个参数,包括中心点,角度,宽度
:参数 pos :bool,是否生成返回位置映射图
:参数 angle :bool,是否生成返回角度映射图
:参数 width :bool,是否生成返回夹爪宽度映射图
:参数 shape :tuple
:返回 :融合本对象的多个抓取框信息的映射图
'''
if pos:
pos_out = np.zeros(shape)
else:
pos_out = None
if angle:
angle_out = np.zeros(shape)
else:
angle_out = None
if width:
width_out = np.zeros(shape)
else:
width_out = None
for gr in self.grs:
rr,cc = gr.compact_polygon_coords(shape)#shape的指定还是很重要的,可以考虑图像边界
if pos:
pos_out[cc,rr] = 1.0
if angle:
angle_out[cc,rr] = gr.angle
if width:
width_out[cc,rr] = gr.width
return pos_out,angle_out,width_out
# 测试一下功能:
#使用之前定义好的程序从文件中载入抓取框
rectangles = get_rectangles(graspf[0])
# +
#将多个抓取框先存到一个列表里面,然后用它构建一个刚定义的多抓取框对象
grs = []
for rectangle in rectangles:
grs.append(Grasp(rectangle))
grasps = Grasps(grs)
# -
grasps.grs[0].width
pos_img,angle_img,width_img = grasps.generate_img(shape = (480,640))
# +
plt.figure(figsize = (30,30))
plt.subplot(411)
plt.imshow(cv2.imread(rgbf[0]))
plt.title('raw_img')
plt.subplot(412)
plt.imshow(pos_img)
plt.title('pos_img')
plt.subplot(413)
plt.imshow(angle_img)
plt.title('angle_img')
plt.subplot(414)
plt.imshow(width_img)
plt.title('width_img')
plt.show()
# -
# 上面的方法还是集成度不够,需要先用函数读取框然后再生成类,下面给类添加一个从源文件载入的函数load_from_cornell_files
class Grasps:
'''定义一个多抓取框处理类,主要功能是从原始的标注文件中读出多个抓取框并将其构建成多个单一的抓取框Grasp类,同时能够对这些属于同一对象的多个抓取框对象进行一些数据的统一集成处理'''
def __init__(self,grs = None):
'''
:功能 : 多抓取框类初始化函数,功能是将属于一个对象的多个单独的抓取框集成到一个类里面来。
:参数 grs : list,包含一个对象中多个抓取框类的列表
'''
if grs:
self.grs = grs
else:
self.grs = []
@classmethod
def load_from_cornell_files(cls,cornell_grasp_files):
'''
:功能 : 从一个graspf文件中读取载入多个抓取框并构建成为这个类(其实就是从之前的那个get_rectangles改的)
:参数 grs : list,包含一个对象中多个抓取框类的列表
'''
grasp_rectangles = []
with open(cornell_grasp_files,'r') as f:
while True:
grasp_rectangle = []
point0 = f.readline().strip()
if not point0:
break
point1,point2,point3 = f.readline().strip(),f.readline().strip(),f.readline().strip()
if point0[0] == 'N':#后面发现有些坐标点坐标是NaN,会报错,这里处理一下,暂时还不晓得gg-cnn里面怎么处理的
break
grasp_rectangle = np.array([str2num(point0),
str2num(point1),
str2num(point2),
str2num(point3)])
grasp_rectangles.append(Grasp(grasp_rectangle))#找出各个框后就直接用它构造Grasp对象了
return cls(grasp_rectangles)#返回实例化的类
def generate_img(self,pos = True,angle = True,width = True,shape = (480,640)):
'''
:功能 :将本对象的多个的抓取框信息融合并生成指定的映射图,以这种方式返回定义一个抓取的多个参数,包括中心点,角度,宽度
:参数 pos :bool,是否生成返回位置映射图
:参数 angle :bool,是否生成返回角度映射图
:参数 width :bool,是否生成返回夹爪宽度映射图
:参数 shape :tuple
:返回 :融合本对象的多个抓取框信息的映射图
'''
if pos:
pos_out = np.zeros(shape)
else:
pos_out = None
if angle:
angle_out = np.zeros(shape)
else:
angle_out = None
if width:
width_out = np.zeros(shape)
else:
width_out = None
for gr in self.grs:
rr,cc = gr.compact_polygon_coords(shape)#shape的指定还是很重要的,可以考虑图像边界
if pos:
pos_out[cc,rr] = 1.0
if angle:
angle_out[cc,rr] = gr.angle
if width:
width_out[cc,rr] = gr.width
return pos_out,angle_out,width_out
grasps = Grasps.load_from_cornell_files(graspf[0])
# +
pos_img,angle_img,width_img = grasps.generate_img(shape = (480,640))
plt.subplot(141)
plt.imshow(cv2.imread(rgbf[0]))
plt.title('raw_img')
plt.subplot(142)
plt.imshow(pos_img)
plt.title('pos_img')
plt.subplot(143)
plt.imshow(angle_img)
plt.title('angle_img')
plt.subplot(144)
plt.imshow(width_img)
plt.title('width_img')
plt.show()
# -
# 可以看到,结果输出是一样的,这个类到目前位置就定义完了,Grasp这个类是针对一个抓取框来将其坐标信息提取转化为中心坐标,角度,宽度等信息的,然后Grasps这个类是将输入同一个对象的多个抓取框信息整合的一个类
# # 3. Wrapping the dataset
# 为了可以更方便地喂入torch的网络,最好使用torh的dataset和dataloader进行封装,这个之前做过一次了,不过用的是Iterable类,这里应该使用map类,不过都差不多,那么就定义一个cornell数据集载入类如下:
# +
# 这个类的定义过程之前在mnist数据集的过程中已经学得比较明确了,直接照搬流程,确定输入输出即可
# 另外,这个地方的一个难点在于,需要将输入和输出都添加封装进来,因为毕竟是数据集嘛,肯定要既有输入也要有target的
import torch
class Cornell(torch.utils.data.Dataset):
#载入cornell数据集的类
def __init__(self,file_dir,include_depth=True,include_rgb=True,start = 0.0,end = 1.0):
'''
:功能 : 数据集封装类的初始化函数,功能包括数据集读取,数据集划分,其他参数初始化等
:参数 file_dir : str,按照官方文档的示例和之前的经验,这里需要读入数据集,所以需要指定数据的存放路径
:参数 include_depth : bool,是否包含depth图像输入
:参数 include_rgb : bool,是否包含rgb图像输入
:参数 start,end : float,为了方便数据集的拆分,这里定义添加两个边界参数start,end
:返回 None
'''
super(Cornell,self).__init__()
#一些参数的传递
self.include_depth = include_depth
self.include_rgb = include_rgb
#去指定路径载入数据集数据
graspf = glob.glob(os.path.join(file_dir,'*','pcd*cpos.txt'))
graspf.sort()
l = len(graspf)
if l == 0:
raise FileNotFoundError('没有查找到数据集,请检查路径{}'.format(file_dir))
rgbf = [filename.replace('cpos.txt','r.png') for filename in graspf]
depthf = [filename.replace('cpos.txt','d.tiff') for filename in graspf]
#按照设定的边界参数对数据进行划分并指定为类的属性
self.graspf = graspf[int(l*start):int(l*end)]
self.rgbf = rgbf[int(l*start):int(l*end)]
self.depthf = depthf[int(l*start):int(l*end)]
@staticmethod
def numpy_to_torch(s):
'''
:功能 :将输入的numpy数组转化为torch张量,并指定数据类型,如果数据没有channel维度,就给它加上这个维度
:参数 s :numpy ndarray,要转换的数组
:返回 :tensor,转换后的torch张量
'''
if len(s.shape) == 2:
return torch.from_numpy(np.expand_dims(s, 0).astype(np.float32))
else:
return torch.from_numpy(s.astype(np.float32))
def get_rgb(self,idx):
'''
:功能 :读取返回指定id的rgb图像
:参数 idx :int,要读取的数据id
:返回 :ndarray,处理好后的rgb图像
'''
rgb_img = Image.from_file(self.rgbf[idx])
rgb_img.normalize()
return rgb_img.img
#因为有时候只输入RGB三通道信息,所以,定义两个返回函数,一个读取RGB一个读取深度
def get_depth(self,idx):
'''
:功能 :读取返回指定id的depth图像
:参数 idx :int,要读取的数据id
:返回 :ndarray,处理好后的depth图像
'''
#目前这个DepthImage类还没有定义,后面仿照Image类给它定义一下
depth_img = DepthImage.from_file(self.depthf[idx])
depth_img.normalize()
return depth_img.img
def get_grasp(self,idx):
'''
:功能 :读取返回指定id的抓取标注参数并将多个抓取框的参数返回融合
:参数 idx :int,要读取的数据id
:参数 pos :bool,是否生成返回位置映射图
:参数 angle :bool,是否生成返回角度映射图
:参数 width :bool,是否生成返回夹爪宽度映射图
:返回 :以图片的方式返回定义一个抓取的多个参数,包括中心点,角度,宽度和长度
'''
grasp_rectangles = Grasps.load_from_cornell_files(self.graspf[idx])
pos_img,angle_img,width_img = grasp_rectangles.generate_img(shape = (480,640))
return pos_img,angle_img,width_img
def __getitem__(self,idx):
# 载入深度图像
if self.include_depth:
depth_img = self.get_depth(idx)
x = self.numpy_to_torch(depth_img)
# 载入rgb图像
if self.include_rgb:
rgb_img = self.get_rgb(idx)
#torch是要求channel-first的,检测一下,如果读进来的图片是channel-last就调整一下,ggcnn中目前我没看到在哪调整的,但肯定是做了的
if rgb_img.shape[2] == 3:
rgb_img = np.moveaxis(rgb_img,2,0)
x = self.numpy_to_torch(rgb_img)
if self.include_depth and self.include_rgb:#如果灰度信息和rgb信息都要的话,就把他们堆到一起构成一个四通道的输入,
x = self.numpy_to_torch(
np.concatenate(
(np.expand_dims(depth_img,0),rgb_img),0
)
)
# 载入抓取标注参数
pos_img,angle_img,width_img = self.get_grasp(idx)
# 处理一下角度信息,因为这个角度值区间比较大,不怎么好处理,所以用两个三角函数把它映射一下:
cos_img = self.numpy_to_torch(np.cos(2*angle_img))
sin_img = self.numpy_to_torch(np.sin(2*angle_img))
pos_img = self.numpy_to_torch(pos_img)
# 限定抓取宽度范围并将其映射到[0,1]
width_img = np.clip(width_img, 0.0, 150.0)/150.0
width_img = self.numpy_to_torch(width_img)
return x,(pos_img,cos_img,sin_img,width_img)
#映射类型的数据集,别忘了定义这个函数
def __len__(self):
return len(self.graspf)
# -
cornell_data = Cornell('../cornell')
dataset = torch.utils.data.DataLoader(cornell_data,batch_size = 64)
for x,y in dataset:
print(x.shape,y[0].shape,y[1].shape,y[2].shape,y[3].shape)
break
# 从上面的shape看起码是没有问题的
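
# An added sketch (not part of the original code): once the network predicts these four maps, the angle can be
# recovered from the cos/sin encoding used in `__getitem__`, and one grasp can be read off at the most confident
# pixel. The helper below is illustrative only and assumes plain numpy arrays (call .numpy() on the tensors first).

# +
def decode_grasp(pos_img, angle_cos_img, angle_sin_img, width_img, width_scale=150.0):
    '''Pick the most confident pixel and decode its (row, col), angle and gripper width from the four maps.'''
    idx = np.unravel_index(np.argmax(pos_img), pos_img.shape)
    angle = 0.5 * np.arctan2(angle_sin_img[idx], angle_cos_img[idx])  # inverse of the 2*angle encoding
    width = width_img[idx] * width_scale                              # undo the [0, 1] scaling used above
    return idx, angle, width
# -
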
# # 4. Building the network model
# 数据集处理封装完毕之后,问题的关键就到了建立网络模型预测的阶段,相对而言,这块要跑通的话是比较容易的,关键就是效果好不好的问题,网络名称和结构直接照搬GG-CNN的即可,开干。
# +
import torch.nn as nn
import torch.nn.functional as F
#网络参数定义
filter_sizes = [32, 16, 8, 8, 16, 32]
kernel_sizes = [9, 5, 3, 3, 5, 9]
strides = [3, 2, 2, 2, 2, 3]
class GGCNN(nn.Module):
#定义抓取预测模型的结构、前向传递过程以及损失计算
def __init__(self,input_channels = 1):
'''
:功能 :类初始化函数
:参数 input_channels :int,输入数据的通道数,1或3或4
:返回 :None
'''
super(GGCNN,self).__init__()
#网络结构定义,直接照搬GGCNN 三层卷积三层反卷积
self.conv1 = nn.Conv2d(input_channels,filter_sizes[0],kernel_sizes[0],stride=strides[0],padding=3)
self.conv2 = nn.Conv2d(filter_sizes[0], filter_sizes[1],kernel_sizes[1], stride=strides[1], padding=2)
self.conv3 = nn.Conv2d(filter_sizes[1], filter_sizes[2],kernel_sizes[2], stride=strides[2], padding=1)
self.convt1 = nn.ConvTranspose2d(filter_sizes[2], filter_sizes[3], kernel_sizes[3], stride=strides[3], padding=1, output_padding=1)
self.convt2 = nn.ConvTranspose2d(filter_sizes[3], filter_sizes[4], kernel_sizes[4], stride=strides[4], padding=2, output_padding=1)
self.convt3 = nn.ConvTranspose2d(filter_sizes[4], filter_sizes[5], kernel_sizes[5], stride=strides[5], padding=3, output_padding=1)
self.pos_output = nn.Conv2d(filter_sizes[5], 1, kernel_size=2)
self.cos_output = nn.Conv2d(filter_sizes[5], 1, kernel_size=2)
self.sin_output = nn.Conv2d(filter_sizes[5], 1, kernel_size=2)
self.width_output = nn.Conv2d(filter_sizes[5], 1, kernel_size=2)
#使用Glorot初始化法初始化权重
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
nn.init.xavier_uniform_(m.weight, gain=1)
def forward(self, x):
'''
:功能 :前向传播函数
:参数 x :tensors,一次网络输入
:返回 :tensors,各参数的预测结果
'''
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.convt1(x))
x = F.relu(self.convt2(x))
x = F.relu(self.convt3(x))
pos_output = self.pos_output(x)
cos_output = self.cos_output(x)
sin_output = self.sin_output(x)
width_output = self.width_output(x)
return pos_output, cos_output, sin_output, width_output
def compute_loss(self, xc, yc):
'''
:功能 :损失计算函数
:参数 xc :tensors,一次网络输入
:参数 yc :tensors,网络输入对应真实标注信息
:返回 :dict,各损失和预测结果
'''
y_pos, y_cos, y_sin, y_width = yc
pos_pred, cos_pred, sin_pred, width_pred = self(xc)
p_loss = F.mse_loss(pos_pred, y_pos)
cos_loss = F.mse_loss(cos_pred, y_cos)
sin_loss = F.mse_loss(sin_pred, y_sin)
width_loss = F.mse_loss(width_pred, y_width)
return {
'loss': p_loss + cos_loss + sin_loss + width_loss,
'losses': {
'p_loss': p_loss,
'cos_loss': cos_loss,
'sin_loss': sin_loss,
'width_loss': width_loss
},
'pred': {
'pos': pos_pred,
'cos': cos_pred,
'sin': sin_pred,
'width': width_pred
}
}
# -
net = GGCNN(input_channels=4)
# # 5. How to compute the network loss
#
# 其实经过前面的所有定义,网络的输入输出已经确定了,理论上说,现在就可以开始训练,计算损失,反向传播优化了,但是,有个问题,就是图像尺度匹配的问题,前面我一路写过来,其实是对ggcnn的程序做了大量删减的,力求基本功能可以实现,图像处理,数据增强等这中我都是删减掉了的,所以说我现在的数据集中的图像,输入图像的长宽还是默认的480*640,没有做任何的修剪增强,输出图像的尺度也是设置的480*640,首先这个输入并不是一个方形就不说了,根据上面的网络参数,最后输出的形状是什么我现在都不能确定,可能是要存在问题的,问题的解决方式也很简单,就是把ggcnn中关于图像裁剪这块的程序添加进来即可,但是呢,本着探索的精神,自己做点工作吧,所以,首先我现在想先测试一下网络的输出
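
# One possible fix (an added sketch, not the original GG-CNN code): centre-crop both the input image and the
# three label maps to the same square window before they go into the network. The 300x300 output size below is
# just an assumed value for illustration.

# +
def center_crop(img, out_size=300):
    '''Crop an H x W (or H x W x C) array to an out_size x out_size window around its centre.'''
    h, w = img.shape[0], img.shape[1]
    top = (h - out_size) // 2
    left = (w - out_size) // 2
    return img[top:top + out_size, left:left + out_size]

# Note: only the images/maps are cropped here; a full solution would also shift the grasp annotations by (top, left).
# -
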
#首先准备好一个样本的数据
cornell_data = Cornell('../cornell')
dataset = torch.utils.data.DataLoader(cornell_data,batch_size = 1)
for x,y in dataset:
xc = x
yc = y
break
# 检查一下这个输入输出有没得问题
#先看一下形状
print(xc.shape)
print(yc[0].shape,yc[1].shape,yc[2].shape,yc[3].shape)
# 看上去没啥问题,再看看内容
xc[0].shape
xc[0].data.numpy().shape
plt.figure(figsize = (20,20))
plt.subplot(211)
plt.imshow(xc[0][0].data.numpy())
plt.subplot(212)
plt.imshow(np.moveaxis(xc[0][0:3].data.numpy()*255,0,2))
plt.show()
# 能可视化出来说明没太大问题,颜色看着不正常可能是因为正则化的原因。
yc[0][0].shape
# +
plt.figure(figsize = (15,15))
plt.subplot(221)
plt.imshow(yc[0][0][0].data.numpy())
plt.subplot(222)
plt.imshow(yc[1][0][0].data.numpy())
plt.subplot(223)
plt.imshow(yc[2][0][0].data.numpy())
plt.subplot(224)
plt.imshow(yc[3][0][0].data.numpy())
plt.show()
# -
# 这个看起来也没啥问题
# +
#实例化网络
net = GGCNN(4)
pos,cos,sin,width = net.forward(xc)
# -
pos.shape
cos.shape
sin.shape
width.shape
# 看,这个形状就跟预期想得就不太一样,这样损失的计算肯定就会出问题,解决方式有俩,要么调节网络的参数使得输出匹配,要么调节输入的尺寸让它来经过这个网络之后能输出合适的参数。
# 下面先集成一下之前的这些函数到一个py文件,然后调节好输入输出之后就开始训练
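
# To see where the mismatch comes from, the standard output-size formulas can be chained over the layers defined
# above (an added sketch; the kernel/stride/padding values are the GGCNN ones from section 4):

# +
def conv_out(size, k, s, p):
    return (size + 2 * p - k) // s + 1

def convt_out(size, k, s, p, op):
    return (size - 1) * s - 2 * p + k + op

for size in (480, 640):
    out = size
    for k, s, p in [(9, 3, 3), (5, 2, 2), (3, 2, 1)]:
        out = conv_out(out, k, s, p)
    for k, s, p, op in [(3, 2, 1, 1), (5, 2, 2, 1), (9, 3, 3, 1)]:
        out = convt_out(out, k, s, p, op)
    print(size, '->', conv_out(out, 2, 1, 0))  # the four 1-channel output convs use kernel_size=2, no padding
# -
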
| 25,235 |
/2018/AD/Task/Hometask2.ipynb | e31ac0d5ae8445aa3a3b68004af1afd77312ec7a | [] | no_license | andriygav/School | https://github.com/andriygav/School | 1 | 2 | null | null | null | null | Jupyter Notebook | false | false | .py | 17,804 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# In this notebook you need to implement the kNN (k-nearest neighbours) method.
#
# Before starting, it is recommended to review the lecture at https://github.com/andriygav/School/blob/master/2018/Анализ%20данных/Lecture/Lecture2.pdf
#
#
class kNN:
def __init__(self, X_train, y_train, k = 9, M = 2):
# arguments: X_train is a matrix of shape [n, d],
# y_train is a matrix of shape [d],
# k is a number of neighbors which we wanna use
# M is a number of classes
# returns: nothing
# This function should save all data which users give in init
# your code here
pass
def predict(self, X):
# arguments: X is a matrix of shape [n, d],
# where n is a number of data for which I wanna get result
# d is a number of dimensions of data (in this task it's 2)
# returns: Y is a matrix of shape [d]
# This function should return prediction for inoput data X
# your code here
return
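
# One possible reference implementation (an added sketch, not the official solution). It is kept in a separate
# class so the assignment stub above stays untouched; e.g. kNNReference(X_train, y_train, k=9, M=2).predict(X_test).
class kNNReference:
    def __init__(self, X_train, y_train, k=9, M=2):
        self.X_train = X_train
        self.y_train = y_train
        self.k = k
        self.M = M
    def predict(self, X):
        # Euclidean distances between every query point and every training point: shape [len(X), len(X_train)]
        distances = np.linalg.norm(X[:, None, :] - self.X_train[None, :, :], axis=-1)
        # Labels of the k nearest neighbours, then a majority vote per query point
        nearest = self.y_train[np.argsort(distances, axis=1)[:, :self.k]]
        return np.array([np.bincount(row, minlength=self.M).argmax() for row in nearest])
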
# +
# Это данные на которым будем тестить
np.random.seed(0)
Number_of_data_for_one_classes = 100
X_0 = np.random.multivariate_normal(mean = [-1,-1], cov = np.eye(2), size = Number_of_data_for_one_classes)
X_1 = np.random.multivariate_normal(mean = [1,1], cov = np.eye(2), size = Number_of_data_for_one_classes)
Y_0 = np.zeros(Number_of_data_for_one_classes, dtype=np.int64)
Y_1 = np.ones(Number_of_data_for_one_classes, dtype=np.int64)
X = np.vstack([X_0, X_1])
Y = np.hstack([Y_0, Y_1])
# -
X_train, X_test, y_train, y_test = train_test_split(X, Y)
# +
# Покажем размерности данных для тренировочного сэта
print(X_train.shape, y_train.shape)
# +
# Покажем размерности данных для тестового сэта
print(X_test.shape, y_test.shape)
# +
# Изобразим данные, чтобы понимать что мы вообще хотим классифицировать
plt.plot(X_train[np.where(y_train == 0)[0],0], X_train[np.where(y_train == 0)[0],1], 'bo', label = 'class 0')
plt.plot(X_train[np.where(y_train == 1)[0],0], X_train[np.where(y_train == 1)[0],1], 'ro', label = 'class 1')
plt.legend(loc = 'best')
plt.show()
# +
# Создадим объект для метода kNN
# в который дадим тренировочный сэт,
# количество классов, и количество учитываемых соседей
model = kNN(X_train, y_train, M=2, k=9)
# +
# Получим ответы на тесте
y_test_predicter = model.predict(X_test)
# +
# Выведем ответы нашей модели
print(y_test_predicter)
# +
# Выведем истинные ответы
print(y_test)
# +
# Посчитаем точность
print(("Accuracy = %f%c")%(100*np.sum(y_test_predicter == y_test)/y_test.shape[0], '%'))
| 2,973 |
/Practica Functioneel Programmeren.ipynb | 3cbf7336ee9defe800a36dfa6bf354bce972f53c | [] | no_license | wvanbreukelen/practicum-atp | https://github.com/wvanbreukelen/practicum-atp | 0 | 0 | null | 2019-04-17T12:47:39 | 2018-03-14T11:39:22 | null | Jupyter Notebook | false | false | .py | 41,879 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Practica Functioneel Programmeren
# Dit bestand bevat alle practica voor de module Functioneel Programmeren van het vak Advanced Technical Programming. De module beslaat vier colleges, waarvan de eerste drie practicumopdrachten geassocieerd hebben. Dit Jupyter Notebook bevat alle practica; probeer deze na iedere les te maken en in te leveren voor feedback. Natuurlijk kun je ook vast vooruit kijken.
#
# Een Jupyter Notebook bestaat uit cells met code of toelichting. Sommige code-cells hoeven enkel uitgevoerd te worden om functies beschikbaar te maken, anderen moet je zelf in vullen (opdrachten staan duidelijk met kopjes gemarkeerd, je moet dan zelf de delen met `TODO` invullen). Je kunt de code per cel uitvoeren door de cel te selecteren en "run cell" te kiezen.
# De cel direct hieronder laadt de nodige modules in en definiëert enkele typevariabelen die we later nodig hebben. Als je code niet naar verwachting werkt kan het zijn dat je deze cell moet runnen, of een van de andere cels vóór de cel met error. Let erop dat als je de Jupyter-server afsluit en later verder gaat moet je deze cells opnieuw runnen.
# +
from functools import reduce
from typing import List, Callable, TypeVar, Tuple, Union, Generator
from itertools import takewhile
import re
A = TypeVar('A')
B = TypeVar('B')
C = TypeVar('C')
# -
# ### 3.1 — Functies
# #### Opdracht 3.1.1 — To function or not to function [PORTFOLIO]
# Hieronder staat een aantal functies gedefiniëerd. Maar let op, want het zijn niet allemaal daadwerkelijk functies zoals we die bij functioneel willen zien. Welke subroutines zijn functies, en welke niet? Waarom? Maak onderscheid tussen subroutines die alleen intern state gebruiken (en dus verder referentiëel transparant zijn) en functies die daadwerkelijk globale state aanpassen of IO gebruiken.
#
# NB: houdt er rekening mee dat puurheid erfelijk is. Een functie die een niet-pure functie gebruikt is zelf ook niet puur. Voor deze opdracht mag je ervan uitgaan dat alle built-in functies die geen IO doen of side-effects hebben puur zijn (dat is niet het geval, maar het is voor elk van deze functies mogelijk een pure functie te schrijven).
# +
# Referentially transparent: the loop and enumerate only touch local (internal) state, no IO or globals, so from the outside this behaves as a pure function.
def wave(string):
output = ""
for index, letter in enumerate(string):
if index % 2 == 0:
output += letter.upper()
else:
output += letter.lower()
return output
# Not a pure function: it performs IO via input(); the internal while loop by itself would not break referential transparency.
def wait_for_password():
password = input("Say the magic word! ")
while password != "secret":
password = input("Say the magic word! ")
# Not a pure function: it mutates global state.
def set_width(new_width):
global width
width = new_width
# A pure function: no global state is touched and there is no internal state; the result depends only on the argument.
def make_sense(temp_in_fahrenheit):
return ((temp_in_fahrenheit+459.67) * 5/9)
# Referentially transparent: takewhile and the local variables are internal state only, so the same input always yields the same output.
def latin(word):
vowels = "aeiouAEIOU"
if word[0] in vowels:
return word + "ay"
else:
cons_cluster = "".join(takewhile(lambda x: x not in vowels, word))
return(word[len(cons_cluster):] + cons_cluster + "ay")
# Referentially transparent as well, since latin() is referentially transparent and re.sub has no side effects here.
def latinise(string):
return re.sub(r'[a-zA-Z]+', lambda m: latin(m.group(0)), string)
# Not a pure function: it calls print(), which is IO (and purity is hereditary, so anything calling this is also impure).
def say_it_in_latin(string):
print(latinise(string))
# -
# ***
# ## College 3
# De opdrachten hieronder dienen gemaakt te worden na afloop van college 3.
# ### 3.2 — Recursie
# Tijdens het college hebben we gezien hoe je met recursie een loop kan simuleren zonder mutable state. Vertaal de "functies" hieronder naar recursieve functies.
# #### Opdracht 3.2.1 — `sum_of_squares`
# +
def sum_of_squares(values: List[float]) -> float:
result = 0
for x in values:
result += x ** 2
return result
def better_sum_of_squares(values: List[float]) -> float:
if len(values) == 0:
return 0
else:
return (values[-1] ** 2) + better_sum_of_squares(values[:-1])
return None
print("Result:", sum_of_squares([1, 2, 3]))
print("Result 2:", better_sum_of_squares([1, 2, 3]))
print("Sum of Squares:", (sum_of_squares([1,2,3]) == better_sum_of_squares([1,2,3])))
# -
# #### Opdracht 3.2.2 — `repeat`
# +
def repeat(item: A, times: int) -> List[A]:
result = []
for _ in range(0, times):
result = result + [item]
return result
def better_repeat(item: A, times: int) -> List[A]:
if times <= 0:
return []
else:
return [item] + better_repeat(item, times - 1)
print("Result:", repeat("X", 3))
print("Result 2:", better_repeat("X", 3))
print("repeat:", (repeat("X", 3) == better_repeat("X", 3)))
# -
# #### Opdracht 3.2.3 — `reverse`
# +
def reverse(list: List[A]) -> List[A]:
result = []
for item in list:
result = [item] + result
return result
def better_reverse(list: List[A]) -> List[A]:
if len(list) == 0:
return []
else:
return [list[-1]] + better_reverse(list[:-1])
print("Result:", reverse([1,2,3,4,5]))
print("Result 2:", better_reverse([1,2,3,4,5]))
print("reverse:", (reverse([1,2,3,4,5]) == better_reverse([1,2,3,4,5])))
# -
# #### Opdracht 3.2.4 — Tail recursion
# Hieronder staat de recursieve functies voor de Fibonacci-getallen, zoals deze in de reader is gedefiniëerd. Maak een versie van deze functie met tail recursie.
# +
def fib_rec(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fib_rec(n-1) + fib_rec(n-2)
def fib_tail(n, running_total = 0, next_total = 1):
    # Carry the two running Fibonacci values so the recursive call is in tail position.
    if n == 0:
        return running_total
    else:
        return fib_tail(n - 1, next_total, running_total + next_total)
print("fib:", (fib_rec(13) == fib_tail(13)))
# -
# ### 3.3 — Anonieme functies
# Zoals in imperatief programmeren een variabele een waarde (een string, een floating-point getal, etc.) kan bevatten, en deze variabelen gebruikt kunnen worden als argumenten en return values, kan dit bij functioneel programmeren ook met functies. Zo kan de functie $\lambda x. x+1$ toegekend worden aan de variabele `succ`. Op deze manier verschilt een lambda-functie niet veel van een subroutine zoals je die gewend bent: `succ_lambda` en `succ_func` zijn in het voorbeeld hieronder equivalent:
# +
def succ_func(x: int) -> int: # niet anoniem
return x+1
succ_lambda = lambda x: x+1 # anoniem en vervolgens gebonden
print(succ_func(41))
print(succ_lambda(41))
# -
# #### Opdracht 3.3.1 — Functies in lijsten
# Maak een lijst van tenminste vier anonieme functies die een geheel getal in een ander geheel getal omzetten.
# Maak vervolgens een functie die elke functie uit de lijst toepast op een getal (natuurlijk met recursie in plaats van een loop), zodat het resultaat een lijst met getallen is. De functie neemt twee argumenten: de lijst van functies en een getal om elke functie op toe te passen.
# +
my_functions = [lambda x: x + 1, lambda x: x * 2, lambda x: x * x, lambda x: x // 2]  # four int -> int functions
def apply_everything(functions: List[Callable[[int], int]], integer: int) -> int:
if len(functions) == 0:
return []
else:
return [functions[0](integer)] + apply_everything(functions[1:], integer)
print(apply_everything(my_functions, 42))
# -
# #### Opdracht 3.3.2 — Functies filteren [PORTFOLIO]
# Maak een tweede recursieve functie, die wederom een getal en een lijst van functies neemt. De functie kijkt voor elke functie uit de lijst kijkt wat het resultaat is, en levert een nieuwe functie-lijst op met enkel die functies waar het resultaat even was.
# +
my_functions = [lambda x: x + 1, lambda x: x + 2, lambda x: x * 2, lambda x: x * 3]
def filter_even_on_input(functions: List[Callable[[int], int]], input: int) \
-> List[Callable[[int], int]]:
if len(functions) == 0:
return []
else:
return ([] if functions[0](input) % 2 else [functions[0]]) + filter_even_on_input(functions[1:], input)
filtered_functions = filter_even_on_input(my_functions, 47)
print(filtered_functions)
print(apply_everything(my_functions, 47))
print(apply_everything(filtered_functions, 47)) # should only be evens
# -
# ***
# ## College 4
# De opdrachten hieronder dienen gemaakt te worden na afloop van college 4.
# ### 4.1 — Hogere-orde functies
# Vorig college hebben we kennis gemaakt met functies als first-class citizens. Vandaag gaan we zien wat ons dit aan expressiviteit op levert. Specifiek het feit dat een functie een parameter kan zijn voor een andere functie. Een functie die een functie als argument heeft noemen we een hogere-orde functie. Voorbeelden zijn `map` die een functie toepast op elk item in een lijst en `reduce` die recursief een lijst samenvoegt. Hieronder passen we bijvoorbeeld de `succ_lambda` toe op ieder element in een lijst:
list(map(succ_lambda, [1,2,3]))
# Het is nu niet nodig de functie aan een variabele toe te kennen (tenzij je de functie vaker nodig hebt natuurlijk); de volgende code doet hetzelfde, maar gebruikt de lambda direct als argument van map:
list(map(lambda x: x+1, [1,2,3]))
# #### Opdracht 4.1.1 — `reduce` met lambda
# Gebruik nu de functie `reduce` en een lambda $\lambda x y . x + 2y$ op de lijst `[1,2,4]`.
# +
print(reduce(lambda x, y: x + 2*y, [1, 2, 4]))  # (1 + 2*2) = 5, then (5 + 2*4) = 13
# -
# #### Opdracht 4.1.2 — `fac`
# Gebruik vervolgens `reduce`, een lambda en `range` om de faculteit van een input getal te geven. Je antwoord moet in de vorm van een lambda toegekend worden aan de variabele `fac`. Hieronder staat code die je zelf aan moet vullen, met een regel om je lambda te testen.
# +
fac = lambda n: reduce(lambda x, y: x * y, range(1, n + 1), 1)
print(fac(5))  # -> 120
# -
# #### Opdracht 4.1.3 — `foldr` met `reduce` [PORTFOLIO]
# Nu gaan we de functie `reduce` (of zoals we hem zijn tegengekomen: `foldl1`) gebruiken om een `foldr` te definiëren:
# +
def foldr(f: Callable[[A,B], B], init: B, list: List[A]) -> B:
if len(list) == 0:
return init
else:
head, *tail = list
return f(head, foldr(f, init, tail))
print(foldr(lambda x, y: x-y, 0, [1,2,3])) # -> 2
# -
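
# An alternative formulation (added sketch): the same foldr expressed directly in terms of reduce, as the text above hints.

# +
def foldr_with_reduce(f: Callable[[A, B], B], init: B, xs: List[A]) -> B:
    # reduce folds from the left, so feed it the reversed list and flip the argument order of f
    return reduce(lambda acc, x: f(x, acc), reversed(xs), init)

print(foldr_with_reduce(lambda x, y: x - y, 0, [1, 2, 3])) # -> 2
# -
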
# #### Opdracht 4.1.4 — `zip` [PORTFOLIO]
# Tijdens het college hebben we gezien dat `zipWith` uit te drukken is in `zip`. Andersom kan natuurlijk ook: druk `zip` uit met behulp van `zipWith`.
# +
def zip_with(f : Callable[[A, B], C], xs : List[A], ys : List[B]) -> List[C]:
if len(xs) == 0 or len(ys) == 0:
# Base case
return []
else:
x, *xrest = xs
y, *yrest = ys
return [f(x,y)] + zip_with(f, xrest, yrest)
def zip(xs : List[A], ys : List[B]) -> List[Tuple[A,B]]:
return zip_with(lambda x, y: (x, y), xs, ys)
print(zip([1,2],[3,4])) # --> [(1, 3), (2, 4)]
print(zip([1,2],[3])) # --> [(1, 3)]
print(zip([1,2],[])) # --> []
# -
# ***
# ## College 5
# De opdrachten hieronder dienen gemaakt te worden na afloop van college 5.
# ### 5.1 — Recursieve Datatypes
# In de slides hebben we een recursieve definitie van de natuurlijke getallen gezien, de zogenaamde Peano nummers. Hieronder zie je een voorbeeld-implementatie in Python:
# +
class Peano: # Python staat niet toe dat we Peano als type gebruiken voordat de klasse gedefinieerd is.
def phantom(): # Daarom definieren we hem twee keer. De tweede keer bestaat Peano reeds en kunnen
return None # we deze als type gebruiken.
class Peano:
def __init__(self, x: Union[Peano,None]) -> Peano:
self.val = x
@classmethod
def succ(cls, x: Union[Peano,None]) -> Peano:
return Peano(x)
@classmethod
def zero(cls) -> Peano:
return Peano(None)
def __int__(self):
if self.val == None:
return 0
else:
return 1 + int(self.val)
def __str__(self):
if self.val == None:
return "Z"
else:
return "S(" + str(self.val) + ")"
peano_zero = Peano.zero()
peano_one = Peano.succ(peano_zero) # Peano(Peano.zero)
peano_two = Peano.succ(peano_one) # Peano(Peano(Peano.zero))
print(peano_zero)
print(peano_one)
print(peano_two)
print(int(peano_zero))
print(int(peano_one))
print(int(peano_two))
# -
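
# A small added sketch (not part of the assignment text): arithmetic on Peano numbers is defined by recursion on
# the structure, e.g. addition by peeling successors off the first argument.

# +
def peano_add(a: Peano, b: Peano) -> Peano:
    if a.val is None: # 0 + b = b
        return b
    return Peano.succ(peano_add(a.val, b)) # S(a) + b = S(a + b)

print(int(peano_add(peano_two, peano_one))) # -> 3
# -
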
# #### Opdracht 5.1.1 — Natuurlijke getallen
# Er zijn echter meerdere recursieve definities te bedenken; een daarvan gaan jullie zelf in Python definiëren. Deze ziet er als volgt uit:
#
# - $0$ is een natuurlijk getal
# - Als $n$ een natuurlijk getal is, dan is $2n$ een natuurlijk getal
# - Als $n$ een natuurlijk getal is, dan is $2n+1$ een natuurlijk getal
#
# Ook met deze definitie zijn alle natuurlijke getallen te definiëren. Maar, _don't take my word for it_, en check het zelf: Definiëer een datatype voor de natuurlijke getallen zoals hierboven beschreven. Defineer variabelen `one`, `two`...`twenty` in deze notatie. Schrijf ook overal de type-annotaties bij!
#
# Wat valt je op? Is er een formule te verzinnen om gegeven een getal te bepalen hoeveel constructors genest moeten worden? Is er een snelle manier om, zeg, 6561 in deze notatie om te zetten?
#
# *Bonus:* wat moeten we aanpassen om $0$ als waarde te verbieden?
# +
class Number:
def phantom():
return None # Hier hoef je niets aan te doen
class Number:
def __init__(self):
None #TODO
@classmethod
def d(cls, v): # d = Double
None #TODO
@classmethod
def p(cls, v): # p = double Plus one
None #TODO
@classmethod
def z(cls): # z = Zero
None #TODO
def __int__(self): # waarde weergeven als int
None #TODO
def __str__(self): # waarde weergeven als string van de vorm P(D(P(P(Z))))
None #TODO
z = Number.z()
def d(v: Number) -> Number:
return Number.d(v)
def p(v: Number) -> Number:
return Number.p(v)
zero = z # 0
one = p(z) # 1
two = None #TODO
three = None #TODO
four = None #TODO
five = None #TODO
six = None #TODO
seven = None #TODO
eight = None #TODO
nine = None #TODO
ten = None #TODO
eleven = None #TODO
twelve = None #TODO
thirteen = None #TODO
fourteen = None #TODO
fifteen = None #TODO
sixteen = None #TODO
seventeen = None #TODO
eighteen = None #TODO
nineteen = None #TODO
twenty = None #TODO
""" # Zodra alles is ingevuld kun je deze map gebruiken om alle getallen te printen. Let op! Om deze map te laten
# werken moeten er geen None elementen in de lijst meer zitten en moet de functie int() voor alle elementen
# gedefinieerd zijn (alles moet dus een Number zijn, en __int__(self) moet voor Number geimplementeerd zijn.
# Tot die tijd krijg je een error, dus kun je de map beter tussen tripple quotes laten staan...
list(map(lambda num: print(int(num), num), [zero, one, two, three, four, five, six, seven, eight, nine, ten,
eleven, twelve, thirteen, fourteen, fifteen, sixteen, seventeen,
eighteen, nineteen, twenty]))
"""
# -
# ### 5.2 — Functionele Datastructuren
# Hieronder zien we een iets meer uitgebreid voorbeeld van een functionele datastructuur. Functies die de datastructuur aanpassen hebben de nieuwe structuur als return value, en het origineel blijft ongewijzigd.
#
# #### Opdracht 5.2.1 — Poor man's Git [PORTFOLIO]
# De datastructuur in deze opdracht geeft een elementaire boomstructuur (specifiek, een Rose Tree) waarin een revisie-geschiedenis bewaard wordt. De positie binnen de boom (de versie die "checked out" is) bevindt zich altijd in de root van de boom en kan dus snel opgevraagd worden. Het deel van de boom boven de root wordt in een `thread` variabele opgeslagen. Bij een `descend(hash)` wordt de node met `hash` de nieuwe root, en komt de oude root in de thread van de nieuwe root te staan. Bij een `ascend()` wordt deze van de thread afgehaald en als nieuwe root teruggegeven. We willen elk element in de boom slechts één keer opslaan; bij een `descend(hash)` is de nieuwe root niet ook nog steeds het kind van de oude root (die nu in de `thread` zit).
#
# <img src="zipper.png" />
#
# Een intuitieve manier om naar `descend(hash)` te kijken is alsof de hele boom aan de nieuwe root wordt opgetild. Alles wat zich boven de root bevond komt er nu onder te hangen.
#
# Vul de code aan waar `#TODO` staat en beantwoord de volgende vragen:
#
# - Wat zijn de sommen en producten in het type History?
# - Ascend en Descend worden genegeerd als ze onmogelijk zijn (een ascend vanuit de root node levert de root node op). Het nadeel van deze aanpak is dat ascend(descend(x,_)) == descend(ascend(x,_)) == x niet op gaat. Beschrijf een aanpak om dit wel zo te maken.
# - Wat zou een alternatieve manier zijn om met behulp van een som-type met falende ascends/descends om te gaan? Zou de bovenstaande gelijkheid met deze aanpak wel op gaan?
#
# Tip: schrijf een `__str__()` functie voor de klassen, zodat je kan zien wat er gebeurt...
# +
class Rev:
"""Phantom class om Rev als type te kunnen gebruiken."""
def phantom():
return None
class History:
"""Phantom class om History als type te kunnen gebruiken."""
def phantom():
return None
class Rev:
"""Klasse/datatype voor een revisie met description (commit message) en hash (voor nu gewoon een meegegeven int)."""
def __init__(self, desc: str, hash: int) -> Rev:
"""Constructor"""
self.__desc = desc
self.__hash = hash
def desc(self) -> str:
"""Getter voor de description."""
return self.__desc
def hash(self) -> int:
"""Getter voor de hash."""
return self.__hash
def update(self, desc: Union[None, str] = None, hash: Union [None, int] = None) -> Rev:
"""Schrijf een functie die de description en hash update. Denk eraan dat we een functionele datastructuur willen,
en dat we dus een nieuwe instantie maken in plaats van de oude aan te passen! De nieuwe desc en hash kunnen
None zijn, in welk geval de oude gebruikt wordt."""
        if hash is None and desc is None:
            # Nothing to change: return myself
            return self
        else:
            # Return a new instance, keeping the old value for any field that was not supplied
            return Rev(desc if desc is not None else self.__desc,
                       hash if hash is not None else self.__hash)
def is_hash(self, hash: int) -> bool:
"""Predicaat: Is this the Revision we're looking for?"""
return self.__hash == hash
class History:
"""Node in een Rose-tree van Rev's."""
def __init__(self, revision: Rev, children: List[History] = [], thread: Union[None,History] = None) -> History:
"""Constructor"""
self.rev = revision
self.children = children
self.thread = thread
def add_child(self, hist: History) -> History:
"""Schrijf een functie die een nieuwe subhistory toevoegt."""
# Append to children and return new instance.
return History(self.rev, self.children + [hist], self.thread)
def remove_child(self, hash: int) -> History:
"""Schrijf een functie die een subhistory verwijdert. Op het moment van een descend(hash) wordt de huidige node
in de thread opgeslagen. De oude root moet de nieuwe niet langer als child beschouwen (zie afbeelding)."""
return History(self.rev, list(filter(lambda x: not x.is_hash(hash), self.children)), self.thread)
def commit(self, rev: Rev) -> History:
"""Maak een History-object voor de nieuwe revision aan, hang deze onder de huidige root en descend."""
return self.add_child(History(rev)).descend(rev.hash())
def descend(self, hash: int) -> History:
"""Descend naar een child node met een gegeven hash."""
def find_correct_child(children: List[History]) -> Union[None, History]:
"""Zoek recursief de children door naar een child met de juiste hash..."""
if len(children) > 0:
head, *tail = children
if head.is_hash(hash): # Als gevonden: child wordt de nieuwe node, oude node zonder child in de thread
return head.add_to_thread(self.remove_child(hash))
else:
return find_correct_child(tail) # Anders zoeken we verder
else: # De lijst is leeg, er is niets gevonden
return self # We veranderen niets
return find_correct_child(self.children)
def ascend(self) -> History:
"""Schrijf een functie die ascend naar de bovenliggende node. Als er geen bovenliggende node in de thread is verandert
er niets."""
# Check above node within thread exists. If so, return myself.
if not self.thread:
return self
else:
# print(self.thread)
return self.thread.add_child(self.remove_thread())
def update(self, action: Callable[[Rev],Rev]) -> History:
"""Schrijf een functie die een (lambda) van Rev naar Rev accepteert en een nieuwe History maakt waarbij de meegegeven
action op de Rev is toegepast."""
return History(action(self.rev), self.children, self.thread)
def remove_thread(self) -> History:
"""Verwijdert de thread voor bij een ascend."""
return History(self.rev, self.children, None)
def add_to_thread(self, parent: History) -> History:
"""Zet de oude parent in de thread."""
return History(self.rev, self.children, parent)
def root(self) -> History:
"""Schrijf een functie die recursief blijft ascenden tot er geen thread meer is (maw: zoek de initial commit.)"""
# Is there a thread?
if not self.thread:
return self
else:
# Go deeper.
return self.ascend().root()
def head(self) -> Rev:
"""Getter voor de revisie van deze node, i.e. de huidige HEAD."""
return self.rev
def is_hash(self, hash: int) -> bool:
"""Predicaat: komt de hash van de huidige Rev overeen met de hash die we zoeken?"""
return self.rev.is_hash(hash)
def prettyDumpChildren(self, children : List[History]) -> str:
""" Pretty dump all children within a tree in string format. """
if not len(children):
return ''
elif len(children) == 1:
return str(children[0])
head, *tail = children
return str(head) + ',' + self.prettyDumpChildren(tail)
    def __str__(self):
        return '<History: "' + self.rev.desc() + '" with children [' + self.prettyDumpChildren(self.children) + '] and thread ' + str(self.thread) + '>'
h = History(Rev("Initial commit",1))
h2 = h.commit(Rev("Added README.md",2))
print("No children: ", h)
print("After commit: ", h2)
print("Reascended: ", h2.ascend())
print("Invalid descend (no change): ", h.descend(3))
print("Invalid ascend (no change): ", h.ascend())
complex = History(Rev("Initial commit",1)).commit(Rev("Added README.md",2)).commit(Rev("Actual work",4)).root() \
.commit(Rev("Fork, as READMEs are for wimps!",3))
print("Complex: ", complex)
print("Complex, ascend: ", complex.ascend())
print("Updating revision desc of head: ", h2.update(lambda x: x.update(desc = "Altered space and time!")))
""" Als alles goed is ingevuld zou het resultaat er zo uit moeten zien:
No children: <History: "Initial commit" with children [] and thread None>
After commit: <History: "Added README.md" with children [] and thread <History: "Initial commit" with children [] and thread None>>
Reascended: <History: "Initial commit" with children [<History: "Added README.md" with children [] and thread None>] and thread None>
Invalid descend (no change): <History: "Initial commit" with children [] and thread None>
Invalid ascend (no change): <History: "Initial commit" with children [] and thread None>
Complex: <History: "Fork, as READMEs are for wimps!" with children [] and thread <History: "Initial commit" with children [<History: "Added README.md" with children [<History: "Actual work" with children [] and thread None>] and thread None>] and thread None>>
Complex, ascend: <History: "Initial commit" with children [<History: "Added README.md" with children [<History: "Actual work" with children [] and thread None>] and thread None>, <History: "Fork, as READMEs are for wimps!" with children [] and thread None>] and thread None>
Updating revision desc of head: <History: "Altered space and time!" with children [] and thread <History: "Initial commit" with children [] and thread None>>
"""; None
# -
# ***
# ## College 6
# De opdracht hieronder dient gemaakt te worden na afloop van college 6. Deze week is er slechts één opdracht en deze hoeft niet in het portfolio. De opdracht dient als een introductie van concepten die jullie kunnen gebruiken bij de eindopdracht van dit vak.
#
# ### 6.1 — Functional Reactive Programming en Comprehensions
# In deze opdracht gaan we aan de slag met FRP: manipulatie van (potentieel eindeloze) streams data, waarbij resultaten constant berekend worden telkens als er nieuwe invoer beschikbaar is. In Python gebruiken we hiervoor generators, die technisch gezien niet functioneel zijn (maar in plaats daarvan een interne state gebruiken).
#
# Een eigenschap van generators is dat we deze als (oneindige) lijsten kunnen gebruiken. Om tijdens development infinite loops te voorkomen is de lengte van streams echter beperkt. Generators in Python zijn stateful, wat betekent dat we ieder element dat we uit de generator halen "verbruikt" hebben. Bij meerdere analyses kan het dus zijn dat je data opraakt. Om dit te voorkomen kun je `tee()` uit `itertools` gebruiken.
#
# We maken voor het beschrijven van generators gebruik van *lijst comprehensies*, of in dit geval generator comprehensies vanwege de potentiële eindeloosheid van de streams. Het verschil is dat de generator *lazy* is, en enkel waardes uitrekent wanneer deze gevraagd worden. Lijsten in Python zijn *eager*, wat betekent dat deze meteen worden uitgerekend. Omdat onze streams eindeloos moeten kunnen zijn is dit geen optie. In notatie is het enige verschil dat lijstcomprehensies de vierkante haken gebruiken `[f(a) for a in stream() if p(a)]`.
#
# Beide vormen worden gebruikt om collecties te maken op basis van invoer lijsten / streams, functies en predicaten (functies naar `bool`, e.g. `isEven(i: int) -> bool`). In het voorbeeld hierboven wordt ieder element uit `stream()` een voor een aan `a` gebonden, en als `p(a)` gelijk is aan `True` wordt `f(a)` aan de uitvoer toegevoegd. `p(a)` kan gezien worden als een filter, en `f(a)` als een functie die over de lijst gemapt wordt.
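
# A small added sketch of the `tee()` trick mentioned above: duplicate a generator so that two analyses can each
# consume the same, otherwise one-shot, stream.

# +
import itertools

squares = (x * x for x in range(5))
first_copy, second_copy = itertools.tee(squares, 2)
print(list(first_copy)) # [0, 1, 4, 9, 16]
print(list(second_copy)) # [0, 1, 4, 9, 16]
# -
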
# #### Opdracht 6.1.1 — Pathfinder
# Breid de code hieronder uit met nog 3 analyses:
# * bereken de variance op de temperatuur
# * bereken de gemiddelde hoogte over een periode
# * verzin zelf nog een interessante analyse (je mag meetwaardes aan de data_stream generator toevoegen)
# +
from random import random
from collections import namedtuple
from statistics import mean, StatisticsError
import itertools
import math
Measurement = namedtuple('Measurement', 't lon lat h temp')
def data_stream() -> Generator[Measurement, None, None]:
"""Deze generator stelt de data van onze pathfinder voor. Iedere 24 uur stuurt de robot een update met de huidige locatie,
hoogte in meters boven NMP en de gemeten temperatuur in Kelvin. Daarnaast krijgt onze pathfinder per dag een 5% kans om
te stoppen met communiceren en leeft deze maximaal 1000 dagen. Op deze manier werken we niet met echt eindeloze lijsten:
hoewel dit in principe moet kunnen kan een klein foutje tot eindeloze loops en kernel crashes leiden. Zodra je ervan
overtuigd bent dat je code hiermee om kan gaan kun je de if-conditie in de loop weghalen en zien wat er gebeurt.
"""
t = 0
lon = 19.13
lat = 33.22
alive = True
while alive:
lon += 0.5 * random() - 0.25
lat += 0.5 * random() - 0.25
t += 1
height = 100 * math.sin(lon) * abs(math.cos(lat)) ** 0.5
temp = 230 + 50 * random()
#if random() > 0.95 or t > 1000: # Comment these lines for endless fun!
# alive = False # Uncomment them for a sandbox.
yield Measurement(t, lon, lat, height, temp)
def below_NMP(m: Measurement) -> bool:
return m.h < 0
def temperature_in_celsius(m: Measurement) -> float:
return m.temp - 273.15
def safe_mean(xs: Generator[float, None, None]) -> Union[None, float]:
"""mean(xs) is geen totale functie en kan errors geven. Beter checken we de lengte van de invoer, maar helaas heeft een
generator geen lengte."""
try:
return mean(xs)
except StatisticsError:
return None
def analyse():
"""Hier worden de temperature_in_celsius transformatie en het below_NMP predicaat gebruikt om een stream te genereren van
celsius-temperaturen van alle datapunten onder "zeeniveau". Hier kunnen we een deel van pakken om een gemiddelde te
berekenen (merk op dat het niet mogelijk is een gemiddelde van een oneindige stream te nemen)."""
temps_below_NMP = (temperature_in_celsius(point) for point in data_stream() if below_NMP(point))
print("Mean temperature below NMP (first 100 measurements): ", safe_mean(itertools.islice(temps_below_NMP, 100)))
print("Mean temperature below NMP (second 100 measurements): ", safe_mean(itertools.islice(temps_below_NMP, 100)))
print("Mean temperature below NMP (third 100 measurements: ", safe_mean(itertools.islice(temps_below_NMP, 100)))
analyse()
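
# An added sketch of one possible answer to the three extra analyses asked for above (temperature variance, mean
# height over a period, and — as a self-chosen analysis — total distance travelled). The helper names from the
# cells above are assumed to be available.

# +
from statistics import pvariance

def analyse_more():
    measurements = list(itertools.islice(data_stream(), 100))
    temps = [temperature_in_celsius(m) for m in measurements]
    print("Temperature variance (first 100 days): ", pvariance(temps))
    print("Mean height (first 100 days): ", safe_mean(m.h for m in measurements))
    steps = [math.hypot(measurements[i + 1].lon - measurements[i].lon,
                        measurements[i + 1].lat - measurements[i].lat)
             for i in range(len(measurements) - 1)]
    print("Total distance travelled (first 100 days): ", sum(steps))

analyse_more()
# -
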
| 29,883 |
/Semi_Project 4 Find Waldo/train.ipynb | d43f3c7b195a3c5b0f53d787660063c50a4bc32b | [] | no_license | jaytoone/A.I_Image | https://github.com/jaytoone/A.I_Image | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 4,339,026 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="qJGqC_vHyHVi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="05673ea6-77df-4f41-a0ba-55b90118cd58"
#Imports
import os
import matplotlib.pyplot as plt
import torchvision.datasets
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.autograd import Variable
###############################################################################
def train(model, device, train_set_loader, optimizer, epoch, logging_interval=100):
# This method is derived from:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
# Was licensed BSD-3-clause
model.train()
for batch_idx, (data, target) in enumerate(train_set_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % logging_interval == 0:
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct = pred.eq(target.view_as(pred)).float().mean().item()
print('Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f} Accuracy: {:.2f}%'.format(
epoch, batch_idx * len(data), len(train_set_loader.dataset),
100. * batch_idx / len(train_set_loader), loss.item(),
100. * correct))
return loss.item()
import time
def train_many_epochs(model, epochs = 10):
lr = 0.1
start = time.time()
loss_ot = []
acc_ot = []
for epoch in range(epochs):
        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.5)  # use the decayed learning rate
loss = train(model, device, train_set_loader, optimizer, epoch, logging_interval=10)
acc = test(model, device, test_set_loader)
loss_ot.append(loss)
acc_ot.append(acc)
lr = lr/2.0
end = time.time()
total_time = end - start
print(total_time)
return loss_ot, acc_ot, total_time
def test(model, device, test_set_loader):
# This method is derived from:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
# Was licensed BSD-3-clause
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_set_loader:
data, target = data.to(device), target.to(device)
output = model(data)
                # `reduce=True` is deprecated; `reduction='sum'` sums the per-sample losses so that
                # dividing by the dataset size below yields the true average test loss.
                test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_set_loader.dataset)
print("")
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
test_loss,
correct, len(test_set_loader.dataset),
100. * correct / len(test_set_loader.dataset)))
print("")
return 100. * correct / len(test_set_loader.dataset)
def download_CIFAR10(data_path):
if not os.path.exists(data_path):
os.mkdir(data_path)
transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
training_set = torchvision.datasets.CIFAR10(data_path, train=True, transform=transformation, download=True)
testing_set = torchvision.datasets.CIFAR10(data_path, train=False, transform=transformation, download=True)
return training_set, testing_set
###############################################################################
batch_size = 1000
DATA_PATH = './data'
training_set, testing_set = download_CIFAR10(DATA_PATH)
train_set_loader = torch.utils.data.DataLoader(
dataset=training_set,
batch_size=batch_size,
shuffle=True)
test_set_loader = torch.utils.data.DataLoader(
dataset=testing_set,
batch_size=batch_size,
shuffle=False)
###############################################################################
# Use GPU wherever possible!
import torch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
###############################################################################
class SpikingNeuronLayerRNN(nn.Module):
def __init__(self, device, n_inputs=28*28, n_hidden=100, decay_multiplier=0.9, threshold=2.0, penalty_threshold=2.5):
super(SpikingNeuronLayerRNN, self).__init__()
self.device = device
self.n_inputs = n_inputs
self.n_hidden = n_hidden
self.decay_multiplier = decay_multiplier
self.threshold = threshold
self.penalty_threshold = penalty_threshold
self.fc = nn.Linear(n_inputs, n_hidden)
self.init_parameters()
self.reset_state()
self.to(self.device)
def init_parameters(self):
for param in self.parameters():
if param.dim() >= 2:
nn.init.xavier_uniform_(param)
def reset_state(self):
self.prev_inner = torch.zeros([self.n_hidden]).to(self.device)
self.prev_outer = torch.zeros([self.n_hidden]).to(self.device)
def forward(self, x):
"""
Call the neuron at every time step.
x: activated_neurons_below
        return: a tuple of (state, output) for this time step. Each item in the tuple
        is itself a PyTorch tensor of shape (batch_size, n_hidden), so the whole
        return value would be of shape (2, batch_size, n_hidden) if stacked.
"""
if self.prev_inner.dim() == 1:
# Adding batch_size dimension directly after doing a `self.reset_state()`:
batch_size = x.shape[0]
self.prev_inner = torch.stack(batch_size * [self.prev_inner])
self.prev_outer = torch.stack(batch_size * [self.prev_outer])
# 1. Weight matrix multiplies the input x
input_excitation = self.fc(x)
# 2. We add the result to a decayed version of the information we already had.
inner_excitation = input_excitation + self.prev_inner * self.decay_multiplier
# 3. We compute the activation of the neuron to find its output value,
        # but before the activation there is also a negative bias that refrains the neuron from firing too much.
        outer_excitation = F.relu(inner_excitation - self.threshold)
        # 4. If the neuron fires, its activation is subtracted from its inner state
        # (with an extra penalty that increases the refractory time),
        # because it discharges naturally and shouldn't fire twice.
do_penalize_gate = (outer_excitation > 0).float()
# TODO: remove following /2?
inner_excitation = inner_excitation - (self.penalty_threshold/self.threshold * inner_excitation) * do_penalize_gate
# 5. The outer excitation has a negative part after the positive part.
outer_excitation = outer_excitation #+ torch.abs(self.prev_outer) * self.decay_multiplier / 2.0
# 6. Setting internal values before returning.
# And the returning value is the one of the previous time step to delay
# activation of 1 time step of "processing" time. For logits, we don't take activation.
delayed_return_state = self.prev_inner
delayed_return_output = self.prev_outer
self.prev_inner = inner_excitation
self.prev_outer = outer_excitation
return delayed_return_state, delayed_return_output
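# A minimal numeric sketch (illustration only, not used by the model): the same
# decay / threshold / reset dynamics as in SpikingNeuronLayerRNN.forward, for one
# scalar neuron driven by a constant input. The constants mirror the defaults used
# further below (decay_multiplier=0.9, threshold=1.0, penalty_threshold=1.5).
def _lif_demo(n_steps=10, inp=0.6, decay=0.9, threshold=1.0, penalty=1.5):
    inner, spikes = 0.0, []
    for _ in range(n_steps):
        inner = inp + inner * decay                        # step 2: decayed accumulation
        out = max(inner - threshold, 0.0)                  # step 3: ReLU above threshold
        if out > 0.0:                                      # step 4: reset with penalty
            inner = inner - (penalty / threshold) * inner
        spikes.append(out)
    return spikes                                          # charges up, fires, resets, repeats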
###############################################################################
class InputDataToSpikingPerceptronLayer(nn.Module):
def __init__(self, device):
super(InputDataToSpikingPerceptronLayer, self).__init__()
self.device = device
self.reset_state()
self.to(self.device)
def reset_state(self):
# self.prev_state = torch.zeros([self.n_hidden]).to(self.device)
pass
def forward(self, x, is_2D=True):
x = x.view(x.size(0), -1) # Flatten 2D image to 1D for FC
random_activation_perceptron = torch.rand(x.shape).to(self.device)
return random_activation_perceptron * x
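# Illustrative sketch (not part of the model): re-sampling uniform noise at every time
# step turns a static pixel intensity into a stochastic drive whose per-step expectation
# is intensity / 2, so brighter pixels excite the first spiking layer more often.
_pixels = torch.tensor([0.0, 0.5, 1.0])
_mean_drive = torch.stack([torch.rand(3) * _pixels for _ in range(2000)]).mean(dim=0)
# _mean_drive is approximately tensor([0.00, 0.25, 0.50])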
###############################################################################
class OutputDataToSpikingPerceptronLayer(nn.Module):
def __init__(self, average_output=True):
"""
average_output: might be needed if this is used within a regular neural net as a layer.
Otherwise, sum may be numerically more stable for gradients with setting average_output=False.
"""
super(OutputDataToSpikingPerceptronLayer, self).__init__()
        if average_output:
            self.reducer = lambda x, dim: x.mean(dim=dim)  # average over the time axis
        else:
            self.reducer = lambda x, dim: x.sum(dim=dim)  # sum over the time axis
def forward(self, x):
if type(x) == list:
x = torch.stack(x)
return self.reducer(x, 0)
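# Illustrative sketch (not used by the model): the conversion above reduces a list of
# per-time-step tensors over the time axis, e.g. T tensors of shape (batch_size, n_out)
# are stacked and summed (or averaged) down to a single (batch_size, n_out) tensor.
_per_step = [torch.ones(4, 10) for _ in range(8)]  # 8 time steps, batch of 4
_reduced = OutputDataToSpikingPerceptronLayer(average_output=False)(_per_step)
assert _reduced.shape == (4, 10)  # time axis reduced away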
###############################################################################
class SpikingNet(nn.Module):
def __init__(self, device, n_time_steps, begin_eval):
super(SpikingNet, self).__init__()
assert (0 <= begin_eval and begin_eval < n_time_steps)
self.device = device
self.n_time_steps = n_time_steps
self.begin_eval = begin_eval
self.input_conversion = InputDataToSpikingPerceptronLayer(device)
self.layer1 = SpikingNeuronLayerRNN(
device, n_inputs=3*32*32, n_hidden=100,
decay_multiplier=0.9, threshold=1.0, penalty_threshold=1.5
)
self.layer2 = SpikingNeuronLayerRNN(
device, n_inputs=100, n_hidden=10,
decay_multiplier=0.9, threshold=1.0, penalty_threshold=1.5
)
self.output_conversion = OutputDataToSpikingPerceptronLayer(average_output=False) # Sum on outputs.
self.to(self.device)
def forward_through_time(self, x):
"""
        This acts as a layer. Its input is not time-dependent, and neither is its output.
        The time iterations happen inside, and the per-time-step results are pooled
        over the time axis before returning, so this pipeline can be mixed with
        regular backprop layers for the input and the output data.
"""
self.input_conversion.reset_state()
self.layer1.reset_state()
self.layer2.reset_state()
out = []
all_layer1_states = []
all_layer1_outputs = []
all_layer2_states = []
all_layer2_outputs = []
for _ in range(self.n_time_steps):
xi = self.input_conversion(x)
# For layer 1, we take the regular output.
layer1_state, layer1_output = self.layer1(xi)
            # We take the inner state of layer 2 because it's pre-activation and thus acts as our logits.
layer2_state, layer2_output = self.layer2(layer1_output)
all_layer1_states.append(layer1_state)
all_layer1_outputs.append(layer1_output)
all_layer2_states.append(layer2_state)
all_layer2_outputs.append(layer2_output)
out.append(layer2_state)
out = self.output_conversion(out[self.begin_eval:])
return out, [[all_layer1_states, all_layer1_outputs], [all_layer2_states, all_layer2_outputs]]
def forward(self, x):
out, _ = self.forward_through_time(x)
return F.log_softmax(out, dim=-1)
def visualize_all_neurons(self, x):
assert x.shape[0] == 1 and len(x.shape) == 4, (
"Pass only 1 example to SpikingNet.visualize(x) with outer dimension shape of 1.")
_, layers_state = self.forward_through_time(x)
for i, (all_layer_states, all_layer_outputs) in enumerate(layers_state):
layer_state = torch.stack(all_layer_states).data.cpu().numpy().squeeze().transpose()
layer_output = torch.stack(all_layer_outputs).data.cpu().numpy().squeeze().transpose()
self.plot_layer(layer_state, title="Inner state values of neurons for layer {}".format(i))
self.plot_layer(layer_output, title="Output spikes (activation) values of neurons for layer {}".format(i))
def visualize_neuron(self, x, layer_idx, neuron_idx):
assert x.shape[0] == 1 and len(x.shape) == 4, (
"Pass only 1 example to SpikingNet.visualize(x) with outer dimension shape of 1.")
_, layers_state = self.forward_through_time(x)
all_layer_states, all_layer_outputs = layers_state[layer_idx]
layer_state = torch.stack(all_layer_states).data.cpu().numpy().squeeze().transpose()
layer_output = torch.stack(all_layer_outputs).data.cpu().numpy().squeeze().transpose()
self.plot_neuron(layer_state[neuron_idx], title="Inner state values neuron {} of layer {}".format(neuron_idx, layer_idx))
self.plot_neuron(layer_output[neuron_idx], title="Output spikes (activation) values of neuron {} of layer {}".format(neuron_idx, layer_idx))
def plot_layer(self, layer_values, title):
"""
This function is derived from:
https://github.com/guillaume-chevalier/LSTM-Human-Activity-Recognition
Which was released under the MIT License.
"""
width = max(16, layer_values.shape[0] / 8)
height = max(4, layer_values.shape[1] / 8)
plt.figure(figsize=(width, height))
plt.imshow(
layer_values,
interpolation="nearest",
cmap=plt.cm.rainbow
)
plt.title(title)
plt.colorbar()
plt.xlabel("Time")
plt.ylabel("Neurons of layer")
plt.show()
def plot_neuron(self, neuron_through_time, title):
width = max(16, len(neuron_through_time) / 8)
height = 4
plt.figure(figsize=(width, height))
plt.title(title)
plt.plot(neuron_through_time)
plt.xlabel("Time")
plt.ylabel("Neuron's activation")
plt.show()
###############################################################################
class NonSpikingNet(nn.Module):
def __init__(self):
super(NonSpikingNet, self).__init__()
self.layer1 = nn.Linear(3* 32* 32, 100)
self.layer2 = nn.Linear(100, 10)
def forward(self, x, is_2D=True):
x = x.view(x.size(0), -1) # Flatten 2D image to 1D for FC
x = F.relu(self.layer1(x))
x = self.layer2(x)
return F.log_softmax(x, dim=-1)
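# Quick sanity check (illustrative): the spiking net and the non-spiking baseline use the
# same fully connected shapes (3*32*32 -> 100 -> 10), so their trainable parameter counts
# match, which keeps the comparison in the experiment below fair.
_n_spiking = sum(p.numel() for p in SpikingNet(device, n_time_steps=2, begin_eval=0).parameters())
_n_plain = sum(p.numel() for p in NonSpikingNet().parameters())
assert _n_spiking == _n_plain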
# + id="QetMQNKHzEQA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="391b2858-28d5-4121-e41e-df75fbd27b86"
# Replicate original experiment on CIFAR10
###############################################################################
num_epoch = 15
time_step_list = [128/1,128/2,128/4,128/8,128/16,128/32,128/64]
total_time_models = []
fig, ax_lst = plt.subplots(2, len(time_step_list)+1, sharex=True, sharey='row',figsize=(30,10))
for i,step in enumerate(time_step_list):
spiking_model = SpikingNet(device, n_time_steps=int(step), begin_eval=0)
loss, acc, time_m = train_many_epochs(spiking_model, epochs = num_epoch)
total_time_models.append(time_m)
ax_lst[0,i].plot(acc)
ax_lst[0,i].set_title("Spiking Acc, n time steps = %d"%int(step))
ax_lst[1,i].plot(loss)
ax_lst[1,i].set_title("Spiking Loss, n time steps = %d"%int(step))
non_spiking_model = NonSpikingNet().to(device)
loss, acc, time_m = train_many_epochs(non_spiking_model, epochs = num_epoch)
ax_lst[0,len(time_step_list)].plot(acc)
ax_lst[0,len(time_step_list)].set_title("NN Acc")
ax_lst[1,len(time_step_list)].plot(loss)
ax_lst[1,len(time_step_list)].set_title("NN Loss")
plt.show()
plt.figure(figsize=(10,10))
plt.plot(time_step_list, total_time_models)
plt.show()
# + id="21oR-86l0cyQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="983782a8-f178-4f8a-9126-a64ff0f6f42d"
###############################################################################
import numpy as np
data, target = test_set_loader.__iter__().__next__()
# taking 1st testing example:
x = torch.stack([data[0]])
y = target.data.numpy()[0]
plt.figure(figsize=(2,2))
plt.imshow(x.data[0].cpu().numpy().transpose(1,2,0)+.5)
plt.title("Input image x of label y={}:".format(y))
plt.show()
# plotting neuron's activations:
spiking_model.visualize_all_neurons(x.to(device))
print("A hidden neuron that looks excited:")
spiking_model.visualize_neuron(x.to(device), layer_idx=0, neuron_idx=0)
print("The output neuron of the label:")
spiking_model.visualize_neuron(x.to(device), layer_idx=1, neuron_idx=y)
# + id="94BLtpVCzbYz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="9ef1c7db-eff5-42e3-b32d-1ee1cadcdec3"
#Imports
import os
import matplotlib.pyplot as plt
import torchvision.datasets
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.autograd import Variable
###############################################################################
def train(model, device, train_set_loader, optimizer, epoch, logging_interval=100):
# This method is derived from:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
# Was licensed BSD-3-clause
model.train()
for batch_idx, (data, target) in enumerate(train_set_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % logging_interval == 0:
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct = pred.eq(target.view_as(pred)).float().mean().item()
print('Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f} Accuracy: {:.2f}%'.format(
epoch, batch_idx * len(data), len(train_set_loader.dataset),
100. * batch_idx / len(train_set_loader), loss.item(),
100. * correct))
return loss.item()
import time
def train_many_epochs(model, epochs = 10):
lr = 0.1
start = time.time()
loss_ot = []
acc_ot = []
for epoch in range(epochs):
        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.5)  # use the decayed learning rate (lr is halved below)
loss = train(model, device, train_set_loader, optimizer, epoch, logging_interval=10)
acc = test(model, device, test_set_loader)
loss_ot.append(loss)
acc_ot.append(acc)
lr = lr/2.0
end = time.time()
total_time = end - start
print(total_time)
return loss_ot, acc_ot, total_time
def test(model, device, test_set_loader):
# This method is derived from:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
# Was licensed BSD-3-clause
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_set_loader:
data, target = data.to(device), target.to(device)
output = model(data)
                # `reduce=True` is deprecated; `reduction='sum'` sums the per-sample losses so that
                # dividing by the dataset size below yields the true average test loss.
                test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_set_loader.dataset)
print("")
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
test_loss,
correct, len(test_set_loader.dataset),
100. * correct / len(test_set_loader.dataset)))
print("")
return 100. * correct / len(test_set_loader.dataset)
def download_CIFAR10(data_path):
if not os.path.exists(data_path):
os.mkdir(data_path)
transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
training_set = torchvision.datasets.CIFAR10(data_path, train=True, transform=transformation, download=True)
testing_set = torchvision.datasets.CIFAR10(data_path, train=False, transform=transformation, download=True)
return training_set, testing_set
###############################################################################
batch_size = 1000
DATA_PATH = './data'
training_set, testing_set = download_CIFAR10(DATA_PATH)
train_set_loader = torch.utils.data.DataLoader(
dataset=training_set,
batch_size=batch_size,
shuffle=True)
test_set_loader = torch.utils.data.DataLoader(
dataset=testing_set,
batch_size=batch_size,
shuffle=False)
###############################################################################
# Use GPU wherever possible!
import torch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
###############################################################################
class SpikingNeuronLayerRNN(nn.Module):
def __init__(self, device, n_inputs=28*28, n_hidden=100, decay_multiplier=0.9, threshold=2.0, penalty_threshold=2.5):
super(SpikingNeuronLayerRNN, self).__init__()
self.device = device
self.n_inputs = n_inputs
self.n_hidden = n_hidden
self.decay_multiplier = decay_multiplier
self.threshold = threshold
self.penalty_threshold = penalty_threshold
self.fc = nn.Linear(n_inputs, n_hidden)
self.init_parameters()
self.reset_state()
self.to(self.device)
def init_parameters(self):
for param in self.parameters():
if param.dim() >= 2:
nn.init.xavier_uniform_(param)
def reset_state(self):
self.prev_inner = torch.zeros([self.n_hidden]).to(self.device)
self.prev_outer = torch.zeros([self.n_hidden]).to(self.device)
def forward(self, x):
"""
Call the neuron at every time step.
x: activated_neurons_below
        return: a tuple of (state, output) for this time step. Each item in the tuple
        is itself a PyTorch tensor of shape (batch_size, n_hidden), so the whole
        return value would be of shape (2, batch_size, n_hidden) if stacked.
"""
if self.prev_inner.dim() == 1:
# Adding batch_size dimension directly after doing a `self.reset_state()`:
batch_size = x.shape[0]
self.prev_inner = torch.stack(batch_size * [self.prev_inner])
self.prev_outer = torch.stack(batch_size * [self.prev_outer])
# 1. Weight matrix multiplies the input x
input_excitation = self.fc(x)
# 2. We add the result to a decayed version of the information we already had.
inner_excitation = input_excitation + self.prev_inner * self.decay_multiplier
# 3. We compute the activation of the neuron to find its output value,
        # but before the activation there is also a negative bias that refrains the neuron from firing too much.
        outer_excitation = F.relu(inner_excitation - self.threshold)
        # 4. If the neuron fires, its activation is subtracted from its inner state
        # (with an extra penalty that increases the refractory time),
        # because it discharges naturally and shouldn't fire twice.
do_penalize_gate = (outer_excitation > 0).float()
# TODO: remove following /2?
inner_excitation = inner_excitation - (self.penalty_threshold/self.threshold * inner_excitation) * do_penalize_gate
# 5. The outer excitation has a negative part after the positive part.
outer_excitation = outer_excitation #+ torch.abs(self.prev_outer) * self.decay_multiplier / 2.0
# 6. Setting internal values before returning.
# And the returning value is the one of the previous time step to delay
# activation of 1 time step of "processing" time. For logits, we don't take activation.
delayed_return_state = self.prev_inner
delayed_return_output = self.prev_outer
self.prev_inner = inner_excitation
self.prev_outer = outer_excitation
return delayed_return_state, delayed_return_output
###############################################################################
class SpikingNeuronLayerCRNN(nn.Module):
def __init__(self, device, n_hidden=64, decay_multiplier=0.9, threshold=2.0, penalty_threshold=2.5):
super(SpikingNeuronLayerCRNN, self).__init__()
self.device = device
self.n_hidden = n_hidden
self.decay_multiplier = decay_multiplier
self.threshold = threshold
self.penalty_threshold = penalty_threshold
self.conv = torch.nn.Sequential(
nn.Conv2d(3, 6, 5),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(6, 12, 5),
nn.ReLU(),
)
self.fc = torch.nn.Sequential(
nn.Linear(12 * 10 * 10, 100),
nn.ReLU()
)
self.init_parameters()
self.reset_state()
self.to(self.device)
def init_parameters(self):
for param in self.parameters():
if param.dim() >= 2:
nn.init.xavier_uniform_(param)
def reset_state(self):
self.prev_inner = torch.zeros([self.n_hidden]).to(self.device)
self.prev_outer = torch.zeros([self.n_hidden]).to(self.device)
def forward(self, x):
"""
Call the neuron at every time step.
x: activated_neurons_below
        return: a tuple of (state, output) for this time step. Each item in the tuple
        is itself a PyTorch tensor of shape (batch_size, n_hidden), so the whole
        return value would be of shape (2, batch_size, n_hidden) if stacked.
"""
if self.prev_inner.dim() == 1:
# Adding batch_size dimension directly after doing a `self.reset_state()`:
batch_size = x.shape[0]
self.prev_inner = torch.stack(batch_size * [self.prev_inner])
self.prev_outer = torch.stack(batch_size * [self.prev_outer])
# 1. Weight matrix multiplies the input x
conv_excitation = self.conv(x)
conv_excitation = conv_excitation.view(-1, 12 * 10 * 10)
input_excitation = self.fc(conv_excitation)
# 2. We add the result to a decayed version of the information we already had.
inner_excitation = input_excitation + self.prev_inner * self.decay_multiplier
# 3. We compute the activation of the neuron to find its output value,
        # but before the activation there is also a negative bias that refrains the neuron from firing too much.
        outer_excitation = F.relu(inner_excitation - self.threshold)
        # 4. If the neuron fires, its activation is subtracted from its inner state
        # (with an extra penalty that increases the refractory time),
        # because it discharges naturally and shouldn't fire twice.
do_penalize_gate = (outer_excitation > 0).float()
# TODO: remove following /2?
inner_excitation = inner_excitation - (self.penalty_threshold/self.threshold * inner_excitation) * do_penalize_gate
# 5. The outer excitation has a negative part after the positive part.
outer_excitation = outer_excitation #+ torch.abs(self.prev_outer) * self.decay_multiplier / 2.0
# 6. Setting internal values before returning.
# And the returning value is the one of the previous time step to delay
# activation of 1 time step of "processing" time. For logits, we don't take activation.
delayed_return_state = self.prev_inner
delayed_return_output = self.prev_outer
self.prev_inner = inner_excitation
self.prev_outer = outer_excitation
return delayed_return_state, delayed_return_output
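# Shape sanity check (illustrative): for CIFAR10 inputs of 3x32x32 the conv stack above
# gives 32 -> 28 (5x5 conv) -> 14 (2x2 max-pool) -> 10 (5x5 conv), i.e. 12 x 10 x 10
# features, which is exactly what the nn.Linear(12 * 10 * 10, 100) layer expects.
_probe = SpikingNeuronLayerCRNN(device).conv(torch.zeros(1, 3, 32, 32, device=device))
assert _probe.shape == (1, 12, 10, 10)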
###############################################################################
class InputDataToSpikingPerceptronLayer(nn.Module):
def __init__(self, device):
super(InputDataToSpikingPerceptronLayer, self).__init__()
self.device = device
self.reset_state()
self.to(self.device)
def reset_state(self):
# self.prev_state = torch.zeros([self.n_hidden]).to(self.device)
pass
def forward(self, x, is_2D=True):
#x = x.view(x.size(0), -1) # Flatten 2D image to 1D for FC
random_activation_perceptron = torch.rand(x.shape).to(self.device)
return random_activation_perceptron * x
###############################################################################
class OutputDataToSpikingPerceptronLayer(nn.Module):
def __init__(self, average_output=True):
"""
average_output: might be needed if this is used within a regular neural net as a layer.
Otherwise, sum may be numerically more stable for gradients with setting average_output=False.
"""
super(OutputDataToSpikingPerceptronLayer, self).__init__()
        if average_output:
            self.reducer = lambda x, dim: x.mean(dim=dim)  # average over the time axis
        else:
            self.reducer = lambda x, dim: x.sum(dim=dim)  # sum over the time axis
def forward(self, x):
if type(x) == list:
x = torch.stack(x)
return self.reducer(x, 0)
###############################################################################
class SpikingConvNet(nn.Module):
def __init__(self, device, n_time_steps, begin_eval):
super(SpikingConvNet, self).__init__()
assert (0 <= begin_eval and begin_eval < n_time_steps)
self.device = device
self.n_time_steps = n_time_steps
self.begin_eval = begin_eval
self.input_conversion = InputDataToSpikingPerceptronLayer(device)
self.layer1 = SpikingNeuronLayerCRNN(
device, n_hidden=100,
decay_multiplier=0.9, threshold=1.0, penalty_threshold=1.5
)
self.layer2 = SpikingNeuronLayerRNN(
device, n_inputs=100, n_hidden=10,
decay_multiplier=0.9, threshold=1.0, penalty_threshold=1.5
)
self.output_conversion = OutputDataToSpikingPerceptronLayer(average_output=False) # Sum on outputs.
self.to(self.device)
def forward_through_time(self, x):
"""
        This acts as a layer. Its input is not time-dependent, and neither is its output.
        The time iterations happen inside, and the per-time-step results are pooled
        over the time axis before returning, so this pipeline can be mixed with
        regular backprop layers for the input and the output data.
"""
self.input_conversion.reset_state()
self.layer1.reset_state()
self.layer2.reset_state()
out = []
all_layer1_states = []
all_layer1_outputs = []
all_layer2_states = []
all_layer2_outputs = []
for _ in range(self.n_time_steps):
xi = self.input_conversion(x)
# For layer 1, we take the regular output.
layer1_state, layer1_output = self.layer1(xi)
            # We take the inner state of layer 2 because it's pre-activation and thus acts as our logits.
layer2_state, layer2_output = self.layer2(layer1_output)
all_layer1_states.append(layer1_state)
all_layer1_outputs.append(layer1_output)
all_layer2_states.append(layer2_state)
all_layer2_outputs.append(layer2_output)
out.append(layer2_state)
out = self.output_conversion(out[self.begin_eval:])
return out, [[all_layer1_states, all_layer1_outputs], [all_layer2_states, all_layer2_outputs]]
def forward(self, x):
out, _ = self.forward_through_time(x)
return F.log_softmax(out, dim=-1)
def visualize_all_neurons(self, x):
assert x.shape[0] == 1 and len(x.shape) == 4, (
"Pass only 1 example to SpikingNet.visualize(x) with outer dimension shape of 1.")
_, layers_state = self.forward_through_time(x)
for i, (all_layer_states, all_layer_outputs) in enumerate(layers_state):
layer_state = torch.stack(all_layer_states).data.cpu().numpy().squeeze().transpose()
layer_output = torch.stack(all_layer_outputs).data.cpu().numpy().squeeze().transpose()
self.plot_layer(layer_state, title="Inner state values of neurons for layer {}".format(i))
self.plot_layer(layer_output, title="Output spikes (activation) values of neurons for layer {}".format(i))
def visualize_neuron(self, x, layer_idx, neuron_idx):
assert x.shape[0] == 1 and len(x.shape) == 4, (
"Pass only 1 example to SpikingNet.visualize(x) with outer dimension shape of 1.")
_, layers_state = self.forward_through_time(x)
all_layer_states, all_layer_outputs = layers_state[layer_idx]
layer_state = torch.stack(all_layer_states).data.cpu().numpy().squeeze().transpose()
layer_output = torch.stack(all_layer_outputs).data.cpu().numpy().squeeze().transpose()
self.plot_neuron(layer_state[neuron_idx], title="Inner state values neuron {} of layer {}".format(neuron_idx, layer_idx))
self.plot_neuron(layer_output[neuron_idx], title="Output spikes (activation) values of neuron {} of layer {}".format(neuron_idx, layer_idx))
def plot_layer(self, layer_values, title):
"""
This function is derived from:
https://github.com/guillaume-chevalier/LSTM-Human-Activity-Recognition
Which was released under the MIT License.
"""
width = max(16, layer_values.shape[0] / 8)
height = max(4, layer_values.shape[1] / 8)
plt.figure(figsize=(width, height))
plt.imshow(
layer_values,
interpolation="nearest",
cmap=plt.cm.rainbow
)
plt.title(title)
plt.colorbar()
plt.xlabel("Time")
plt.ylabel("Neurons of layer")
plt.show()
def plot_neuron(self, neuron_through_time, title):
width = max(16, len(neuron_through_time) / 8)
height = 4
plt.figure(figsize=(width, height))
plt.title(title)
plt.plot(neuron_through_time)
plt.xlabel("Time")
plt.ylabel("Neuron's activation")
plt.show()
###############################################################################
class NonSpikingNet(nn.Module):
def __init__(self):
super(NonSpikingNet, self).__init__()
self.layer1 = nn.Linear(3* 32* 32, 100)
self.layer2 = nn.Linear(100, 10)
def forward(self, x, is_2D=True):
x = x.view(x.size(0), -1) # Flatten 2D image to 1D for FC
x = F.relu(self.layer1(x))
x = self.layer2(x)
return F.log_softmax(x, dim=-1)
# + id="Imu-WBbyAr1A" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="915296fd-ea8f-4ba3-9581-d8238afce175"
###############################################################################
num_epoch = 15
time_step_list = [128/2,128/4,128/8,128/16,128/32,128/64]
total_time_models = []
fig, ax_lst = plt.subplots(2, len(time_step_list)+1, sharex=True, sharey='row',figsize=(30,10))
for i,step in enumerate(time_step_list):
spiking_model = SpikingConvNet(device, n_time_steps=int(step), begin_eval=0)
loss, acc, time_m = train_many_epochs(spiking_model, epochs = num_epoch)
total_time_models.append(time_m)
ax_lst[0,i].plot(acc)
ax_lst[0,i].set_title("Spiking Acc, n time steps = %d"%int(step))
ax_lst[1,i].plot(loss)
ax_lst[1,i].set_title("Spiking Loss, n time steps = %d"%int(step))
non_spiking_model = NonSpikingNet().to(device)
loss, acc, time_m = train_many_epochs(non_spiking_model, epochs = num_epoch)
ax_lst[0,len(time_step_list)].plot(acc)
ax_lst[0,len(time_step_list)].set_title("NN Acc")
ax_lst[1,len(time_step_list)].plot(loss)
ax_lst[1,len(time_step_list)].set_title("NN Loss")
plt.show()
plt.figure(figsize=(10,10))
plt.plot(time_step_list, total_time_models)
plt.show()
# + id="e-JgGLA93Pjc" colab_type="code" colab={}
###############################################################################
data, target = test_set_loader.__iter__().__next__()
# taking 1st testing example:
x = torch.stack([data[0]])
y = target.data.numpy()[0]
plt.figure(figsize=(12,12))
plt.imshow(x.data[0].cpu().numpy().transpose(1, 2, 0) + .5)  # show the RGB image rather than a single channel
plt.title("Input image x of label y={}:".format(y))
plt.show()
# plotting neuron's activations:
spiking_model.visualize_all_neurons(x.to(device))
print("A hidden neuron that looks excited:")
spiking_model.visualize_neuron(x.to(device), layer_idx=0, neuron_idx=0)
print("The output neuron of the label:")
spiking_model.visualize_neuron(x.to(device), layer_idx=1, neuron_idx=y)
# + [markdown] id="5EKqJTuMs2lq" colab_type="text"
# MNIST
# + id="D_kqPMQ-sl5-" colab_type="code" colab={}
#Imports
import os
import matplotlib.pyplot as plt
import torchvision.datasets
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.autograd import Variable
###############################################################################
def train(model, device, train_set_loader, optimizer, epoch, logging_interval=100):
# This method is derived from:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
# Was licensed BSD-3-clause
model.train()
for batch_idx, (data, target) in enumerate(train_set_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % logging_interval == 0:
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct = pred.eq(target.view_as(pred)).float().mean().item()
print('Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f} Accuracy: {:.2f}%'.format(
epoch, batch_idx * len(data), len(train_set_loader.dataset),
100. * batch_idx / len(train_set_loader), loss.item(),
100. * correct))
return loss.item()
import time
def train_many_epochs(model, epochs = 10):
lr = 0.1
start = time.time()
loss_ot = []
acc_ot = []
for epoch in range(epochs):
        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.5)  # use the decayed learning rate (lr is halved below)
loss = train(model, device, train_set_loader, optimizer, epoch, logging_interval=10)
acc = test(model, device, test_set_loader)
loss_ot.append(loss)
acc_ot.append(acc)
lr = lr/2.0
end = time.time()
total_time = end - start
print(total_time)
return loss_ot, acc_ot, total_time
def test(model, device, test_set_loader):
# This method is derived from:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
# Was licensed BSD-3-clause
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_set_loader:
data, target = data.to(device), target.to(device)
output = model(data)
                # `reduce=True` is deprecated; `reduction='sum'` sums the per-sample losses so that
                # dividing by the dataset size below yields the true average test loss.
                test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_set_loader.dataset)
print("")
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
test_loss,
correct, len(test_set_loader.dataset),
100. * correct / len(test_set_loader.dataset)))
print("")
return 100. * correct / len(test_set_loader.dataset)
def download_mnist(data_path):
if not os.path.exists(data_path):
os.mkdir(data_path)
transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
training_set = torchvision.datasets.MNIST(data_path, train=True, transform=transformation, download=True)
testing_set = torchvision.datasets.MNIST(data_path, train=False, transform=transformation, download=True)
return training_set, testing_set
###############################################################################
batch_size = 1000
DATA_PATH = './data'
training_set, testing_set = download_mnist(DATA_PATH)
train_set_loader = torch.utils.data.DataLoader(
dataset=training_set,
batch_size=batch_size,
shuffle=True)
test_set_loader = torch.utils.data.DataLoader(
dataset=testing_set,
batch_size=batch_size,
shuffle=False)
###############################################################################
# Use GPU wherever possible!
import torch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
###############################################################################
class SpikingNeuronLayerRNN(nn.Module):
def __init__(self, device, n_inputs=28*28, n_hidden=100, decay_multiplier=0.9, threshold=2.0, penalty_threshold=2.5):
super(SpikingNeuronLayerRNN, self).__init__()
self.device = device
self.n_inputs = n_inputs
self.n_hidden = n_hidden
self.decay_multiplier = decay_multiplier
self.threshold = threshold
self.penalty_threshold = penalty_threshold
self.fc = nn.Linear(n_inputs, n_hidden)
self.init_parameters()
self.reset_state()
self.to(self.device)
def init_parameters(self):
for param in self.parameters():
if param.dim() >= 2:
nn.init.xavier_uniform_(param)
def reset_state(self):
self.prev_inner = torch.zeros([self.n_hidden]).to(self.device)
self.prev_outer = torch.zeros([self.n_hidden]).to(self.device)
def forward(self, x):
"""
Call the neuron at every time step.
x: activated_neurons_below
        return: a tuple of (state, output) for this time step. Each item in the tuple
        is itself a PyTorch tensor of shape (batch_size, n_hidden), so the whole
        return value would be of shape (2, batch_size, n_hidden) if stacked.
"""
if self.prev_inner.dim() == 1:
# Adding batch_size dimension directly after doing a `self.reset_state()`:
batch_size = x.shape[0]
self.prev_inner = torch.stack(batch_size * [self.prev_inner])
self.prev_outer = torch.stack(batch_size * [self.prev_outer])
# 1. Weight matrix multiplies the input x
input_excitation = self.fc(x)
# 2. We add the result to a decayed version of the information we already had.
inner_excitation = input_excitation + self.prev_inner * self.decay_multiplier
# 3. We compute the activation of the neuron to find its output value,
        # but before the activation there is also a negative bias that refrains the neuron from firing too much.
        outer_excitation = F.relu(inner_excitation - self.threshold)
        # 4. If the neuron fires, its activation is subtracted from its inner state
        # (with an extra penalty that increases the refractory time),
        # because it discharges naturally and shouldn't fire twice.
do_penalize_gate = (outer_excitation > 0).float()
# TODO: remove following /2?
inner_excitation = inner_excitation - (self.penalty_threshold/self.threshold * inner_excitation) * do_penalize_gate
# 5. The outer excitation has a negative part after the positive part.
outer_excitation = outer_excitation #+ torch.abs(self.prev_outer) * self.decay_multiplier / 2.0
# 6. Setting internal values before returning.
# And the returning value is the one of the previous time step to delay
# activation of 1 time step of "processing" time. For logits, we don't take activation.
delayed_return_state = self.prev_inner
delayed_return_output = self.prev_outer
self.prev_inner = inner_excitation
self.prev_outer = outer_excitation
return delayed_return_state, delayed_return_output
###############################################################################
class InputDataToSpikingPerceptronLayer(nn.Module):
def __init__(self, device):
super(InputDataToSpikingPerceptronLayer, self).__init__()
self.device = device
self.reset_state()
self.to(self.device)
def reset_state(self):
# self.prev_state = torch.zeros([self.n_hidden]).to(self.device)
pass
def forward(self, x, is_2D=True):
x = x.view(x.size(0), -1) # Flatten 2D image to 1D for FC
random_activation_perceptron = torch.rand(x.shape).to(self.device)
return random_activation_perceptron * x
###############################################################################
class OutputDataToSpikingPerceptronLayer(nn.Module):
def __init__(self, average_output=True):
"""
average_output: might be needed if this is used within a regular neural net as a layer.
Otherwise, sum may be numerically more stable for gradients with setting average_output=False.
"""
super(OutputDataToSpikingPerceptronLayer, self).__init__()
        if average_output:
            self.reducer = lambda x, dim: x.mean(dim=dim)  # average over the time axis
        else:
            self.reducer = lambda x, dim: x.sum(dim=dim)  # sum over the time axis
def forward(self, x):
if type(x) == list:
x = torch.stack(x)
return self.reducer(x, 0)
###############################################################################
class SpikingNet(nn.Module):
def __init__(self, device, n_time_steps, begin_eval):
super(SpikingNet, self).__init__()
assert (0 <= begin_eval and begin_eval < n_time_steps)
self.device = device
self.n_time_steps = n_time_steps
self.begin_eval = begin_eval
self.input_conversion = InputDataToSpikingPerceptronLayer(device)
self.layer1 = SpikingNeuronLayerRNN(
device, n_inputs=1*28*28, n_hidden=100,
decay_multiplier=0.9, threshold=1.0, penalty_threshold=1.5
)
self.layer2 = SpikingNeuronLayerRNN(
device, n_inputs=100, n_hidden=10,
decay_multiplier=0.9, threshold=1.0, penalty_threshold=1.5
)
self.output_conversion = OutputDataToSpikingPerceptronLayer(average_output=False) # Sum on outputs.
self.to(self.device)
def forward_through_time(self, x):
"""
        This acts as a layer. Its input is not time-dependent, and neither is its output.
        The time iterations happen inside, and the per-time-step results are pooled
        over the time axis before returning, so this pipeline can be mixed with
        regular backprop layers for the input and the output data.
"""
self.input_conversion.reset_state()
self.layer1.reset_state()
self.layer2.reset_state()
out = []
all_layer1_states = []
all_layer1_outputs = []
all_layer2_states = []
all_layer2_outputs = []
for _ in range(self.n_time_steps):
xi = self.input_conversion(x)
# For layer 1, we take the regular output.
layer1_state, layer1_output = self.layer1(xi)
            # We take the inner state of layer 2 because it's pre-activation and thus acts as our logits.
layer2_state, layer2_output = self.layer2(layer1_output)
all_layer1_states.append(layer1_state)
all_layer1_outputs.append(layer1_output)
all_layer2_states.append(layer2_state)
all_layer2_outputs.append(layer2_output)
out.append(layer2_state)
out = self.output_conversion(out[self.begin_eval:])
return out, [[all_layer1_states, all_layer1_outputs], [all_layer2_states, all_layer2_outputs]]
def forward(self, x):
out, _ = self.forward_through_time(x)
return F.log_softmax(out, dim=-1)
def visualize_all_neurons(self, x):
assert x.shape[0] == 1 and len(x.shape) == 4, (
"Pass only 1 example to SpikingNet.visualize(x) with outer dimension shape of 1.")
_, layers_state = self.forward_through_time(x)
for i, (all_layer_states, all_layer_outputs) in enumerate(layers_state):
layer_state = torch.stack(all_layer_states).data.cpu().numpy().squeeze().transpose()
layer_output = torch.stack(all_layer_outputs).data.cpu().numpy().squeeze().transpose()
self.plot_layer(layer_state, title="Inner state values of neurons for layer {}".format(i))
self.plot_layer(layer_output, title="Output spikes (activation) values of neurons for layer {}".format(i))
def visualize_neuron(self, x, layer_idx, neuron_idx):
assert x.shape[0] == 1 and len(x.shape) == 4, (
"Pass only 1 example to SpikingNet.visualize(x) with outer dimension shape of 1.")
_, layers_state = self.forward_through_time(x)
all_layer_states, all_layer_outputs = layers_state[layer_idx]
layer_state = torch.stack(all_layer_states).data.cpu().numpy().squeeze().transpose()
layer_output = torch.stack(all_layer_outputs).data.cpu().numpy().squeeze().transpose()
self.plot_neuron(layer_state[neuron_idx], title="Inner state values neuron {} of layer {}".format(neuron_idx, layer_idx))
self.plot_neuron(layer_output[neuron_idx], title="Output spikes (activation) values of neuron {} of layer {}".format(neuron_idx, layer_idx))
def plot_layer(self, layer_values, title):
"""
This function is derived from:
https://github.com/guillaume-chevalier/LSTM-Human-Activity-Recognition
Which was released under the MIT License.
"""
width = max(16, layer_values.shape[0] / 8)
height = max(4, layer_values.shape[1] / 8)
plt.figure(figsize=(width, height))
plt.imshow(
layer_values,
interpolation="nearest",
cmap=plt.cm.rainbow
)
plt.title(title)
plt.colorbar()
plt.xlabel("Time")
plt.ylabel("Neurons of layer")
plt.show()
def plot_neuron(self, neuron_through_time, title):
width = max(16, len(neuron_through_time) / 8)
height = 4
plt.figure(figsize=(width, height))
plt.title(title)
plt.plot(neuron_through_time)
plt.xlabel("Time")
plt.ylabel("Neuron's activation")
plt.show()
###############################################################################
class NonSpikingNet(nn.Module):
def __init__(self):
super(NonSpikingNet, self).__init__()
self.layer1 = nn.Linear(1* 28* 28, 100)
self.layer2 = nn.Linear(100, 10)
def forward(self, x, is_2D=True):
x = x.view(x.size(0), -1) # Flatten 2D image to 1D for FC
x = F.relu(self.layer1(x))
x = self.layer2(x)
return F.log_softmax(x, dim=-1)
# + id="RgdEWmIytGKR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="28071c55-603a-4ea6-db0b-354ff9515f94"
# Replicate original experiment (MNIST)
###############################################################################
num_epoch = 15
time_step_list = [128/1,128/2,128/4,128/8,128/16,128/32,128/64]
total_time_models = []
fig, ax_lst = plt.subplots(2, len(time_step_list)+1, sharex=True, sharey='row',figsize=(30,10))
for i,step in enumerate(time_step_list):
spiking_model = SpikingNet(device, n_time_steps=int(step), begin_eval=0)
loss, acc, time_m = train_many_epochs(spiking_model, epochs = num_epoch)
total_time_models.append(time_m)
ax_lst[0,i].plot(acc)
ax_lst[0,i].set_title("Spiking Acc, n time steps = %d"%int(step))
ax_lst[1,i].plot(loss)
ax_lst[1,i].set_title("Spiking Loss, n time steps = %d"%int(step))
non_spiking_model = NonSpikingNet().to(device)
loss, acc, time_m = train_many_epochs(non_spiking_model, epochs = num_epoch)
ax_lst[0,len(time_step_list)].plot(acc)
ax_lst[0,len(time_step_list)].set_title("NN Acc")
ax_lst[1,len(time_step_list)].plot(loss)
ax_lst[1,len(time_step_list)].set_title("NN Loss")
plt.show()
plt.figure(figsize=(10,10))
plt.plot(time_step_list, total_time_models)
plt.show()
# + id="XIomty4ItOgX" colab_type="code" colab={}
#Imports
import os
import matplotlib.pyplot as plt
import torchvision.datasets
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.autograd import Variable
###############################################################################
def train(model, device, train_set_loader, optimizer, epoch, logging_interval=100):
# This method is derived from:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
# Was licensed BSD-3-clause
model.train()
for batch_idx, (data, target) in enumerate(train_set_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % logging_interval == 0:
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct = pred.eq(target.view_as(pred)).float().mean().item()
print('Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f} Accuracy: {:.2f}%'.format(
epoch, batch_idx * len(data), len(train_set_loader.dataset),
100. * batch_idx / len(train_set_loader), loss.item(),
100. * correct))
return loss.item()
import time
def train_many_epochs(model, epochs = 10):
lr = 0.1
start = time.time()
loss_ot = []
acc_ot = []
for epoch in range(epochs):
        optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.5)  # use the decayed learning rate (lr is halved below)
loss = train(model, device, train_set_loader, optimizer, epoch, logging_interval=10)
acc = test(model, device, test_set_loader)
loss_ot.append(loss)
acc_ot.append(acc)
lr = lr/2.0
end = time.time()
total_time = end - start
print(total_time)
return loss_ot, acc_ot, total_time
def test(model, device, test_set_loader):
# This method is derived from:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
# Was licensed BSD-3-clause
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_set_loader:
data, target = data.to(device), target.to(device)
output = model(data)
                # `reduce=True` is deprecated; `reduction='sum'` sums the per-sample losses so that
                # dividing by the dataset size below yields the true average test loss.
                test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_set_loader.dataset)
print("")
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
test_loss,
correct, len(test_set_loader.dataset),
100. * correct / len(test_set_loader.dataset)))
print("")
return 100. * correct / len(test_set_loader.dataset)
def download_mnist(data_path):
if not os.path.exists(data_path):
os.mkdir(data_path)
transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
training_set = torchvision.datasets.MNIST(data_path, train=True, transform=transformation, download=True)
testing_set = torchvision.datasets.MNIST(data_path, train=False, transform=transformation, download=True)
return training_set, testing_set
###############################################################################
batch_size = 1000
DATA_PATH = './data'
training_set, testing_set = download_mnist(DATA_PATH)
train_set_loader = torch.utils.data.DataLoader(
dataset=training_set,
batch_size=batch_size,
shuffle=True)
test_set_loader = torch.utils.data.DataLoader(
dataset=testing_set,
batch_size=batch_size,
shuffle=False)
###############################################################################
# Use GPU wherever possible!
import torch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
###############################################################################
class SpikingNeuronLayerRNN(nn.Module):
def __init__(self, device, n_inputs=28*28, n_hidden=100, decay_multiplier=0.9, threshold=2.0, penalty_threshold=2.5):
super(SpikingNeuronLayerRNN, self).__init__()
self.device = device
self.n_inputs = n_inputs
self.n_hidden = n_hidden
self.decay_multiplier = decay_multiplier
self.threshold = threshold
self.penalty_threshold = penalty_threshold
self.fc = nn.Linear(n_inputs, n_hidden)
self.init_parameters()
self.reset_state()
self.to(self.device)
def init_parameters(self):
for param in self.parameters():
if param.dim() >= 2:
nn.init.xavier_uniform_(param)
def reset_state(self):
self.prev_inner = torch.zeros([self.n_hidden]).to(self.device)
self.prev_outer = torch.zeros([self.n_hidden]).to(self.device)
def forward(self, x):
"""
Call the neuron at every time step.
x: activated_neurons_below
        return: a tuple of (state, output) for this time step. Each item in the tuple
        is itself a PyTorch tensor of shape (batch_size, n_hidden), so the whole
        return value would be of shape (2, batch_size, n_hidden) if stacked.
"""
if self.prev_inner.dim() == 1:
# Adding batch_size dimension directly after doing a `self.reset_state()`:
batch_size = x.shape[0]
self.prev_inner = torch.stack(batch_size * [self.prev_inner])
self.prev_outer = torch.stack(batch_size * [self.prev_outer])
# 1. Weight matrix multiplies the input x
input_excitation = self.fc(x)
# 2. We add the result to a decayed version of the information we already had.
inner_excitation = input_excitation + self.prev_inner * self.decay_multiplier
# 3. We compute the activation of the neuron to find its output value,
        # but before the activation there is also a negative bias that refrains the neuron from firing too much.
        outer_excitation = F.relu(inner_excitation - self.threshold)
        # 4. If the neuron fires, its activation is subtracted from its inner state
        # (with an extra penalty that increases the refractory time),
        # because it discharges naturally and shouldn't fire twice.
do_penalize_gate = (outer_excitation > 0).float()
# TODO: remove following /2?
inner_excitation = inner_excitation - (self.penalty_threshold/self.threshold * inner_excitation) * do_penalize_gate
# 5. The outer excitation has a negative part after the positive part.
outer_excitation = outer_excitation #+ torch.abs(self.prev_outer) * self.decay_multiplier / 2.0
# 6. Setting internal values before returning.
# And the returning value is the one of the previous time step to delay
# activation of 1 time step of "processing" time. For logits, we don't take activation.
delayed_return_state = self.prev_inner
delayed_return_output = self.prev_outer
self.prev_inner = inner_excitation
self.prev_outer = outer_excitation
return delayed_return_state, delayed_return_output
###############################################################################
class SpikingNeuronLayerCRNN(nn.Module):
def __init__(self, device, n_hidden=64, decay_multiplier=0.9, threshold=2.0, penalty_threshold=2.5):
super(SpikingNeuronLayerCRNN, self).__init__()
self.device = device
self.n_hidden = n_hidden
self.decay_multiplier = decay_multiplier
self.threshold = threshold
self.penalty_threshold = penalty_threshold
self.conv = torch.nn.Sequential(
nn.Conv2d(1, 6, 5),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(6, 12, 5),
nn.ReLU(),
)
self.fc = torch.nn.Sequential(
nn.Linear(12 * 8 * 8, 100),
nn.ReLU()
)
self.init_parameters()
self.reset_state()
self.to(self.device)
def init_parameters(self):
for param in self.parameters():
if param.dim() >= 2:
nn.init.xavier_uniform_(param)
def reset_state(self):
self.prev_inner = torch.zeros([self.n_hidden]).to(self.device)
self.prev_outer = torch.zeros([self.n_hidden]).to(self.device)
def forward(self, x):
"""
Call the neuron at every time step.
x: activated_neurons_below
        return: a tuple of (state, output) for this time step. Each item in the tuple
        is itself a PyTorch tensor of shape (batch_size, n_hidden), so the whole
        return value would be of shape (2, batch_size, n_hidden) if stacked.
"""
if self.prev_inner.dim() == 1:
# Adding batch_size dimension directly after doing a `self.reset_state()`:
batch_size = x.shape[0]
self.prev_inner = torch.stack(batch_size * [self.prev_inner])
self.prev_outer = torch.stack(batch_size * [self.prev_outer])
# 1. Weight matrix multiplies the input x
conv_excitation = self.conv(x)
conv_excitation = conv_excitation.view(-1, 12 * 8 * 8)
input_excitation = self.fc(conv_excitation)
# 2. We add the result to a decayed version of the information we already had.
inner_excitation = input_excitation + self.prev_inner * self.decay_multiplier
# 3. We compute the activation of the neuron to find its output value,
        # but before the activation there is also a negative bias that refrains the neuron from firing too much.
        outer_excitation = F.relu(inner_excitation - self.threshold)
        # 4. If the neuron fires, its activation is subtracted from its inner state
        # (with an extra penalty that increases the refractory time),
        # because it discharges naturally and shouldn't fire twice.
do_penalize_gate = (outer_excitation > 0).float()
# TODO: remove following /2?
inner_excitation = inner_excitation - (self.penalty_threshold/self.threshold * inner_excitation) * do_penalize_gate
# 5. The outer excitation has a negative part after the positive part.
outer_excitation = outer_excitation #+ torch.abs(self.prev_outer) * self.decay_multiplier / 2.0
# 6. Setting internal values before returning.
# And the returning value is the one of the previous time step to delay
# activation of 1 time step of "processing" time. For logits, we don't take activation.
delayed_return_state = self.prev_inner
delayed_return_output = self.prev_outer
self.prev_inner = inner_excitation
self.prev_outer = outer_excitation
return delayed_return_state, delayed_return_output
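# Shape sanity check (illustrative): for MNIST inputs of 1x28x28 the conv stack above
# gives 28 -> 24 (5x5 conv) -> 12 (2x2 max-pool) -> 8 (5x5 conv), i.e. 12 x 8 x 8
# features, matching the nn.Linear(12 * 8 * 8, 100) layer that follows.
_probe = SpikingNeuronLayerCRNN(device).conv(torch.zeros(1, 1, 28, 28, device=device))
assert _probe.shape == (1, 12, 8, 8)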
###############################################################################
class InputDataToSpikingPerceptronLayer(nn.Module):
def __init__(self, device):
super(InputDataToSpikingPerceptronLayer, self).__init__()
self.device = device
self.reset_state()
self.to(self.device)
def reset_state(self):
# self.prev_state = torch.zeros([self.n_hidden]).to(self.device)
pass
def forward(self, x, is_2D=True):
#x = x.view(x.size(0), -1) # Flatten 2D image to 1D for FC
random_activation_perceptron = torch.rand(x.shape).to(self.device)
return random_activation_perceptron * x
###############################################################################
class OutputDataToSpikingPerceptronLayer(nn.Module):
def __init__(self, average_output=True):
"""
average_output: might be needed if this is used within a regular neural net as a layer.
Otherwise, sum may be numerically more stable for gradients with setting average_output=False.
"""
super(OutputDataToSpikingPerceptronLayer, self).__init__()
        if average_output:
            self.reducer = lambda x, dim: x.mean(dim=dim)  # average over the time axis
        else:
            self.reducer = lambda x, dim: x.sum(dim=dim)  # sum over the time axis
def forward(self, x):
if type(x) == list:
x = torch.stack(x)
return self.reducer(x, 0)
###############################################################################
class SpikingConvNet(nn.Module):
def __init__(self, device, n_time_steps, begin_eval):
super(SpikingConvNet, self).__init__()
assert (0 <= begin_eval and begin_eval < n_time_steps)
self.device = device
self.n_time_steps = n_time_steps
self.begin_eval = begin_eval
self.input_conversion = InputDataToSpikingPerceptronLayer(device)
self.layer1 = SpikingNeuronLayerCRNN(
device, n_hidden=100,
decay_multiplier=0.9, threshold=1.0, penalty_threshold=1.5
)
self.layer2 = SpikingNeuronLayerRNN(
device, n_inputs=100, n_hidden=10,
decay_multiplier=0.9, threshold=1.0, penalty_threshold=1.5
)
self.output_conversion = OutputDataToSpikingPerceptronLayer(average_output=False) # Sum on outputs.
self.to(self.device)
def forward_through_time(self, x):
"""
        This acts as a layer. Its input is not time-dependent, and neither is its output.
        The time iterations happen inside, and the per-time-step results are pooled
        over the time axis before returning, so this pipeline can be mixed with
        regular backprop layers for the input and the output data.
"""
self.input_conversion.reset_state()
self.layer1.reset_state()
self.layer2.reset_state()
out = []
all_layer1_states = []
all_layer1_outputs = []
all_layer2_states = []
all_layer2_outputs = []
for _ in range(self.n_time_steps):
xi = self.input_conversion(x)
# For layer 1, we take the regular output.
layer1_state, layer1_output = self.layer1(xi)
            # We take the inner state of layer 2 because it's pre-activation and thus acts as our logits.
layer2_state, layer2_output = self.layer2(layer1_output)
all_layer1_states.append(layer1_state)
all_layer1_outputs.append(layer1_output)
all_layer2_states.append(layer2_state)
all_layer2_outputs.append(layer2_output)
out.append(layer2_state)
out = self.output_conversion(out[self.begin_eval:])
return out, [[all_layer1_states, all_layer1_outputs], [all_layer2_states, all_layer2_outputs]]
def forward(self, x):
out, _ = self.forward_through_time(x)
return F.log_softmax(out, dim=-1)
def visualize_all_neurons(self, x):
assert x.shape[0] == 1 and len(x.shape) == 4, (
"Pass only 1 example to SpikingNet.visualize(x) with outer dimension shape of 1.")
_, layers_state = self.forward_through_time(x)
for i, (all_layer_states, all_layer_outputs) in enumerate(layers_state):
layer_state = torch.stack(all_layer_states).data.cpu().numpy().squeeze().transpose()
layer_output = torch.stack(all_layer_outputs).data.cpu().numpy().squeeze().transpose()
self.plot_layer(layer_state, title="Inner state values of neurons for layer {}".format(i))
self.plot_layer(layer_output, title="Output spikes (activation) values of neurons for layer {}".format(i))
def visualize_neuron(self, x, layer_idx, neuron_idx):
assert x.shape[0] == 1 and len(x.shape) == 4, (
"Pass only 1 example to SpikingNet.visualize(x) with outer dimension shape of 1.")
_, layers_state = self.forward_through_time(x)
all_layer_states, all_layer_outputs = layers_state[layer_idx]
layer_state = torch.stack(all_layer_states).data.cpu().numpy().squeeze().transpose()
layer_output = torch.stack(all_layer_outputs).data.cpu().numpy().squeeze().transpose()
self.plot_neuron(layer_state[neuron_idx], title="Inner state values neuron {} of layer {}".format(neuron_idx, layer_idx))
self.plot_neuron(layer_output[neuron_idx], title="Output spikes (activation) values of neuron {} of layer {}".format(neuron_idx, layer_idx))
def plot_layer(self, layer_values, title):
"""
This function is derived from:
https://github.com/guillaume-chevalier/LSTM-Human-Activity-Recognition
Which was released under the MIT License.
"""
width = max(16, layer_values.shape[0] / 8)
height = max(4, layer_values.shape[1] / 8)
plt.figure(figsize=(width, height))
plt.imshow(
layer_values,
interpolation="nearest",
cmap=plt.cm.rainbow
)
plt.title(title)
plt.colorbar()
plt.xlabel("Time")
plt.ylabel("Neurons of layer")
plt.show()
def plot_neuron(self, neuron_through_time, title):
width = max(16, len(neuron_through_time) / 8)
height = 4
plt.figure(figsize=(width, height))
plt.title(title)
plt.plot(neuron_through_time)
plt.xlabel("Time")
plt.ylabel("Neuron's activation")
plt.show()
###############################################################################
class NonSpikingNet(nn.Module):
def __init__(self):
super(NonSpikingNet, self).__init__()
self.layer1 = nn.Linear(1* 28* 28, 100)
self.layer2 = nn.Linear(100, 10)
def forward(self, x, is_2D=True):
x = x.view(x.size(0), -1) # Flatten 2D image to 1D for FC
x = F.relu(self.layer1(x))
x = self.layer2(x)
return F.log_softmax(x, dim=-1)
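###############################################################################
# Illustrative only: one time step of the integrate-and-fire update implemented in the
# forward() methods above, on made-up scalar values (decay_multiplier=0.9, threshold=1.0,
# penalty_threshold=1.5). It is not used by the models; it only makes the update rule explicit.
_prev_inner = torch.tensor([0.8])
_input_excitation = torch.tensor([0.6])
_inner = _input_excitation + _prev_inner * 0.9                    # decayed accumulation -> 1.32
_outer = F.relu(_inner - 1.0)                                     # spike of 0.32 above the threshold
_inner = _inner - (1.5 / 1.0) * _inner * (_outer > 0).float()     # refractory penalty -> -0.66
print(_inner.item(), _outer.item())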
# + id="pkQjPLVb8Dl9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="f1a7328e-a4e6-453e-b7b5-b1dcab061a81"
###############################################################################
num_epoch = 15
time_step_list = [128/2,128/4,128/8,128/16,128/32,128/64]
total_time_models = []
fig, ax_lst = plt.subplots(2, len(time_step_list)+1, sharex=True, sharey='row',figsize=(30,10))
for i,step in enumerate(time_step_list):
spiking_model = SpikingConvNet(device, n_time_steps=int(step), begin_eval=0)
loss, acc, time_m = train_many_epochs(spiking_model, epochs = num_epoch)
total_time_models.append(time_m)
ax_lst[0,i].plot(acc)
ax_lst[0,i].set_title("Spiking Acc, n time steps = %d"%int(step))
ax_lst[1,i].plot(loss)
ax_lst[1,i].set_title("Spiking Loss, n time steps = %d"%int(step))
non_spiking_model = NonSpikingNet().to(device)
loss, acc, time_m = train_many_epochs(non_spiking_model, epochs = num_epoch)
ax_lst[0,len(time_step_list)].plot(acc)
ax_lst[0,len(time_step_list)].set_title("NN Acc")
ax_lst[1,len(time_step_list)].plot(loss)
ax_lst[1,len(time_step_list)].set_title("NN Loss")
plt.show()
plt.figure(figsize=(10,10))
plt.plot(time_step_list, total_time_models)
plt.show()
# + id="4ciYKZur8_yE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 360, "referenced_widgets": ["2ba23f776ab040fda0e0202805249320", "9bdf8ef620ec40388be2e6ddf58d0c20", "07b7c4da9a514f0191455ae9ce7e28bf", "423b214e0a5f462aa93cf1a08ae9efb9", "3e3b108890614064be8fb823a8b98b8a", "1b9b752119044c6eb659bf0ec63d241f", "11c4a909557f425db340dd85cbb7725e", "fa1eb25a8e954d9e8d5061e2da50e17f", "69e431ae13ed46dcbdc699a2a0496617", "32fefda8590d4b60b514147b0482353b", "24bf69ed66fc4657996ae7d597666d3f", "dffe0a457d76450ab4d19f712fd07f25", "28f2c3a3027743a88e792e0b4dca2b56", "499d1d2e06f44aa29d4b8ef309ba05a0", "d85cab7e115a470194dcf12fd334b626", "966f1c771a1a415eb32e12887022b56d", "b0bd3fe65ce0487f825449368a0d2743", "7ced0740272c45bba368424b4984b97f", "583a8152dfb64d5c9ccd49fd6bb2f6ef", "d03037b0ea0549eaa8badbde9064d30b", "f028d837d1e54656b036fa872c7f5150", "9584e17665b141d2baf383e637540958", "8962f02bb95d47a7abcb3e9ef07b5d37", "3b351921b0dc4ae48cf3be9deabe277a", "eb141a1d45d34b05bc5b8693d0eeac45", "c48c01172e59407ab74208f7944cbf79", "a05d51791bf3435ea2a6fca31e875642", "24d13fc8f4c84c288f311f41a4d13cf5", "4f131ee1b1444d07926ad2ffb2343324", "80bacc958445442a9ff8ce98b7e30f5a", "0aa359bc1ed64c4198ade5204abed1c8", "d9abc056cfb2472eb4ef652660876094"]} outputId="79312677-b07b-4095-a34e-e8a66c9c7f47"
#Imports
import os
import matplotlib.pyplot as plt
import torchvision.datasets
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.autograd import Variable
###############################################################################
def train(model, device, train_set_loader, optimizer, epoch, logging_interval=1000):
# This method is derived from:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
# Was licensed BSD-3-clause
model.train()
for batch_idx, (data, target) in enumerate(train_set_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % logging_interval == 0:
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct = pred.eq(target.view_as(pred)).float().mean().item()
print('Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f} Accuracy: {:.2f}%'.format(
epoch, batch_idx * len(data), len(train_set_loader.dataset),
100. * batch_idx / len(train_set_loader), loss.item(),
100. * correct))
return loss.item()
import time
def train_many_epochs(model, epochs = 10):
lr = 0.1
start = time.time()
loss_ot = []
acc_ot = []
for epoch in range(epochs):
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.7)
loss = train(model, device, train_set_loader, optimizer, epoch, logging_interval=1000)
acc = test(model, device, test_set_loader)
loss_ot.append(loss)
acc_ot.append(acc)
lr = lr/2.0
end = time.time()
total_time = end - start
print(total_time)
return loss_ot, acc_ot, total_time
def test(model, device, test_set_loader):
# This method is derived from:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
# Was licensed BSD-3-clause
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_set_loader:
data, target = data.to(device), target.to(device)
output = model(data)
# Note: with `reduce=True`, I'm not sure what would happen with a final batch size
# that would be smaller than regular previous batch sizes. For now it works.
test_loss += F.nll_loss(output, target, reduce=True).item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_set_loader.dataset)
print("")
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
test_loss,
correct, len(test_set_loader.dataset),
100. * correct / len(test_set_loader.dataset)))
print("")
return 100. * correct / len(test_set_loader.dataset)
def download_mnist(data_path):
if not os.path.exists(data_path):
os.mkdir(data_path)
transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
training_set = torchvision.datasets.MNIST(data_path, train=True, transform=transformation, download=True)
testing_set = torchvision.datasets.MNIST(data_path, train=False, transform=transformation, download=True)
return training_set, testing_set
###############################################################################
batch_size = 32
DATA_PATH = './data'
training_set, testing_set = download_mnist(DATA_PATH)
train_set_loader = torch.utils.data.DataLoader(
dataset=training_set,
batch_size=batch_size,
shuffle=True)
test_set_loader = torch.utils.data.DataLoader(
dataset=testing_set,
batch_size=batch_size,
shuffle=False)
###############################################################################
# Use the GPU whenever possible!
import torch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
###############################################################################
class SpikingNeuronLayerRNN(nn.Module):
def __init__(self, device, n_inputs=28*28, n_hidden=100, decay_multiplier=0.9, threshold=2.0, penalty_threshold=2.5):
super(SpikingNeuronLayerRNN, self).__init__()
self.device = device
self.n_inputs = n_inputs
self.n_hidden = n_hidden
self.decay_multiplier = decay_multiplier
self.threshold = threshold
self.penalty_threshold = penalty_threshold
self.fc = nn.Linear(n_inputs, n_hidden)
self.init_parameters()
self.reset_state()
self.to(self.device)
def init_parameters(self):
for param in self.parameters():
if param.dim() >= 2:
nn.init.xavier_uniform_(param)
def reset_state(self):
self.prev_inner = torch.zeros([self.n_hidden]).to(self.device)
self.prev_outer = torch.zeros([self.n_hidden]).to(self.device)
def forward(self, x):
"""
Call the neuron at every time step.
x: activated_neurons_below
        return: a tuple (state, output) for this time step. Each item in the tuple
        is of shape (batch_size, n_hidden) and is a PyTorch tensor, so the whole return
        value would be of shape (2, batch_size, n_hidden) if stacked.
"""
if self.prev_inner.dim() == 1:
# Adding batch_size dimension directly after doing a `self.reset_state()`:
batch_size = x.shape[0]
self.prev_inner = torch.stack(batch_size * [self.prev_inner])
self.prev_outer = torch.stack(batch_size * [self.prev_outer])
# 1. Weight matrix multiplies the input x
input_excitation = self.fc(x)
# 2. We add the result to a decayed version of the information we already had.
inner_excitation = input_excitation + self.prev_inner * self.decay_multiplier
        # 3. We compute the activation of the neuron to find its output value,
        #    but before the activation, a negative bias (the threshold) keeps the neuron from firing too much.
        outer_excitation = F.relu(inner_excitation - self.threshold)
        # 4. If the neuron fires, its activation is subtracted from its inner state
        #    (with an extra penalty to increase the refractory time),
        #    because it discharges naturally, so it shouldn't fire twice in a row.
do_penalize_gate = (outer_excitation > 0).float()
# TODO: remove following /2?
inner_excitation = inner_excitation - (self.penalty_threshold/self.threshold * inner_excitation) * do_penalize_gate
# 5. The outer excitation has a negative part after the positive part.
outer_excitation = outer_excitation #+ torch.abs(self.prev_outer) * self.decay_multiplier / 2.0
        # 6. Store the internal values before returning.
        #    The returned values are those of the previous time step, which delays the
        #    activation by one "processing" time step. For logits, we use the inner state rather than the spike activation.
delayed_return_state = self.prev_inner
delayed_return_output = self.prev_outer
self.prev_inner = inner_excitation
self.prev_outer = outer_excitation
return delayed_return_state, delayed_return_output
###############################################################################
class SpikingNeuronLayerCRNN(nn.Module):
def __init__(self, device, n_hidden=64, decay_multiplier=0.9, threshold=2.0, penalty_threshold=2.5):
super(SpikingNeuronLayerCRNN, self).__init__()
self.device = device
self.n_hidden = n_hidden
self.decay_multiplier = decay_multiplier
self.threshold = threshold
self.penalty_threshold = penalty_threshold
self.conv = torch.nn.Sequential(
nn.Conv2d(1, 6, 5),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(6, 12, 5),
nn.ReLU(),
)
self.fc = torch.nn.Sequential(
nn.Linear(12 * 8 * 8, 100),
nn.ReLU()
)
self.init_parameters()
self.reset_state()
self.to(self.device)
def init_parameters(self):
for param in self.parameters():
if param.dim() >= 2:
nn.init.xavier_uniform_(param)
def reset_state(self):
self.prev_inner = torch.zeros([self.n_hidden]).to(self.device)
self.prev_outer = torch.zeros([self.n_hidden]).to(self.device)
def forward(self, x):
"""
Call the neuron at every time step.
x: activated_neurons_below
        return: a tuple (state, output) for this time step. Each item in the tuple
        is of shape (batch_size, n_hidden) and is a PyTorch tensor, so the whole return
        value would be of shape (2, batch_size, n_hidden) if stacked.
"""
if self.prev_inner.dim() == 1:
# Adding batch_size dimension directly after doing a `self.reset_state()`:
batch_size = x.shape[0]
self.prev_inner = torch.stack(batch_size * [self.prev_inner])
self.prev_outer = torch.stack(batch_size * [self.prev_outer])
# 1. Weight matrix multiplies the input x
conv_excitation = self.conv(x)
conv_excitation = conv_excitation.view(-1, 12 * 8 * 8)
input_excitation = self.fc(conv_excitation)
# 2. We add the result to a decayed version of the information we already had.
inner_excitation = input_excitation + self.prev_inner * self.decay_multiplier
        # 3. We compute the activation of the neuron to find its output value,
        #    but before the activation, a negative bias (the threshold) keeps the neuron from firing too much.
        outer_excitation = F.relu(inner_excitation - self.threshold)
        # 4. If the neuron fires, its activation is subtracted from its inner state
        #    (with an extra penalty to increase the refractory time),
        #    because it discharges naturally, so it shouldn't fire twice in a row.
do_penalize_gate = (outer_excitation > 0).float()
# TODO: remove following /2?
inner_excitation = inner_excitation - (self.penalty_threshold/self.threshold * inner_excitation) * do_penalize_gate
# 5. The outer excitation has a negative part after the positive part.
outer_excitation = outer_excitation #+ torch.abs(self.prev_outer) * self.decay_multiplier / 2.0
        # 6. Store the internal values before returning.
        #    The returned values are those of the previous time step, which delays the
        #    activation by one "processing" time step. For logits, we use the inner state rather than the spike activation.
delayed_return_state = self.prev_inner
delayed_return_output = self.prev_outer
self.prev_inner = inner_excitation
self.prev_outer = outer_excitation
return delayed_return_state, delayed_return_output
###############################################################################
class InputDataToSpikingPerceptronLayer(nn.Module):
def __init__(self, device):
super(InputDataToSpikingPerceptronLayer, self).__init__()
self.device = device
self.reset_state()
self.to(self.device)
def reset_state(self):
# self.prev_state = torch.zeros([self.n_hidden]).to(self.device)
pass
def forward(self, x, is_2D=True):
#x = x.view(x.size(0), -1) # Flatten 2D image to 1D for FC
random_activation_perceptron = torch.rand(x.shape).to(self.device)
return random_activation_perceptron * x
###############################################################################
class OutputDataToSpikingPerceptronLayer(nn.Module):
def __init__(self, average_output=True):
"""
average_output: might be needed if this is used within a regular neural net as a layer.
Otherwise, sum may be numerically more stable for gradients with setting average_output=False.
"""
super(OutputDataToSpikingPerceptronLayer, self).__init__()
        if average_output:
            self.reducer = lambda x, dim: x.mean(dim=dim)
        else:
            self.reducer = lambda x, dim: x.sum(dim=dim)
def forward(self, x):
if type(x) == list:
x = torch.stack(x)
return self.reducer(x, 0)
###############################################################################
class SpikingConvNet(nn.Module):
def __init__(self, device, n_time_steps, begin_eval):
super(SpikingConvNet, self).__init__()
assert (0 <= begin_eval and begin_eval < n_time_steps)
self.device = device
self.n_time_steps = n_time_steps
self.begin_eval = begin_eval
self.input_conversion = InputDataToSpikingPerceptronLayer(device)
self.layer1 = SpikingNeuronLayerCRNN(
device, n_hidden=100,
decay_multiplier=0.9, threshold=1.0, penalty_threshold=1.5
)
self.layer2 = SpikingNeuronLayerRNN(
device, n_inputs=100, n_hidden=10,
decay_multiplier=0.9, threshold=1.0, penalty_threshold=1.5
)
self.output_conversion = OutputDataToSpikingPerceptronLayer(average_output=False) # Sum on outputs.
self.to(self.device)
def forward_through_time(self, x):
"""
        This acts as a layer: its input is not time-related, and neither is its output.
        The time iteration happens inside, and the spiking outputs are reduced (summed or
        averaged) over the time axis before being returned, so that this pipeline can be
        mixed with regular backprop layers such as the input and output conversion layers.
"""
self.input_conversion.reset_state()
self.layer1.reset_state()
self.layer2.reset_state()
out = []
all_layer1_states = []
all_layer1_outputs = []
all_layer2_states = []
all_layer2_outputs = []
for _ in range(self.n_time_steps):
xi = self.input_conversion(x)
# For layer 1, we take the regular output.
layer1_state, layer1_output = self.layer1(xi)
            # We take the inner state of layer 2 because it is the pre-activation and thus acts as our logits.
layer2_state, layer2_output = self.layer2(layer1_output)
all_layer1_states.append(layer1_state)
all_layer1_outputs.append(layer1_output)
all_layer2_states.append(layer2_state)
all_layer2_outputs.append(layer2_output)
out.append(layer2_state)
out = self.output_conversion(out[self.begin_eval:])
return out, [[all_layer1_states, all_layer1_outputs], [all_layer2_states, all_layer2_outputs]]
def forward(self, x):
out, _ = self.forward_through_time(x)
return F.log_softmax(out, dim=-1)
def visualize_all_neurons(self, x):
assert x.shape[0] == 1 and len(x.shape) == 4, (
"Pass only 1 example to SpikingNet.visualize(x) with outer dimension shape of 1.")
_, layers_state = self.forward_through_time(x)
for i, (all_layer_states, all_layer_outputs) in enumerate(layers_state):
layer_state = torch.stack(all_layer_states).data.cpu().numpy().squeeze().transpose()
layer_output = torch.stack(all_layer_outputs).data.cpu().numpy().squeeze().transpose()
self.plot_layer(layer_state, title="Inner state values of neurons for layer {}".format(i))
self.plot_layer(layer_output, title="Output spikes (activation) values of neurons for layer {}".format(i))
def visualize_neuron(self, x, layer_idx, neuron_idx):
assert x.shape[0] == 1 and len(x.shape) == 4, (
"Pass only 1 example to SpikingNet.visualize(x) with outer dimension shape of 1.")
_, layers_state = self.forward_through_time(x)
all_layer_states, all_layer_outputs = layers_state[layer_idx]
layer_state = torch.stack(all_layer_states).data.cpu().numpy().squeeze().transpose()
layer_output = torch.stack(all_layer_outputs).data.cpu().numpy().squeeze().transpose()
self.plot_neuron(layer_state[neuron_idx], title="Inner state values neuron {} of layer {}".format(neuron_idx, layer_idx))
self.plot_neuron(layer_output[neuron_idx], title="Output spikes (activation) values of neuron {} of layer {}".format(neuron_idx, layer_idx))
def plot_layer(self, layer_values, title):
"""
This function is derived from:
https://github.com/guillaume-chevalier/LSTM-Human-Activity-Recognition
Which was released under the MIT License.
"""
width = max(16, layer_values.shape[0] / 8)
height = max(4, layer_values.shape[1] / 8)
plt.figure(figsize=(width, height))
plt.imshow(
layer_values,
interpolation="nearest",
cmap=plt.cm.rainbow
)
plt.title(title)
plt.colorbar()
plt.xlabel("Time")
plt.ylabel("Neurons of layer")
plt.show()
def plot_neuron(self, neuron_through_time, title):
width = max(16, len(neuron_through_time) / 8)
height = 4
plt.figure(figsize=(width, height))
plt.title(title)
plt.plot(neuron_through_time)
plt.xlabel("Time")
plt.ylabel("Neuron's activation")
plt.show()
###############################################################################
class NonSpikingNet(nn.Module):
def __init__(self):
super(NonSpikingNet, self).__init__()
self.layer1 = nn.Linear(1* 28* 28, 100)
self.layer2 = nn.Linear(100, 10)
def forward(self, x, is_2D=True):
x = x.view(x.size(0), -1) # Flatten 2D image to 1D for FC
x = F.relu(self.layer1(x))
x = self.layer2(x)
return F.log_softmax(x, dim=-1)
# + id="HhHyVvrLKMLc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="a4bc667b-bdd0-4292-b2b4-180e361b19bd"
###############################################################################
num_epoch = 15
time_step_list = [128/1,128/2]
total_time_models = []
fig, ax_lst = plt.subplots(2, len(time_step_list)+1, sharex=True, sharey='row',figsize=(30,10))
for i,step in enumerate(time_step_list):
spiking_model = SpikingConvNet(device, n_time_steps=int(step), begin_eval=0)
loss, acc, time_m = train_many_epochs(spiking_model, epochs = num_epoch)
total_time_models.append(time_m)
ax_lst[0,i].plot(acc)
ax_lst[0,i].set_title("Spiking Acc, n time steps = %d"%int(step))
ax_lst[1,i].plot(loss)
ax_lst[1,i].set_title("Spiking Loss, n time steps = %d"%int(step))
# + [markdown] id="Wigpabl0v8r2" colab_type="text"
# Colab killed my above code :(
# + id="eFpg8Udkv8Gr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="00fca052-cbb4-44d3-88cf-00aa877d2783"
###############################################################################
num_epoch = 10
time_step_list = [128/2]
total_time_models = []
fig, ax_lst = plt.subplots(2, len(time_step_list)+1, sharex=True, sharey='row',figsize=(30,10))
for i,step in enumerate(time_step_list):
spiking_model = SpikingConvNet(device, n_time_steps=int(step), begin_eval=0)
loss, acc, time_m = train_many_epochs(spiking_model, epochs = num_epoch)
total_time_models.append(time_m)
ax_lst[0,i].plot(acc)
ax_lst[0,i].set_title("Spiking Acc, n time steps = %d"%int(step))
ax_lst[1,i].plot(loss)
ax_lst[1,i].set_title("Spiking Loss, n time steps = %d"%int(step))
# + id="IOr7kSpy8Qvo" colab_type="code" colab={}
#Imports
import os
import matplotlib.pyplot as plt
import torchvision.datasets
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from torch.autograd import Variable
###############################################################################
def train(model, device, train_set_loader, optimizer, epoch, logging_interval=1000):
# This method is derived from:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
# Was licensed BSD-3-clause
model.train()
for batch_idx, (data, target) in enumerate(train_set_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % logging_interval == 0:
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct = pred.eq(target.view_as(pred)).float().mean().item()
print('Train Epoch: {} [{}/{} ({:.0f}%)] Loss: {:.6f} Accuracy: {:.2f}%'.format(
epoch, batch_idx * len(data), len(train_set_loader.dataset),
100. * batch_idx / len(train_set_loader), loss.item(),
100. * correct))
return loss.item()
import time
def train_many_epochs(model, epochs = 10):
lr = 0.1
start = time.time()
loss_ot = []
acc_ot = []
for epoch in range(epochs):
optimizer = optim.SGD(model.parameters(), lr=0.03, momentum=0.9)
loss = train(model, device, train_set_loader, optimizer, epoch, logging_interval=1000)
acc = test(model, device, test_set_loader)
loss_ot.append(loss)
acc_ot.append(acc)
lr = lr/2.0
end = time.time()
total_time = end - start
print(total_time)
return loss_ot, acc_ot, total_time
def test(model, device, test_set_loader):
# This method is derived from:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
# Was licensed BSD-3-clause
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_set_loader:
data, target = data.to(device), target.to(device)
output = model(data)
# Note: with `reduce=True`, I'm not sure what would happen with a final batch size
# that would be smaller than regular previous batch sizes. For now it works.
test_loss += F.nll_loss(output, target, reduce=True).item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_set_loader.dataset)
print("")
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
test_loss,
correct, len(test_set_loader.dataset),
100. * correct / len(test_set_loader.dataset)))
print("")
return 100. * correct / len(test_set_loader.dataset)
def download_mnist(data_path):
if not os.path.exists(data_path):
os.mkdir(data_path)
transformation = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
training_set = torchvision.datasets.MNIST(data_path, train=True, transform=transformation, download=True)
testing_set = torchvision.datasets.MNIST(data_path, train=False, transform=transformation, download=True)
return training_set, testing_set
###############################################################################
batch_size = 32
DATA_PATH = './data'
training_set, testing_set = download_mnist(DATA_PATH)
train_set_loader = torch.utils.data.DataLoader(
dataset=training_set,
batch_size=batch_size,
shuffle=True)
test_set_loader = torch.utils.data.DataLoader(
dataset=testing_set,
batch_size=batch_size,
shuffle=False)
###############################################################################
# Use the GPU whenever possible!
import torch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
###############################################################################
class SpikingNeuronLayerRNN(nn.Module):
def __init__(self, device, n_inputs=28*28, n_hidden=100, decay_multiplier=0.9, threshold=2.0, penalty_threshold=2.5):
super(SpikingNeuronLayerRNN, self).__init__()
self.device = device
self.n_inputs = n_inputs
self.n_hidden = n_hidden
self.decay_multiplier = decay_multiplier
self.threshold = threshold
self.penalty_threshold = penalty_threshold
self.fc = nn.Linear(n_inputs, n_hidden)
self.init_parameters()
self.reset_state()
self.to(self.device)
def init_parameters(self):
for param in self.parameters():
if param.dim() >= 2:
nn.init.xavier_uniform_(param)
def reset_state(self):
self.prev_inner = torch.zeros([self.n_hidden]).to(self.device)
self.prev_outer = torch.zeros([self.n_hidden]).to(self.device)
def forward(self, x):
"""
Call the neuron at every time step.
x: activated_neurons_below
        return: a tuple (state, output) for this time step. Each item in the tuple
        is of shape (batch_size, n_hidden) and is a PyTorch tensor, so the whole return
        value would be of shape (2, batch_size, n_hidden) if stacked.
"""
if self.prev_inner.dim() == 1:
# Adding batch_size dimension directly after doing a `self.reset_state()`:
batch_size = x.shape[0]
self.prev_inner = torch.stack(batch_size * [self.prev_inner])
self.prev_outer = torch.stack(batch_size * [self.prev_outer])
# 1. Weight matrix multiplies the input x
input_excitation = self.fc(x)
# 2. We add the result to a decayed version of the information we already had.
inner_excitation = input_excitation + self.prev_inner * self.decay_multiplier
        # 3. We compute the activation of the neuron to find its output value,
        #    but before the activation, a negative bias (the threshold) keeps the neuron from firing too much.
        outer_excitation = F.relu(inner_excitation - self.threshold)
        # 4. If the neuron fires, its activation is subtracted from its inner state
        #    (with an extra penalty to increase the refractory time),
        #    because it discharges naturally, so it shouldn't fire twice in a row.
do_penalize_gate = (outer_excitation > 0).float()
# TODO: remove following /2?
inner_excitation = inner_excitation - (self.penalty_threshold/self.threshold * inner_excitation) * do_penalize_gate
# 5. The outer excitation has a negative part after the positive part.
outer_excitation = outer_excitation #+ torch.abs(self.prev_outer) * self.decay_multiplier / 2.0
        # 6. Store the internal values before returning.
        #    The returned values are those of the previous time step, which delays the
        #    activation by one "processing" time step. For logits, we use the inner state rather than the spike activation.
delayed_return_state = self.prev_inner
delayed_return_output = self.prev_outer
self.prev_inner = inner_excitation
self.prev_outer = outer_excitation
return delayed_return_state, delayed_return_output
###############################################################################
class SpikingNeuronLayerCRNN(nn.Module):
def __init__(self, device, n_hidden=64, decay_multiplier=0.9, threshold=2.0, penalty_threshold=2.5):
super(SpikingNeuronLayerCRNN, self).__init__()
self.device = device
self.n_hidden = n_hidden
self.decay_multiplier = decay_multiplier
self.threshold = threshold
self.penalty_threshold = penalty_threshold
self.conv = torch.nn.Sequential(
nn.Conv2d(1, 6, 5),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(6, 12, 5),
nn.ReLU(),
)
self.fc = torch.nn.Sequential(
nn.Linear(12 * 8 * 8, 100),
nn.ReLU()
)
self.init_parameters()
self.reset_state()
self.to(self.device)
def init_parameters(self):
for param in self.parameters():
if param.dim() >= 2:
nn.init.xavier_uniform_(param)
def reset_state(self):
self.prev_inner = torch.zeros([self.n_hidden]).to(self.device)
self.prev_outer = torch.zeros([self.n_hidden]).to(self.device)
def forward(self, x):
"""
Call the neuron at every time step.
x: activated_neurons_below
        return: a tuple (state, output) for this time step. Each item in the tuple
        is of shape (batch_size, n_hidden) and is a PyTorch tensor, so the whole return
        value would be of shape (2, batch_size, n_hidden) if stacked.
"""
if self.prev_inner.dim() == 1:
# Adding batch_size dimension directly after doing a `self.reset_state()`:
batch_size = x.shape[0]
self.prev_inner = torch.stack(batch_size * [self.prev_inner])
self.prev_outer = torch.stack(batch_size * [self.prev_outer])
# 1. Weight matrix multiplies the input x
conv_excitation = self.conv(x)
conv_excitation = conv_excitation.view(-1, 12 * 8 * 8)
input_excitation = self.fc(conv_excitation)
# 2. We add the result to a decayed version of the information we already had.
inner_excitation = input_excitation + self.prev_inner * self.decay_multiplier
        # 3. We compute the activation of the neuron to find its output value,
        #    but before the activation, a negative bias (the threshold) keeps the neuron from firing too much.
        outer_excitation = F.relu(inner_excitation - self.threshold)
        # 4. If the neuron fires, its activation is subtracted from its inner state
        #    (with an extra penalty to increase the refractory time),
        #    because it discharges naturally, so it shouldn't fire twice in a row.
do_penalize_gate = (outer_excitation > 0).float()
# TODO: remove following /2?
inner_excitation = inner_excitation - (self.penalty_threshold/self.threshold * inner_excitation) * do_penalize_gate
# 5. The outer excitation has a negative part after the positive part.
outer_excitation = outer_excitation #+ torch.abs(self.prev_outer) * self.decay_multiplier / 2.0
        # 6. Store the internal values before returning.
        #    The returned values are those of the previous time step, which delays the
        #    activation by one "processing" time step. For logits, we use the inner state rather than the spike activation.
delayed_return_state = self.prev_inner
delayed_return_output = self.prev_outer
self.prev_inner = inner_excitation
self.prev_outer = outer_excitation
return delayed_return_state, delayed_return_output
###############################################################################
class InputDataToSpikingPerceptronLayer(nn.Module):
def __init__(self, device):
super(InputDataToSpikingPerceptronLayer, self).__init__()
self.device = device
self.reset_state()
self.to(self.device)
def reset_state(self):
# self.prev_state = torch.zeros([self.n_hidden]).to(self.device)
pass
def forward(self, x, is_2D=True):
#x = x.view(x.size(0), -1) # Flatten 2D image to 1D for FC
random_activation_perceptron = torch.rand(x.shape).to(self.device)
return random_activation_perceptron * x
###############################################################################
class OutputDataToSpikingPerceptronLayer(nn.Module):
def __init__(self, average_output=True):
"""
average_output: might be needed if this is used within a regular neural net as a layer.
Otherwise, sum may be numerically more stable for gradients with setting average_output=False.
"""
super(OutputDataToSpikingPerceptronLayer, self).__init__()
        if average_output:
            self.reducer = lambda x, dim: x.mean(dim=dim)
        else:
            self.reducer = lambda x, dim: x.sum(dim=dim)
def forward(self, x):
if type(x) == list:
x = torch.stack(x)
return self.reducer(x, 0)
###############################################################################
class SpikingConvNet(nn.Module):
def __init__(self, device, n_time_steps, begin_eval):
super(SpikingConvNet, self).__init__()
assert (0 <= begin_eval and begin_eval < n_time_steps)
self.device = device
self.n_time_steps = n_time_steps
self.begin_eval = begin_eval
self.input_conversion = InputDataToSpikingPerceptronLayer(device)
self.layer1 = SpikingNeuronLayerCRNN(
device, n_hidden=100,
decay_multiplier=0.9, threshold=1.0, penalty_threshold=1.5
)
self.layer2 = SpikingNeuronLayerRNN(
device, n_inputs=100, n_hidden=10,
decay_multiplier=0.9, threshold=1.0, penalty_threshold=1.5
)
self.output_conversion = OutputDataToSpikingPerceptronLayer(average_output=False) # Sum on outputs.
self.to(self.device)
def forward_through_time(self, x):
"""
        This acts as a layer: its input is not time-related, and neither is its output.
        The time iteration happens inside, and the spiking outputs are reduced (summed or
        averaged) over the time axis before being returned, so that this pipeline can be
        mixed with regular backprop layers such as the input and output conversion layers.
"""
self.input_conversion.reset_state()
self.layer1.reset_state()
self.layer2.reset_state()
out = []
all_layer1_states = []
all_layer1_outputs = []
all_layer2_states = []
all_layer2_outputs = []
for _ in range(self.n_time_steps):
xi = self.input_conversion(x)
# For layer 1, we take the regular output.
layer1_state, layer1_output = self.layer1(xi)
            # We take the inner state of layer 2 because it is the pre-activation and thus acts as our logits.
layer2_state, layer2_output = self.layer2(layer1_output)
all_layer1_states.append(layer1_state)
all_layer1_outputs.append(layer1_output)
all_layer2_states.append(layer2_state)
all_layer2_outputs.append(layer2_output)
out.append(layer2_state)
out = self.output_conversion(out[self.begin_eval:])
return out, [[all_layer1_states, all_layer1_outputs], [all_layer2_states, all_layer2_outputs]]
def forward(self, x):
out, _ = self.forward_through_time(x)
return F.log_softmax(out, dim=-1)
def visualize_all_neurons(self, x):
assert x.shape[0] == 1 and len(x.shape) == 4, (
"Pass only 1 example to SpikingNet.visualize(x) with outer dimension shape of 1.")
_, layers_state = self.forward_through_time(x)
for i, (all_layer_states, all_layer_outputs) in enumerate(layers_state):
layer_state = torch.stack(all_layer_states).data.cpu().numpy().squeeze().transpose()
layer_output = torch.stack(all_layer_outputs).data.cpu().numpy().squeeze().transpose()
self.plot_layer(layer_state, title="Inner state values of neurons for layer {}".format(i))
self.plot_layer(layer_output, title="Output spikes (activation) values of neurons for layer {}".format(i))
def visualize_neuron(self, x, layer_idx, neuron_idx):
assert x.shape[0] == 1 and len(x.shape) == 4, (
"Pass only 1 example to SpikingNet.visualize(x) with outer dimension shape of 1.")
_, layers_state = self.forward_through_time(x)
all_layer_states, all_layer_outputs = layers_state[layer_idx]
layer_state = torch.stack(all_layer_states).data.cpu().numpy().squeeze().transpose()
layer_output = torch.stack(all_layer_outputs).data.cpu().numpy().squeeze().transpose()
self.plot_neuron(layer_state[neuron_idx], title="Inner state values neuron {} of layer {}".format(neuron_idx, layer_idx))
self.plot_neuron(layer_output[neuron_idx], title="Output spikes (activation) values of neuron {} of layer {}".format(neuron_idx, layer_idx))
def plot_layer(self, layer_values, title):
"""
This function is derived from:
https://github.com/guillaume-chevalier/LSTM-Human-Activity-Recognition
Which was released under the MIT License.
"""
width = max(16, layer_values.shape[0] / 8)
height = max(4, layer_values.shape[1] / 8)
plt.figure(figsize=(width, height))
plt.imshow(
layer_values,
interpolation="nearest",
cmap=plt.cm.rainbow
)
plt.title(title)
plt.colorbar()
plt.xlabel("Time")
plt.ylabel("Neurons of layer")
plt.show()
def plot_neuron(self, neuron_through_time, title):
width = max(16, len(neuron_through_time) / 8)
height = 4
plt.figure(figsize=(width, height))
plt.title(title)
plt.plot(neuron_through_time)
plt.xlabel("Time")
plt.ylabel("Neuron's activation")
plt.show()
###############################################################################
class NonSpikingNet(nn.Module):
def __init__(self):
super(NonSpikingNet, self).__init__()
self.layer1 = nn.Linear(1* 28* 28, 100)
self.layer2 = nn.Linear(100, 10)
def forward(self, x, is_2D=True):
x = x.view(x.size(0), -1) # Flatten 2D image to 1D for FC
x = F.relu(self.layer1(x))
x = self.layer2(x)
return F.log_softmax(x, dim=-1)
# + id="2yE5YmRxKblE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="046415b7-5296-42e0-ef69-5b3680954ca7"
###############################################################################
num_epoch = 15
time_step_list = [128/2]
total_time_models = []
fig, ax_lst = plt.subplots(2, len(time_step_list), sharex=True, sharey='row',figsize=(30,10))
for i,step in enumerate(time_step_list):
spiking_model = SpikingConvNet(device, n_time_steps=int(step), begin_eval=0)
loss, acc, time_m = train_many_epochs(spiking_model, epochs = num_epoch)
total_time_models.append(time_m)
ax_lst[0].plot(acc)
ax_lst[0].set_title("Spiking Acc, n time steps = %d"%int(step))
ax_lst[1].plot(loss)
ax_lst[1].set_title("Spiking Loss, n time steps = %d"%int(step))
# + id="J2burh9W8doy" colab_type="code" colab={}
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Florian BARRAL
# florian.barral@essec.edu
# **You may need to install [OpenCV](https://pypi.python.org/pypi/opencv-python) and [scikit-video](http://www.scikit-video.org/stable/).**
# +
import keras
import numpy as np
import io
import base64
from IPython.display import HTML
import skvideo.io
import cv2
import json
import random
from keras.models import Sequential,model_from_json
from keras.layers.core import Dense, Reshape, Flatten
from keras.optimizers import sgd
from keras.layers import Conv2D, MaxPooling2D, Activation, AveragePooling2D,Reshape,BatchNormalization
# -
# # MiniProject #3: Deep Reinforcement Learning
# __Notations__: $E_p$ is the expectation under probability $p$. Please justify each of your answer and widely comment your code.
# # Context
# In a reinforcement learning algorithm, we model each step $t$ as an action $a_t$ obtained from a state $s_t$, i.e. $\{(a_{t},s_{t})_{t\leq T}\}$ having the Markov property. We consider a discount factor $\gamma \in [0,1]$ that ensures convergence. The goal is to find, among all the policies $\pi$, one that maximizes the expected reward:
#
# \begin{equation*}
# R(\pi)=\sum_{t\leq T}E_{p^{\pi}}[\gamma^t r(s_{t},a_{t})] \> ,
# \end{equation*}
#
# where:
# \begin{equation*}p^{\pi}(a_{0},a_{1},s_{1},...,a_{T},s_{T})=p(a_{0})\prod_{t=1}^{T}\pi(a_{t}|s_{t})p(s_{t+1}|s_{t},a_{t}) \> .
# \end{equation*}
#
# We note the $Q$-function:
#
# \begin{equation*}Q^\pi(s,a)=E_{p^{\pi}}[\sum_{t\leq T}\gamma^{t}r(s_{t},a_{t})|s_{0}=s,a_{0}=a] \> .
# \end{equation*}
#
# Thus, the optimal Q function is:
# \begin{equation*}
# Q^*(s,a)=\max_{\pi}Q^\pi(s,a) \> .
# \end{equation*}
#
# In this project, we will apply the deep reinforcement learning techniques to a simple game: an agent will have to learn from scratch a policy that will permit it maximizing a reward.
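#
# *Remark*: since $\gamma<1$ and the rewards are bounded (say $|r(s,a)|\leq R_{\max}$), the discounted sum is bounded by $\sum_{t\geq 0}\gamma^{t}R_{\max}=R_{\max}/(1-\gamma)$, which is what makes the expected reward well defined even for large $T$.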
# ## The environment, the agent and the game
# ### The environment
# ```Environment``` is an abstract class that represents the states, rewards, and actions to obtain the new state.
class Environment(object):
def __init__(self):
pass
def act(self, act):
"""
One can act on the environment and obtain its reaction:
- the new state
- the reward of the new state
- should we continue the game?
:return: state, reward, game_over
"""
pass
def reset(self):
"""
Reinitialize the environment to a random state and returns
the original state
:return: state
"""
pass
def draw(self):
"""
Visualize in the console or graphically the current state
"""
pass
# The method ```act``` lets the agent act on the environment at a given state $s_t$ (stored internally) via an action $a_t$. The method returns the new state $s_{t+1}$, the reward $r(s_{t},a_{t})$, and whether $t\leq T$ still holds (*game_over*).
#
# The method ```reset``` simply reinitializes the environment to a random state $s_0$.
#
# The method ```draw``` displays the current state $s_t$ (this is useful to check the behavior of the Agent).
#
# We model $s_t$ as a tensor, while $a_t$ is an integer.
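#
# For instance (illustrative only, on the abstract interface above), one interaction step looks like:
#
# ```python
# env = Environment()
# state = env.reset()                      # initial state s_0, a tensor
# state, reward, game_over = env.act(0)    # apply action a_t = 0, observe s_{t+1}, r(s_t, a_t) and the end-of-game flag
# ```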
# ### The Agent
# The goal of the ```Agent``` is to interact with the ```Environment``` by proposing actions $a_t$ obtained from a given state $s_t$ to attempt to maximize its __reward__ $r(s_t,a_t)$. We propose the following abstract class:
class Agent(object):
def __init__(self, epsilon=0.1, n_action=4):
self.epsilon = epsilon
self.n_action = n_action
def set_epsilon(self,e):
self.epsilon = e
def act(self,s,train=True):
""" This function should return the next action to do:
an integer between 0 and 4 (not included) with a random exploration of epsilon"""
if train:
if np.random.rand() <= self.epsilon:
a = np.random.randint(0, self.n_action, size=1)[0]
else:
a = self.learned_act(s)
        else:  # at test time, act greedily with the learned policy (remove this if it hurts performance)
a = self.learned_act(s)
return a
def learned_act(self,s):
""" Act via the policy of the agent, from a given state s
it proposes an action a"""
pass
def reinforce(self, s, n_s, a, r, game_over_):
""" This function is the core of the learning algorithm.
It takes as an input the current state s_, the next state n_s_
the action a_ used to move from s_ to n_s_ and the reward r_.
Its goal is to learn a policy.
"""
pass
def save(self):
""" This function returns basic stats if applicable: the
loss and/or the model"""
pass
def load(self):
""" This function allows to restore a model"""
pass
# ***
# __Question 1__:
# Explain the function act. Why is ```epsilon``` essential?
# The function ```act``` returns the action the Agent takes from state `s`: with probability $\epsilon$ it picks a random action (exploration), otherwise it follows the learned policy via ```learned_act``` (exploitation). <br>
# $\epsilon$ is essential because it forces exploration. Without this probability of random actions, the agent would always replay its current policy, never visit new states, and could get stuck with a poor policy; occasional random actions let it keep improving the learned policy.
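#
# For example, one could anneal $\epsilon$ during training so that the agent explores a lot at first and exploits more later; the schedule below is purely illustrative (the values are arbitrary).
# +
# Illustrative epsilon schedule: start at 0.5 and decay towards a floor of 0.05 over the epochs.
for e_demo in range(5):
    eps_demo = max(0.05, 0.5 * (0.9 ** e_demo))
    # agent.set_epsilon(eps_demo)  # this is where set_epsilon would be called in a training loop
    print(e_demo, round(eps_demo, 3))
# -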
# ***
# ### The Game
# The ```Agent``` and the ```Environment``` work in an interlaced way as in the following (take some time to understand this code as it is the core of the project)
#
# ```python
#
# epoch = 300
# env = Environment()
# agent = Agent()
#
#
# # Number of won games
# score = 0
# loss = 0
#
#
# for e in range(epoch):
# # At each epoch, we restart to a fresh game and get the initial state
# state = env.reset()
# # This assumes that the games will end
# game_over = False
#
# win = 0
# lose = 0
#
# while not game_over:
# # The agent performs an action
# action = agent.act(state)
#
# # Apply an action to the environment, get the next state, the reward
# # and if the games end
# prev_state = state
# state, reward, game_over = env.act(action)
#
# # Update the counters
# if reward > 0:
# win = win + reward
# if reward < 0:
# lose = lose -reward
#
# # Apply the reinforcement strategy
# loss = agent.reinforce(prev_state, state, action, reward, game_over)
#
# # Save as a mp4
# if e % 10 == 0:
# env.draw(e)
#
# # Update stats
# score += win-lose
#
# print("Epoch {:03d}/{:03d} | Loss {:.4f} | Win/lose count {}/{} ({})"
# .format(e, epoch, loss, win, lose, win-lose))
# agent.save()
# ```
# # The game, *eat cheese*
# A rat runs on an island and tries to eat as much as possible. The island is subdivided into $N\times N$ cells, in which there are cheese (+0.5) and poisonous cells (-1). The rat has a visibility of 2 cells (thus it can see $5^2$ cells). The rat is given a time $T$ to accumulate as much food as possible. It can perform 4 actions: going up, down, left, right.
#
# The goal is to code an agent to solve this task that will learn by trial and error. We propose the following environment:
class Environment(object):
def __init__(self, grid_size=10, max_time=500, temperature=0.1):
grid_size = grid_size+4
self.grid_size = grid_size
self.max_time = max_time
self.temperature = temperature
#board on which one plays
self.board = np.zeros((grid_size,grid_size))
self.position = np.zeros((grid_size,grid_size))
# coordinate of the rat
self.x = 0
self.y = 1
# self time
self.t = 0
self.scale=16
self.to_draw = np.zeros((max_time+2, grid_size*self.scale, grid_size*self.scale, 3))
def draw(self,e):
skvideo.io.vwrite(str(e) + '.mp4', self.to_draw)
def get_frame(self,t):
b = np.zeros((self.grid_size,self.grid_size,3))+128
b[self.board>0,0] = 256
b[self.board < 0, 2] = 256
b[self.x,self.y,:]=256
b[-2:,:,:]=0
b[:,-2:,:]=0
b[:2,:,:]=0
b[:,:2,:]=0
b = cv2.resize(b, None, fx=self.scale, fy=self.scale, interpolation=cv2.INTER_NEAREST)
self.to_draw[t,:,:,:]=b
def act(self, action):
"""This function returns the new state, reward and decides if the
game ends."""
self.get_frame(int(self.t))
self.position = np.zeros((self.grid_size, self.grid_size))
self.position[0:2,:]= -1
self.position[:,0:2] = -1
self.position[-2:, :] = -1
self.position[:,-2:] = -1
self.position[self.x, self.y] = 1
if action == 0:
if self.x == self.grid_size-3:
self.x = self.x-1
else:
self.x = self.x + 1
elif action == 1:
if self.x == 2:
self.x = self.x+1
else:
self.x = self.x-1
elif action == 2:
if self.y == self.grid_size - 3:
self.y = self.y - 1
else:
self.y = self.y + 1
elif action == 3:
if self.y == 2:
self.y = self.y + 1
else:
self.y = self.y - 1
else:
            raise RuntimeError('Error: action not recognized')
self.t = self.t + 1
reward = self.board[self.x, self.y]
self.board[self.x, self.y] = 0
game_over = self.t > self.max_time
state = np.concatenate((self.board.reshape(self.grid_size, self.grid_size,1),
self.position.reshape(self.grid_size, self.grid_size,1)),axis=2)
state = state[self.x-2:self.x+3,self.y-2:self.y+3,:]
return state, reward, game_over
def reset(self):
"""This function resets the game and returns the initial state"""
self.x = np.random.randint(3, self.grid_size-3, size=1)[0]
self.y = np.random.randint(3, self.grid_size-3, size=1)[0]
bonus = 0.5*np.random.binomial(1,self.temperature,size=self.grid_size**2)
bonus = bonus.reshape(self.grid_size,self.grid_size)
malus = -1.0*np.random.binomial(1,self.temperature,size=self.grid_size**2)
malus = malus.reshape(self.grid_size, self.grid_size)
self.to_draw = np.zeros((self.max_time+2, self.grid_size*self.scale, self.grid_size*self.scale, 3))
malus[bonus>0]=0
self.board = bonus + malus
self.position = np.zeros((self.grid_size, self.grid_size))
self.position[0:2,:]= -1
self.position[:,0:2] = -1
self.position[-2:, :] = -1
self.position[:,-2:] = -1
self.board[self.x,self.y] = 0
self.t = 0
state = np.concatenate((
self.board.reshape(self.grid_size, self.grid_size,1),
self.position.reshape(self.grid_size, self.grid_size,1)),axis=2)
state = state[self.x - 2:self.x + 3, self.y - 2:self.y + 3, :]
return state
# The following elements are important because they correspond to the hyper parameters for this project:
# +
# parameters
size = 13
T=200
temperature=0.3
epochs_train=30 # set small when debugging
epochs_test=30 # set small when debugging
# display videos
def display_videos(name):
video = io.open(name, 'r+b').read()
encoded = base64.b64encode(video)
return '''<video alt="test" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii'))
# -
# __Question 2__ Explain the use of the arrays ```position``` and ```board```.
# The `position` array is a mask of the same size as the island: it marks the current cell of the rat with 1 and the two-cell border (the walls) with -1, everything else being 0.
#
# The `board` array stores the rewards on the island: +0.5 on cheese cells, -1 on poisonous cells and 0 elsewhere (a cell is reset to 0 once eaten). The state given to the agent stacks these two maps as channels and crops them to the 5x5 neighbourhood of the rat.
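#
# A quick illustrative check of the observation shape (using the hyper parameters defined above): the cropped, two-channel state is a (5, 5, 2) tensor.
# +
_env_check = Environment(grid_size=size, max_time=5, temperature=temperature)
print(_env_check.reset().shape)  # expected: (5, 5, 2)
# -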
# ## Random Agent
# ***
# __Question 3__ Implement a random Agent (only ```learned_act``` needs to be implemented):
class RandomAgent(Agent):
    def __init__(self, n_action=4):
        super(RandomAgent, self).__init__()
        self.n_action = n_action
    def learned_act(self, s):
        # A random agent ignores the state and picks one of the n_action moves uniformly at random.
        return np.random.randint(0, self.n_action)
# ***
# ***
# __Question 4__ Visualize the game moves. You need to fill in the following function for the evaluation:
def test(agent,env,epochs,prefix=''):
# Number of won games
score = 0
for e in range(1,epochs+1):
        # At each epoch, we restart to a fresh game and get the initial state
state = env.reset()
# This assumes that the games will end
game_over = False
win = 0
lose = 0
while not game_over:
# The agent performs an action
action = agent.learned_act(state)
# Apply an action to the environment, get the next state, the reward
# And if the games end
state, reward, game_over = env.act(action)
# Update the counters
if reward > 0:
win += reward
if reward < 0:
lose -= reward
# Apply the reinforcement strategy
# Save as a mp4
if e % 10 == 0:
env.draw(prefix+str(e))
# Update stats
score += win-lose
print("Win/lose count {}/{}. Average score ({})"
.format(win, lose, score/(1+e)))
print('Final score: '+str(score/epochs))
# +
# Initialize the game
env = Environment(grid_size=size, max_time=T,temperature=temperature)
# Initialize the agent!
agent = RandomAgent()
test(agent,env,epochs_test,prefix='random')
HTML(display_videos('random10.mp4'))
# -
# ***
# ## DQN
# Let us assume here that $T=\infty$.
#
# ***
# __Question 5__ Let $\pi$ be a policy, **(1)** show that:
#
# \begin{equation*}
# Q^{\pi}(s,a)=E_{(s',a')\sim p(.|s,a)}[r(s,a)+\gamma Q^{\pi}(s',a')]
# \end{equation*}
#
# Then, **(2)** show that for the optimal policy $\pi^*$ (we assume its existence), the following holds:
#
# \begin{equation*}
# Q^{*}(s,a)=E_{s'\sim \pi^*(.|s,a)}[r(s,a)+\gamma\max_{a'}Q^{*}(s',a')].
# \end{equation*}
# Finally, **(3)** deduce that a plausible objective is:
#
# \begin{equation*}
# \mathcal{L}(\theta)=E_{s' \sim \pi^*(.|s,a)}\Vert r+\gamma\max_{a'}Q(s',a',\theta)-Q(s,a,\theta)\Vert^{2}.
# \end{equation*}
#
#
#
# **(1)**
#
# Given the Bellman equation:
#
# \begin{align*}
# Q^{\pi}(s,a) & = E_{\pi}[\Sigma^{\infty}_{t=0} \gamma^t r_t(s_t, \pi(s_t),s_{t+1}) | s_t = s,a_t = a] \\
# \Rightarrow Q^{\pi}(s_0,a_0) & = E_{\pi}[r_0(s_0,a_0,s_1)+\Sigma^{\infty}_{t=1} \gamma^t r_t (s_t,\pi(s_t),s_{t+1}) | s_0,a_0 ] \\
# & = E_{\pi}[r_0(s_0,a_0,s_1) + \gamma E_{\pi} \{ \Sigma_{t=1}^{\infty} \gamma^{t-1} r_t (s_t,\pi(s_t),s_{t+1}) \mid s_1,a_1 \} \mid s_0,a_0 ] \\
# & = E_{\pi} [r_0 (s_0,a_0,s_1)+\gamma Q^{\pi}(s_1,a_1)]
# \end{align*}
#
# Then:
# $$\boxed{Q^{\pi}(s,a) = E_{(s',a') \sim p(.|s,a)} [r(s,a)+\gamma Q^{\pi}(s',a')]}$$
# **(2)**
#
# Given the Bellman equation:
#
# $$V^{\pi}(s_0) = E[\Sigma^{\infty}_{t=0} \gamma^t r_t (s_t,\pi(s_t),s_{t+1}) \mid s_0] $$
# and
# $$V^*(s_0) = \underset{\pi}{\max}\, E[\Sigma^{\infty}_{t=0}\gamma^t r_t (s_t,\pi(s_t),s_{t+1}) \mid s_0] $$
#
# Then
#
# \begin{align*} V^*(s_0) & = \underset{\pi,a_0}{\max} E[r_0(s_0,a_0,s_1) + \Sigma^{\infty}_{t=1} \gamma^t r_t (s_t,\pi(s_t),s_{t+1}) \mid s_0] \\
# & = \underset{a_0}{\max} E_{s_1} [r_0(s_0,a_0,s_1)+ \gamma \underset{\pi}{\max} E_{s_2,s_3,\dots} \{ \Sigma^{\infty}_{t=1} \gamma^{t-1} r_t(s_t,\pi(s_t),s_{t+1}) \} \mid s_0] \\
# & = \underset{a_0}{\max} E_{s_1}[r_0(s_0,a_0,s_1)+\gamma V^* (s_1)]
# \end{align*}
#
# Then:
# $$ V^*(s) = \underset{a}{\max}\, E_{s'}[r(s,a,s')+\gamma V^*(s') \mid s,a] $$
#
# Since $V^*(s) = \underset{a'}{\max}Q^*(s,a')$
#
# \begin{align*}
# \Rightarrow V^*(s) & = \underset{a}{\max}E_{s'}[r(s,a,s')+\gamma \underset{a'}{\max} Q^*(s',a')|s,a] \\
# \Rightarrow Q^*(s,a) & = E_{s'}[r(s,a,s')+\gamma \underset{a'}{\max} Q^*(s',a')|s,a]
# \end{align*}
# $$
# \boxed{Q^*(s,a) = E_{s'\sim\pi^*(.|s,a)}[r(s,a)+\gamma \underset{a'}{\max} Q^*(s',a')]}$$
#
#
#
# **(3)**
#
# Given the loss function $\mathcal{L}(\theta)= [Q^*(s,a,\theta) - Q(s,a,\theta)]^2$
#
# By substituting $Q^*(s,a,\theta)$, we get:
#
# $$\mathcal{L}(\theta) = [E_{s'\sim \pi^*(.|s,a)}[r(s,a,s')+\gamma\max_{a'}Q^{*}(s',a',\theta)] - Q(s,a,\theta)]^2$$
# $$ \Rightarrow \boxed{\mathcal{L}(\theta)=E_{s' \sim \pi^*(.|s,a)}\Vert r+\gamma\max_{a'}Q(s',a',\theta)-Q(s,a,\theta)\Vert^{2}} $$
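#
# Concretely, the regression target inside this loss is $r+\gamma\max_{a'}Q(s',a',\theta)$; on made-up numbers (illustrative only):
# +
gamma_demo = 0.9
r_demo = 0.5                                     # reward observed for (s, a)
q_next_demo = np.array([0.2, 1.0, -0.3, 0.4])    # hypothetical Q(s', .) values for the 4 actions
print(r_demo + gamma_demo * q_next_demo.max())   # TD target = 0.5 + 0.9 * 1.0 = 1.4
# -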
# ***
# The DQN-learning algorithm relies on these derivations to train the parameters $\theta$ of a Deep Neural Network:
#
# 1. At the state $s_t$, select the action $a_t$ with best reward using $Q_t$ and store the results;
#
# 2. Obtain the new state $s_{t+1}$ from the environment $p$;
#
# 3. Store $(s_t,a_t,s_{t+1})$;
#
# 4. Obtain $Q_{t+1}$ by minimizing $\mathcal{L}$ from a recovered batch from the previously stored results.
#
# ***
# __Question 6__ Implement the class ```Memory``` that stores moves (in a replay buffer) via ```remember``` and provides a ```random_access``` to these. Specify a maximum memory size to avoid side effects. You can for example use a ```list()``` and set by default ```max_memory=100```.
class Memory(object):
def __init__(self, max_memory=100):
self.max_memory = max_memory
self.memory = list()
    def remember(self, m):
        # Enforce the maximum memory size: once the buffer is full, drop the oldest move.
        if len(self.memory) >= self.max_memory:
            del self.memory[0]
        self.memory.append(m)
    def random_access(self):
        r = np.random.randint(0, len(self.memory))
        return self.memory[r]
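# Quick illustrative check of the replay buffer: with max_memory=3, only the 3 most recent
# moves are kept, and random_access draws one of them uniformly at random.
_demo_buffer = Memory(max_memory=3)
for _i in range(5):
    _demo_buffer.remember(("state_%d" % _i, "next_%d" % _i, 0, 0.0, False))
print(len(_demo_buffer.memory), _demo_buffer.random_access())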
# ***
# The pipeline we will use for training is given below:
def train(agent,env,epoch,prefix=''):
# Number of won games
score = 0
loss = 0
for e in range(1,epoch+1):
# At each epoch, we restart to a fresh game and get the initial state
state = env.reset()
# This assumes that the games will terminate
game_over = False
win = 0
lose = 0
while not game_over:
# The agent performs an action
action = agent.act(state)
# Apply an action to the environment, get the next state, the reward
# and if the games end
prev_state = state
state, reward, game_over = env.act(action)
# Update the counters
if reward > 0:
win = win + reward
if reward < 0:
lose = lose -reward
# Apply the reinforcement strategy
loss = agent.reinforce(prev_state, state, action, reward, game_over)
# Save as a mp4
if e % 10 == 0:
env.draw(prefix+str(e))
# Update stats
score += win-lose
print("Epoch {:03d}/{:03d} | Loss {:.4f} | Win/lose count {}/{} ({})"
.format(e, epoch, loss, win, lose, win-lose))
agent.save(name_weights=prefix+'model.h5',name_model=prefix+'model.json')
# ***
# __Question 7__ Implement the DQN training algorithm using a cascade of fully connected layers. You can use different learning rates, batch sizes or memory sizes. In particular, the loss might oscillate even while the player starts to win games, so you have to find a good criterium to decide when training has converged.
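#
# One possible criterium (a minimal sketch, with an arbitrary window size `k=5`): track a moving average of the per-epoch score (win - lose) and consider training converged once this average stops improving, even if the loss keeps oscillating.
# +
def moving_average_score(scores, k=5):
    """Average of the last k per-epoch scores; `scores` is assumed to be collected during training."""
    if not scores:
        return 0.0
    window = scores[-k:]
    return sum(window) / len(window)

print(moving_average_score([2, 3, 5, 6, 6, 7]))  # -> 5.4
# -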
# +
class DQN(Agent):
def __init__(self, grid_size, epsilon = 0.1, memory_size=100, batch_size = 16,n_state=2):
super(DQN, self).__init__(epsilon = epsilon)
# Discount for Q learning
self.discount = 0.99
self.grid_size = grid_size
# number of state
self.n_state = n_state
# Memory
self.memory = Memory(memory_size)
# Batch size when learning
self.batch_size = batch_size
def learned_act(self, s):
acts_ = self.model.predict(np.reshape(s,(1,5,5,self.n_state)))
return np.argmax(acts_[0])
def reinforce(self, s_, n_s_, a_, r_, game_over_):
# Two steps: first memorize the states, second learn from the pool
self.memory.remember([s_, n_s_, a_, r_, game_over_])
input_states = np.zeros((self.batch_size, 5,5,self.n_state))
target_q = np.zeros((self.batch_size, 4))
for i in range(self.batch_size):
current_state, next_state, action, reward, game_over_ = self.memory.random_access() #sample some moves
input_states[i] = current_state
s_reshape=np.reshape(current_state,(1,5,5,self.n_state))
next_state=np.reshape(next_state,(1,5,5,self.n_state))
target_q[i] = self.model.predict(s_reshape)
if game_over_:
                target_q[i, action] = reward
else:
q = self.model.predict(next_state)
                target_q[i, action] = reward + self.discount * np.max(q)
######## FILL IN
            # HINT: Clip the target to avoid exploding gradients (the clipping here is fairly tight).
target_q = np.clip(target_q, -3, 3)
l = self.model.train_on_batch(input_states, target_q)
return l
def save(self,name_weights='model.h5',name_model='model.json'):
self.model.save_weights(name_weights, overwrite=True)
with open(name_model, "w") as outfile:
json.dump(self.model.to_json(), outfile)
def load(self,name_weights='model.h5',name_model='model.json'):
with open(name_model, "r") as jfile:
model = model_from_json(json.load(jfile))
model.load_weights(name_weights)
model.compile("sgd", "mse")
self.model = model
class DQN_FC(DQN):
def __init__(self, *args, lr=0.1,**kwargs):
super(DQN_FC, self).__init__( *args,**kwargs)
# NN Model
model = Sequential()
model.add(Reshape((5*5*self.n_state,), input_shape=(5,5,self.n_state)))
model.add(Dense(128,input_shape=(50,),init='uniform',activation='relu'))
model.add(Dense(64,init='uniform',activation='relu'))
model.add(Dense(32,init='uniform',activation='relu'))
model.add(Dense(16,activation='relu'))
model.add(Dense(8,activation='relu'))
model.add(Dense(4,init='uniform',activation='linear'))
model.compile(sgd(lr=lr, decay=1e-4, momentum=0.0), "mse")
self.model = model
# -
env = Environment(grid_size=size, max_time=T, temperature=0.3)
agent = DQN_FC(size, lr=.1, epsilon = 0.2, memory_size=16000, batch_size = 32)
train(agent, env, epochs_train, prefix='fc_train_')
HTML(display_videos('fc_train_30.mp4'))
# ***
# ***
# __Question 8__ Implement the DQN training algorithm using a CNN (for example, 2 convolutional layers and one final fully connected layer).
class DQN_CNN(DQN):
def __init__(self, *args,lr=0.1,**kwargs):
super(DQN_CNN, self).__init__(*args,**kwargs)
###### FILL IN
model = Sequential()
model.add(Conv2D(32,kernel_size=(2,2),strides=(1,1),
activation='relu',input_shape=(5,5,self.n_state)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(64, (1, 1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(4, activation='linear'))
model.compile(sgd(lr=lr, decay=1e-4, momentum=0.0), "mse")
print(model.summary())
self.model = model
env = Environment(grid_size=size, max_time=T, temperature=0.3)
agent = DQN_CNN(size, lr=.1, epsilon = 0.1, memory_size=2000, batch_size = 32)
train(agent,env,epochs_train,prefix='cnn_train_')
HTML(display_videos('cnn_train_30.mp4'))
# ***
# ***
# __Question 9__ Test both algorithms and compare their performances. Which issue(s) do you observe? Observe also different behaviors by changing the temperature.
# ### Testing CNN & FC with temperature set to 0.3
# +
env = Environment(grid_size=size, max_time=T,temperature=0.3)
agent_cnn = DQN_CNN(size, lr=.1, epsilon = 0.1, memory_size=2000, batch_size = 32)
agent_cnn.load(name_weights='cnn_train_model.h5',name_model='cnn_train_model.json')
agent_fc = DQN_FC(size, lr=.1, epsilon = 0.1, memory_size=2000, batch_size = 32)
agent_fc.load(name_weights='fc_train_model.h5',name_model='fc_train_model.json')
print('Test of the CNN')
test(agent_cnn,env,epochs_test,prefix='cnn_test_03_')
print('Test of the FC')
test(agent_fc,env,epochs_test,prefix='fc_test_03_')
# -
HTML(display_videos('cnn_test_03_30.mp4'))
HTML(display_videos('fc_test_03_30.mp4'))
# ### Testing CNN & FC with temperature set to 0.9
# +
env = Environment(grid_size=size, max_time=T,temperature=0.9)
agent_cnn = DQN_CNN(size, lr=.1, epsilon = 0.1, memory_size=2000, batch_size = 32)
agent_cnn.load(name_weights='cnn_train_model.h5',name_model='cnn_train_model.json')
agent_fc = DQN_FC(size, lr=.1, epsilon = 0.1, memory_size=2000, batch_size = 32)
agent_fc.load(name_weights='fc_train_model.h5',name_model='fc_train_model.json')
print('Test of the CNN')
test(agent_cnn,env,epochs_test,prefix='cnn_test_09_')
print('Test of the FC')
test(agent_fc,env,epochs_test,prefix='fc_test_09_')
# -
HTML(display_videos('cnn_test_09_30.mp4'))
HTML(display_videos('fc_test_09_30.mp4'))
# The CNN model seems to perform better and to be more stable than the FC model.
#
# When the temperature is increased, results improve for both models. This is expected, since a higher temperature increases the fraction of board cells carrying cheese and bonuses.
#
# Yet, with both models, the agent tends not to explore the island much.
# ***
#
# The algorithm tends not to explore the map, which can be an issue. We propose two ideas to encourage exploration:
# 1. Incorporating a decreasing $\epsilon$-greedy exploration. You can use the method ```set_epsilon``` (a minimal sketch is given after Question 10 below);
# 2. Append via the environment a new state that describes if a cell has been visited or not
#
# ***
# __Question 10__ Design a new ```train_explore``` function and environment class ```EnvironmentExploring``` to tackle the issue of exploration.
#
#
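# A minimal sketch of idea 1 above (it assumes the provided Agent.set_epsilon updates the
# exploration rate used by act): decay epsilon geometrically over the training epochs.
def decayed_epsilon(epoch, eps_start=0.5, eps_min=0.05, decay=0.99):
    # Exploration rate after `epoch` epochs of geometric decay, floored at eps_min.
    return max(eps_min, eps_start * decay ** epoch)
# Inside the training loop one could then call, e.g.: agent.set_epsilon(decayed_epsilon(e))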
# +
def train_explore(agent,env,epoch,prefix=''):
# Number of won games
score = 0
loss = 0
for e in range(1,epoch+1):
# At each epoch, we restart to a fresh game and get the initial state
state = env.reset()
# This assumes that the games will terminate
game_over = False
win = 0
lose = 0
while not game_over:
# The agent performs an action
action = agent.act(state,train=True)
# Apply an action to the environment, get the next state, the reward
# and if the games end
prev_state = state
            state, reward, game_over = env.act(action, train=True)
# Update the counters
if reward > 0:
win = win + reward
if reward < 0:
lose = lose - reward
# Apply the reinforcement strategy
loss = agent.reinforce(prev_state, state, action, reward, game_over)
# Save as a mp4
if e % 10 == 0:
env.draw(prefix+str(e))
# Update stats
score += win-lose
print("Epoch {:03d}/{:03d} | Loss {:.4f} | Win/lose count {}/{} ({})"
.format(e, epoch, loss, win, lose, win-lose))
agent.save(name_weights=prefix+'model.h5',name_model=prefix+'model.json')
class EnvironmentExploring(object):
def __init__(self, grid_size=10, max_time=500, temperature=0.1):
grid_size = grid_size+4
self.grid_size = grid_size
self.max_time = max_time
self.temperature = temperature
#board on which one plays
self.board = np.zeros((grid_size,grid_size))
self.position = np.zeros((grid_size,grid_size))
# coordinate of the rat
self.x = 0
self.y = 1
#
self.malus_position = np.zeros((grid_size,grid_size))
#
# self time
self.t = 0
self.scale=16
self.to_draw = np.zeros((max_time+2, grid_size*self.scale, grid_size*self.scale, 3))
def draw(self,e):
skvideo.io.vwrite(str(e) + '.mp4', self.to_draw)
def get_frame(self,t):
b = np.zeros((self.grid_size,self.grid_size,3))+128
b[self.board>0,0] = 256
b[self.board < 0, 2] = 256
b[self.x,self.y,:]=256
b[-2:,:,:]=0
b[:,-2:,:]=0
b[:2,:,:]=0
b[:,:2,:]=0
b = cv2.resize(b, None, fx=self.scale, fy=self.scale, interpolation=cv2.INTER_NEAREST)
self.to_draw[t,:,:,:]=b
    def act(self, action, train=False):
"""This function returns the new state, reward and decides if the
game ends."""
self.get_frame(int(self.t))
self.position = np.zeros((self.grid_size, self.grid_size))
self.position[0:2,:]= -1
self.position[:,0:2] = -1
self.position[-2:, :] = -1
self.position[:,-2:] = -1
self.position[self.x, self.y] = 1
if action == 0:
if self.x == self.grid_size-3:
self.x = self.x-1
else:
self.x = self.x + 1
elif action == 1:
if self.x == 2:
self.x = self.x+1
else:
self.x = self.x-1
elif action == 2:
if self.y == self.grid_size - 3:
self.y = self.y - 1
else:
self.y = self.y + 1
elif action == 3:
if self.y == 2:
self.y = self.y + 1
else:
self.y = self.y - 1
else:
            raise RuntimeError('Error: action not recognized')
self.t = self.t + 1
# #reward = self.board[self.x, self.y]
reward = 0
if train:
reward = -self.malus_position[self.x, self.y]
self.malus_position[self.x, self.y] = 0.1
reward = reward + self.board[self.x, self.y]
#
self.board[self.x, self.y] = 0
game_over = self.t > self.max_time
#state = np.concatenate((self.board.reshape(self.grid_size, self.grid_size,1),
# self.position.reshape(self.grid_size, self.grid_size,1)),axis=2)
#
# 3 "feature" states instead of 2
        state = np.concatenate((self.malus_position.reshape(self.grid_size, self.grid_size,1),  # added in __init__
                        self.board.reshape(self.grid_size, self.grid_size,1),  # already in __init__
                        self.position.reshape(self.grid_size, self.grid_size,1)),axis=2)  # already in __init__
#
state = state[self.x-2:self.x+3,self.y-2:self.y+3,:]
return state, reward, game_over
def reset(self):
"""This function resets the game and returns the initial state"""
self.x = np.random.randint(3, self.grid_size-3, size=1)[0]
self.y = np.random.randint(3, self.grid_size-3, size=1)[0]
bonus = 0.5*np.random.binomial(1,self.temperature,size=self.grid_size**2)
bonus = bonus.reshape(self.grid_size,self.grid_size)
malus = -1.0*np.random.binomial(1,self.temperature,size=self.grid_size**2)
malus = malus.reshape(self.grid_size, self.grid_size)
self.to_draw = np.zeros((self.max_time+2, self.grid_size*self.scale, self.grid_size*self.scale, 3))
malus[bonus>0]=0
self.board = bonus + malus
        # Also reset the visited-cell (malus) map for the new game.
        self.malus_position = np.zeros((self.grid_size, self.grid_size))
        self.position = np.zeros((self.grid_size, self.grid_size))
self.position[0:2,:]= -1
self.position[:,0:2] = -1
self.position[-2:, :] = -1
self.position[:,-2:] = -1
self.board[self.x,self.y] = 0
self.t = 0
#state = np.concatenate((
# self.board.reshape(self.grid_size, self.grid_size,1),
# self.position.reshape(self.grid_size, self.grid_size,1)),axis=2)
# 3 "feature" states instead of 2
#
        state = np.concatenate((self.malus_position.reshape(self.grid_size, self.grid_size,1),  # added in __init__
                        self.board.reshape(self.grid_size, self.grid_size,1),  # already in __init__
                        self.position.reshape(self.grid_size, self.grid_size,1)),axis=2)  # already in __init__
#
state = state[self.x - 2:self.x + 3, self.y - 2:self.y + 3, :]
return state
## Use these samples of code:
# In train_explore:
#state, reward, game_over = env.act(action, train=True)
#
## In EnvironmentExploring:
# You will have to change n_state to 3 because you will use one more state layer!
#reward = 0
#if train:
#    reward = -self.malus_position[self.x, self.y]
#self.malus_position[self.x, self.y] = 0.1
#
#reward = reward + self.board[self.x, self.y]
# 3 "feature" states instead of 2
#state = np.concatenate((self.malus_position.reshape(self.grid_size, self.grid_size,1),  # added in __init__
#    self.board.reshape(self.grid_size, self.grid_size,1),  # already in __init__
#    self.position.reshape(self.grid_size, self.grid_size,1)),axis=2)  # already in __init__
# -
# Training
env = EnvironmentExploring(grid_size=size, max_time=T, temperature=0.3)
agent = DQN_CNN(size, lr=.1, epsilon = 0.1, memory_size=2000, batch_size = 32,n_state=3)
train_explore(agent, env, epochs_train, prefix='cnn_train_explore_')
HTML(display_videos('cnn_train_explore_30.mp4'))
# Evaluation
test(agent,env,epochs_test,prefix='cnn_test_explore_')
HTML(display_videos('cnn_test_explore_30.mp4'))
# ***
# ***
# __BONUS question__ Use the expert DQN from the previous question to generate some winning games. Train a model that mimicks its behavior. Compare the performances.
#
# ***
| 34,038 |
/labs/06-resampling_and_the_bootstrap/lab.ipynb | 8e06b454fbbe9ca05dcb8c0911efd6b3bc28c416 | [] | no_license | IceSelkie/dsc10-2020-fa-public | https://github.com/IceSelkie/dsc10-2020-fa-public | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 27,558 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Lab 6: Resampling and the Bootstrap
#
# Welcome to Lab 6! This lab is due on **Friday, 11/20 at 11:59pm.**
#
# In lecture, we saw an example of *estimation*. In WWII, the Allies wanted to know how many warplanes the Germans had (some number `N`, a *population parameter*), and they needed to estimate that quantity knowing only a random sample of the planes' serial numbers (from 1 to `N`). For example, one estimate was twice the mean of the sample serial numbers.
#
# We investigated the random variation in these estimates by simulating sampling from the population many times and computing estimates from each sample. In real life, if the Allies had known what the population looked like, they would have known `N` and would not have had any reason to think about random sampling. They didn't know what the population looked like, so they couldn't have run the simulations we did. So that was useful as an exercise in *understanding random variation* in an estimate, but not as a tool for practical data analysis.
#
# Now we'll flip that idea on its head to make it practical. Given *just* a random sample of serial numbers, we'll estimate `N`, and then we'll use simulation to find out how accurate our estimate probably is, without ever looking at the whole population. This is an example of *statistical inference*.
#
# As usual, **run the cell below** to prepare the lab and the automatic tests.
# +
import numpy as np
import babypandas as bpd
import math
# These lines set up graphing capabilities.
import matplotlib
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# %matplotlib inline
import otter
grader = otter.Notebook()
# -
# ## 0. Percentiles
# Numerical data can be sorted in increasing or decreasing order. Thus the values of a numerical data set have some rank order. A `percentile` is the value at a particular rank. The `p`th percentile is the value in the set that is at least as large as p% of the elements in the set.
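# A small illustration (separate from the graded questions below), assuming the
# `percentile` function referred to in this section is NumPy's np.percentile:
# the 50th percentile of this example array is 5, which is at least as large as
# half of its elements.
example_values = np.array([1, 3, 5, 7, 9])
np.percentile(example_values, 50)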
# #### Question 0.1
#
# Say you are in a class with 10 students, and the grades of all students in the class are stored in the array `grades`. Your score is 84.
#
# Which of the following statements are true?
#
# 1. The highest score is the 100th percentile
# 2. Your score is more than 80th percentile
# 3. Your score is less than 81st percentile
# 4. Your score is less than 90th percentile
# 5. 78 is the 50th percentile
#
# Assign `true_percentile` to a list of the correct statements.
grades = np.array([56, 65, 67, 72, 74, 78, 78, 80, 84, 94])
true_percentile = ...
true_percentile
# + deletable=false editable=false
grader.check("q01")
# -
# #### Question 0.2
# The `percentile` function takes two arguments: an array, and a rank between 0 and 100. It returns the corresponding percentile of the array: `percentile(array, rank)`. Calculate the 50th percentile of the `grades` array using the `percentile` function.
p_50 = ...
p_50
# + deletable=false editable=false
grader.check("q02")
# -
# #### Question 0.3
# Calculate the median value of the `grades` array and **manually** compare it to your answer from `Question 0.2`. Set variable `same` to `True` if the two values are the same, and `False` if they are different. Do not use if/else for this question.
median = ...
print(median)
same = ...
# + deletable=false editable=false
grader.check("q03")
# -
# ## 1. Preliminaries
# Remember the setup: We (the Allies in World War II) want to know the number of warplanes fielded by the Germans. That number is `N`. The warplanes have serial numbers from 1 to `N`, so `N` is also equal to the largest serial number on any of the warplanes.
#
# We only see a small number of serial numbers (assumed to be a random sample with replacement from among all the serial numbers), so we have to use estimation.
# #### Question 1.1
# Is `N` a population parameter or a statistic? If we compute a number using our random sample that's an estimate of `N`, is that a population parameter or a statistic? Assign either 1, 2, 3, or 4 to the variable `preliminaries_q1` below.
# 1. `N` is a population parameter. An estimate of `N` from our random sample is a population perameter.
# 2. `N` is a population parameter. An estimate of `N` from our random sample is a statistic.
# 3. `N` is a statistic. An estimate of `N` from our random sample is a population parameter.
# 4. `N` is a statistic. An estimate of `N` from our random sample is a statistic.
preliminaries_q1 = ...
# + deletable=false editable=false
grader.check("q11")
# -
# To make the situation realistic, we're going to hide the true number of warplanes from you. You'll have access only to this random sample:
observations = bpd.read_csv("data/serial_numbers.csv")
num_observations = observations.shape[0]
observations
# #### Question 1.2
# Define a function named `plot_serial_numbers` to make a histogram of any table of serial numbers. It should take one argument, a table like `observations` with one column called `"serial_number"`. It should plot a histogram of the values in the column **using bins of width 1** ranging from **1 to 200** but return nothing. Then, call that function to make a histogram of `observations`.
# +
def plot_serial_numbers(numbers):
...
# Assuming the lines above produce a histogram, this next
# line may make your histograms look nicer. Feel free to
# delete it if you want.
plt.ylim(0, .25)
plot_serial_numbers(observations)
# -
# ***Check your answer***: Your histogram should have bars that are all the same height and the plot's x axis should read "serial_number" and have a range starting from 0 and ending at 200
# #### Question 1.3
# Why are all the bars the same height, and will they always be? Think about why this is the case. Assign either 1, 2, 3, or 4 to the variable `preliminaries_q3` below.
# 1. Each serial number is in a bin of width 1, and all bins need to have the same area, so all of the bars are the same height. In general, the bars will always be the same height for a set of observations.
# 2. Each serial number has the same length, so all of the bars are the same height. In general, all of the bars will always have the same height for a set of observations.
# 3. Each serial number shows up the same number of times in this set of observations, so all of the bars are the same height. In general, the bars might have different heights for another set of observations.
# 4. Each serial number shows up once in this set of observations, so all of the bars are the same height. In general, the bars will always have the same height for a set of observations.
preliminaries_q3 = ...
# + deletable=false editable=false
grader.check("q13")
# -
# #### Question 1.4
# In lecture, we saw that one way to estimate `N` was to take twice the mean of the serial numbers we see. Write a function that computes that statistic. It should take as its argument an array of serial numbers and return twice their mean. Call it `mean_based_estimator`.
#
# After that, use it to compute an estimate of `N` called `mean_based_estimate`.
# +
def mean_based_estimator(nums):
...
mean_based_estimate = ...
mean_based_estimate
# + deletable=false editable=false
grader.check("q14")
# -
# #### Question 1.5
# We also estimated `N` using the biggest serial number in the sample. Compute it, giving it the name `max_estimate`.
max_estimate = ...
max_estimate
# + deletable=false editable=false
grader.check("q15")
# -
# #### Question 1.6
# Look at the values of `max_estimate` and `mean_based_estimate` that we happened to get for our dataset. The value of `max_estimate` tells you something about `mean_based_estimate`. Could our current `mean_based_estimate` possibly be equal to `N` (at least if we round it to the nearest integer)? If not, is it definitely higher, definitely lower, or can we not tell? Assign one of the choices (`1 - 6`) to the variable `preliminaries_q6` below.
# 1. Yes, our `mean_based_estimate` for this sample could equal `N`.
# 2. No, our `mean_based_estimate` for this sample cannot be equal to `N`, it is definitely lower by roughly 3.
# 3. No, our `mean_based_estimate` for this sample cannot be equal to `N`, it is definitely lower by at least 12.
# 4. No, our `mean_based_estimate` for this sample cannot be equal to `N`, it is definitely higher by roughly 3.
# 5. No, our `mean_based_estimate` for this sample cannot be equal to `N`, it is definitely higher by at least 12.
# 6. No, our `mean_based_estimate` for this sample cannot be equal to `N`, but we cannot tell if it is lower or higher.
preliminaries_q6 = ...
# + deletable=false editable=false
grader.check("q16")
# -
# We can't just confidently proclaim that `max_estimate` or `mean_based_estimate` is equal to `N`. What if we're really far off? So we want to get a sense of the accuracy of our estimates.
# ## 2. Resampling
# Therefore, we'll use resampling. That is, we won't exactly simulate the observations the Allies would have really seen. Rather we sample from our sample, or "resample."
#
# Why does that make any sense?
#
# When we tried to estimate `N`, we would have liked to use the whole population. Since we had only a sample, we used that to estimate `N` instead.
#
# This time, we would like to use the population of serial numbers to *run a simulation* about estimates of `N`. But we still only have our sample. We use our sample in place of the population to run the simulation.
#
# So there is a simple analogy between estimating `N` and simulating the variability of estimates.
#
# $$\text{computing }N\text{ from the population}$$
# $$:$$
# $$\text{computing an estimate of }N\text{ from a sample}$$
#
# $$\text{as}$$
#
# $$\text{simulating the distribution of estimates of }N\text{ using samples from the population}$$
# $$:$$
# $$\text{simulating an (approximate) distribution of estimates of }N\text{ using resamples from a sample}$$
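# A generic illustration of resampling with replacement (separate from the graded
# question below): draw a new sample of the same size from an existing sample,
# allowing values to repeat.
example_sample = np.array([10, 20, 30, 40, 50])
np.random.choice(example_sample, size=len(example_sample), replace=True)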
# #### Question 2.1
# Write a function called `simulate_resample`. It should generate a resample from the observed serial numbers in `observations` and return that resample. (The resample should be a table like `observations`.) It should take no arguments.
def simulate_resample():
...
# +
# This is a little magic to make sure that you see the same results we did.
np.random.seed(123)
one_resample = simulate_resample()
one_resample
# + deletable=false editable=false
grader.check("q21")
# -
# Let's make one resample.
# Later, we'll use many resamples at once to see what estimates typically look like. We don't often pay attention to single resamples, so it's easy to misunderstand them. Let's examine some individual resamples before we start using them.
# #### Question 2.2
# Make a histogram of your resample using the plotting function you defined earlier in this lab, `plot_serial_numbers`, **and** a separate histogram of the original observations.
...
...
# #### Question 2.3
# Which of the following are true:
# 1. In the plot of the resample, there are no bars at locations that weren't there in the plot of the original observations.
# 2. In the plot of the original observations, there are no bars at locations that weren't there in the plot of the resample.
# 3. The resample has exactly one copy of each serial number.
# 4. The original observations had exactly one copy of each serial number.
#
# Assign `true_statements` to a list of the correct statements
true_statements = ...
# + deletable=false editable=false
grader.check("q23")
# -
# #### Question 2.4
# Create 2 more resamples. For each one, plot it as well as compute the max- and mean-based estimates using that resample.
# +
resample_0 = ...
...
mean_based_estimate_0 = ...
max_based_estimate_0 = ...
print("Mean-based estimate for resample 0:", mean_based_estimate_0)
print("Max-based estimate for resample 0:", max_based_estimate_0)
resample_1 = ...
...
mean_based_estimate_1 = ...
max_based_estimate_1 = ...
print("Mean-based estimate for resample 1:", mean_based_estimate_1)
print("Max-based estimate for resample 1:", max_based_estimate_1)
# -
# You may find that the max-based estimates from the resamples are both exactly 135. You will probably find that the two mean-based estimates do differ from the sample mean-based estimate (and from each other).
#
# #### Question 2.5
# Using probability that you've learned, compute the exact chance that a max-based estimate from one *resample* of our observations sample is 135 and assign it to the variable `resampling_q5` below. It may be useful to recall that the size of `observations` is 17.
resampling_q5 = ...
resampling_q5
# + deletable=false editable=false
grader.check("q25")
# -
# Think about why a mean-based estimate from a resample is less often exactly equal to the mean-based estimate from the original sample as compared to a max-based estimate.
# ## 3. Simulating with resampling
# Since resampling from a sample looks just like sampling from a population, the code should look almost the same. That means we can write a function that simulates either sampling from a population or resampling from a sample. If we pass it a population as its argument, it will do the former; if we pass it a sample, it will do the latter.
#
# #### Question 3.1
# Write a function called `simulate_estimates`. It should take 4 arguments:
# 1. A table from which the data should be sampled. The table will have 1 column named `"serial_number"`.
# 2. The size of each sample from that table, an integer. (For example, to do resampling, we would pass for this argument the number of rows in the table.)
# 3. A function that computes a statistic of a sample. This argument is a *function* that takes an array of serial numbers as its argument and returns a number.
# 4. The number of replications to perform.
#
# It should simulate many samples with replacement from the given table. (The number of samples is the 4th argument.) For each of those samples, it should compute the statistic on that sample. Then it should return an array containing each of those statistics. The code below provides an example use of your function and describes how you can verify that you've written it correctly.
#
# **Hint:** For the next couple of questions, you may find the [percentile function](https://numpy.org/doc/stable/reference/generated/numpy.percentile.html) useful.
# +
def simulate_estimates(original_table, sample_size, statistic, num_replications):
# Our implementation of this function took 4 to 5 short lines of code.
...
# This should generate an empirical histogram of twice-mean estimates
# of N from samples of size 50 if N is 1000. This should be a bell-shaped
# curve centered at 1000 with most of its mass in [800, 1200]. To verify your
# answer, make sure that's what you see!
example_estimates = simulate_estimates(
bpd.DataFrame().assign(serial_number = np.arange(1, 1000+1)),
50,
mean_based_estimator,
10000)
bpd.DataFrame().assign(mean_based_estimate = example_estimates).plot(kind = 'hist', bins=np.arange(0, 1500, 25))
# + deletable=false editable=false
grader.check("q31")
# -
# Now we can go back to the sample we actually observed (the table `observations`) and estimate how much our mean-based estimate of `N` would have varied from sample to sample.
# #### Question 3.2
# Using the bootstrap and the sample `observations`, simulate the approximate distribution of *mean-based estimates* of `N`. Use 5,000 replications.
# We have provided code that plots a histogram, allowing you to visualize the simulated estimates.
bootstrap_estimates = ...
bpd.DataFrame().assign(mean_based_estimate = bootstrap_estimates).plot(kind = 'hist', bins=np.arange(0, 200, 4))
# + deletable=false editable=false
grader.check("q32")
# -
# #### Question 3.3
# Compute an interval that covers the middle 95% of the bootstrap estimates. Verify that your interval looks like it covers 95% of the area in the histogram above.
left_end = ...
right_end = ...
print("Middle 95% of bootstrap estimates: [{:f}, {:f}]".format(left_end, right_end))
# + deletable=false editable=false
grader.check("q33")
# -
# #### Question 3.4
# Let's say that `N` was actually 150! Write code that simulates the sampling and bootstrapping process again, as follows:
#
# 1. Generate a new set of random observations the Allies might have seen by sampling from the population table we have created for you below. Make your sample size something reasonable.
# 2. Compute an estimate of `N` from these new observations, using `mean_based_estimator`.
# 3. Using only the new observations, compute 5,000 bootstrap estimates of `N`.
# 4. Plot these bootstrap estimates and compute an interval covering the middle 95%.
# +
population = bpd.DataFrame().assign(serial_number = np.arange(1, 150+1))
new_observations = population.sample(70)
...
new_left_end = ...
new_right_end = ...
print("Middle 95% of bootstrap estimates: [{:f}, {:f}]".format(new_left_end, new_right_end))
# + deletable=false editable=false
grader.check("q34")
# -
# #### Question 3.5
# If you ran your cell above many, many times, approximately what percentage of your 95% confidence intervals will include `N`? Assign either 1, 2, 3, or 4 to the variable `simulating_q5` below.
# 1. 100%
# 2. 95%
# 3. 5%
# 4. It's impossible to tell.
simulating_q5 = ...
# + deletable=false editable=false
grader.check("q35")
# -
# # Finish Line
#
# Congratulations! You are done with lab06
#
# To submit your assignment:
#
# 1. Select `Kernel -> Restart & Run All` to ensure that you have executed all cells, including the test cells.
# 2. Read through the notebook to make sure everything is fine and all tests passed.
# 3. Run the cell below to run all tests, and make sure that they all pass.
# 4. Download your notebook using `File -> Download as -> Notebook (.ipynb)`, then upload your notebook to Gradescope.
# For your convenience, you can run this cell to run all the tests at once!
grader.check_all()
| 18,245 |
/DeterministicPCA.ipynb | 67fc4b503afe1d9c23e1119c6fb7ad2798b1cb31 | [
"MIT"
] | permissive | MarcelloVendruscolo/ProbabilisticMachineLearning | https://github.com/MarcelloVendruscolo/ProbabilisticMachineLearning | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 443,588 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + id="iRjddwOiWzr5" tags=["keep"]
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import math
# + [markdown] id="wjyOyZDn6rk2"
# ## Exercise 11.1: PCA on MNIST
#
# In the lectures the principal component analysis (PCA) was introduced as a
# method for dimensionality reduction and feature extraction, i.e., to condense
# data by mapping it to a lower dimensional space of the most important features.
#
# Let
# \begin{equation*}
# \mathbf{X} = \begin{bmatrix} \mathbf{x}_1^\intercal \\ \vdots \\
# \mathbf{x}_N^\intercal \end{bmatrix} \in \mathbb{R}^{N \times D}
# \end{equation*}
# be a matrix of $N$ data samples $\mathbf{x}_n \in \mathbb{R}^D$, which are
# centered around zero.
# We consider a PCA with $M < D$ components.
#
# To project the data points $\mathbf{x}_n$ to the $M$-dimensional space that is
# defined by the $M$ principal components of $\mathbf{X}$, the so-called principal
# subspace of $\mathbf{X}$, we can use the singular value decomposition of
# $\mathbf{X}$. Let $\mathbf{X} = \mathbf{U} \mathbf{\Sigma} \mathbf{V}^\intercal$ be the
# singular value decomposition of the data matrix $\mathbf{X}$ with the singular
# values sorted in descending order.
#
# Then the projection $\mathbf{z}_n$ of
# data point $\mathbf{x}_n$ to the principal subspace of $\mathbf{X}$ is given by
# \begin{equation}
# \mathbf{z}_n^\intercal = \mathbf{x}_n^\intercal \begin{bmatrix} \mathbf{v}_1 & \cdots & \mathbf{v}_M \end{bmatrix},
# \end{equation}
# where $\mathbf{v}_i$ is the $i$th column of matrix $\mathbf{V}$. The vector
# $\mathbf{z}_n$ can be seen as an encoding of the data point
# $\mathbf{x}_n$ in a lower dimensional space that is constructed by the directions
# for which the data shows the largest variations.
#
# - **NOTE:** The
# singular value decomposition of a matrix $\mathbf{X} \in \mathbb{R}^{N \times D}$
# is defined as a factorization of the form $\mathbf{X} = \mathbf{U} \mathbf{\Sigma}
# \mathbf{V}^\intercal$ where $\mathbf{U} \in \mathbb{R}^{N \times N}$ and
# $\mathbf{V} \in \mathbb{R}^{D \times D}$ are orthogonal matrices and
# $\mathbf{\Sigma} \in \mathbb{R}^{N \times D}$ is a rectangular diagonal matrix with
# non-negative numbers on the diagonal. The diagonal entries of $\mathbf{\Sigma}$ are
# the so-called singular values of $\mathbf{X}$. A common convention is to sort
# the singular values in descending order, in which case the diagonal matrix $\mathbf{\Sigma}$ is uniquely determined by $\mathbf{X}$.
#
# In this exercise, we perform and analyse PCA of the MNIST image data set with $M = 2$ principal components.
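# A small synthetic illustration (independent of MNIST) of projecting centered data
# onto its first M = 2 principal directions via the SVD, as described above. Note that
# np.linalg.svd returns V transposed.
rng = np.random.RandomState(0)
X_demo = rng.randn(100, 5)
X_demo = X_demo - X_demo.mean(axis=0)      # center the data
U_demo, S_demo, Vt_demo = np.linalg.svd(X_demo, full_matrices=False)
Z_demo = X_demo @ Vt_demo.T[:, :2]         # encodings in the principal subspace
Z_demo.shape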
# + id="3_ghoR-Bxh7c" tags=["keep"] colab={"base_uri": "https://localhost:8080/", "height": 471, "referenced_widgets": ["4ccd141c98f54d90a9cd11aa91d3d8db", "a34f4a7ed0aa4959865fb6c18986c8da", "f75356b111904220955975e86d84b41b", "db7c9423a3d1406a9b8966aa2832292e", "f5f181b5d01847b7b5d8b994029b0e1d", "31e0ba7ceae1456ab227c3f6be6639aa", "f3c4195724224d828230727761f49ac2", "c931646be58f4fa784c3b709594405f9", "0a6dc847c2564c92b1b0321f242e7996", "4bd7c3ead5684cdabbb051b0ef13b04b", "6ecf1e979b2c4ecbad9ef1fd59927cee", "ce58554fdafe43ed8bb84e1b25ede9ec", "8254ddf4d1f54f0b89c9d760ad226b2d", "905b3b6803af496faf4034426e6e4dac", "7975a6c7dd0f47d69f23b472d7787cc7", "d24f97fbd12245618b5e047c56ab7bce", "a8b6d601a7404561bd3557963bb824ee", "05253a71049c4ac28763a6e35d345c7f", "3b4bb56c17a546e7a5e777221bbe39b8", "b929180018304b2cac36ce8c82bc3da4", "68a352513337409d8e1960bd0ffb46dd", "86cedf4c83a845bb83b966d3c5e3a535", "3a407ae95dc14cc4835db525a6ab5c0f", "026533823a6b48d49a2f99e32d872168", "8afdf2837c5a48cdaafa95f7edb9b009", "b6cdfbf584cb4ffb87f7bf6ee310ced1", "e1d12678f9f04619b3916f07f88bddad", "1ec6de04ff464db2bb5b80be79160771", "96cf6751628f402db35adf66eecd5a5e", "8b9a19d8bff044d4b6bcad9bd68bae3a", "225e2d9f8d904b9b89f85c1637b15cba", "8400dcf30433453896870fccc519ebdd", "334cc9537c694d64ad7110c9f9bf2981", "cbaddae4996d4534950fb0b13073dbb8", "4efa3f42f07944eb8977c914a78f4fa7", "9227c8c3297648c5b45d02e9d541a6c0", "3ac44b9d564b47e0bb9c2ae747737632", "da65b97e1ec848f3893d46375fb82a3b", "883e3560a05a4fd18dcfa38608747615", "cba7e0a667d14b299c83004b58be05c0", "dc039fe63c9a404c9b168c6f9a5acd96", "b48b4ab936194ec6b51b0be1671a3fcc", "891a8cf764d549c7bbab1782f1a3412b", "cffd8ffd876948c6ac21974d9de826ee"]} executionInfo={"status": "ok", "timestamp": 1633497614385, "user_tz": -120, "elapsed": 11601, "user": {"displayName": "Marcello Vendruscolo", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "10393122258563683124"}} outputId="efb2306c-6bbe-4062-99b9-3957517a14bf"
# We use torch and torchvision for automatically downloading the MNIST dataset
import torch
import torchvision
import torchvision.transforms as transforms
# download MNIST datasets and flatten images to vectors of length 784
transform = transforms.Compose([transforms.ToTensor(),
transforms.Lambda(lambda x: x.view(-1))])
trainset = torchvision.datasets.MNIST(root='./data', train=True,
download=True, transform=transform)
testset = torchvision.datasets.MNIST(root='./data', train=False,
download=True, transform=transform)
# extract a complete PyTorch dataset
def extract(dataset):
datasize = len(dataset)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=datasize, shuffle=False)
return next(iter(dataloader))
# extract all training and test images and labels into PyTorch tensors
train_images, train_labels = extract(trainset)
test_images, test_labels = extract(testset)
# It is possible to do the exercise both using torch or numpy.
# In the second case, you can just convert the matrices to numpy using:
# train_images = train_images.numpy()
# train_labels = train_labels.numpy()
# test_images = test_images.numpy()
# test_labels = test_labels.numpy()
# + [markdown] id="SMqxXreK5ghh"
#
# Let $\mathbf{X} \in [0,1]^{N \times 784}$ be a matrix of the MNIST training data set with $N = 60000$, where each row represents a training image. Moreover, let $\mathbf{X} - \overline{\mathbf{x}}^\intercal = \mathbf{U} \mathbf{\Sigma} \mathbf{V}^\intercal$ be the singular value decomposition of the MNIST training data after the mean has been removed, where $\overline{\mathbf{x}} = \frac{1}{N} \sum_{i=1}^N \mathbf{x}_i$.
#
# We compute the singular value decomposition $\mathbf{U} \boldsymbol{\Sigma} \mathbf{V}^\intercal$ with the function [`torch.svd`](https://pytorch.org/docs/stable/torch.html#torch.svd).
# + id="wkfNjs3XbMyC" tags=["keep"]
# center training images
train_mean = train_images.mean(axis=0)
train_images_centered = train_images - train_mean
U, S, V = torch.svd(train_images_centered)
# + [markdown] id="g9Jt2du7O4z1"
# ### Item (a)
#
# Let `train_encoding` $\in \mathbb{R}^{60000 \times 2}$ and `test_encoding` $\in \mathbb{R}^{10000 \times 2}$ be the encodings of the MNIST training and test images in the two-dimensional principal subspace of the training images. Compute the two encodings with the help of $\mathbf{U}$, $\boldsymbol{\Sigma}$, and $\mathbf{V}$.
#
# *Hints*: Remember that the presence of the center $\overline{\textbf{x}}$ needs to be accounted for.
#
# + id="XnM5TzfNxh7f" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1633499108677, "user_tz": -120, "elapsed": 424, "user": {"displayName": "Marcello Vendruscolo", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "10393122258563683124"}} outputId="30182feb-937d-445c-81a2-28060d35e19c"
train_encoding = torch.matmul(train_images_centered, V[:,0:2])
print('Train_encoding: ', train_encoding.shape)
test_encoding = torch.matmul(test_images - train_mean, V[:,0:2])
print('Test_encoding: ', test_encoding.shape)
# + [markdown] id="QqA3hX3ybMyZ"
# Generate 2D scatter plots of the data points you computed in the last item. The function `plot_encoding` below can be used to generate a plot of the latent space.
# + id="XnA0MGjBbMyc" tags=["keep"]
def plot_encoding(train_data, test_data):
# create two plots side by side
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6))
# plot data
titles = ["training data", "test data"]
for (data, title, ax) in zip([train_data, test_data], titles, axes.flat):
encodings, labels = data
scatter = ax.scatter(encodings[:, 0], encodings[:, 1],
c=labels, cmap=plt.cm.tab10, vmin=-0.5,
vmax=9.5, alpha=0.7)
ax.set_xlabel("$z_1$")
ax.set_ylabel("$z_2$")
ax.set_title(title)
# add colorbar
cb = fig.colorbar(scatter, ticks=np.arange(0, 10), ax=axes.ravel().tolist())
cb.ax.set_title("digit")
return fig
# + colab={"base_uri": "https://localhost:8080/", "height": 406} id="CEC8YzTmxh7h" tags=["keep"] executionInfo={"status": "ok", "timestamp": 1633499115673, "user_tz": -120, "elapsed": 3376, "user": {"displayName": "Marcello Vendruscolo", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "10393122258563683124"}} outputId="c429bb32-a86a-421f-cd5b-b14919478a6a"
plot_encoding((train_encoding, train_labels), (test_encoding, test_labels))
plt.show()
# + [markdown] id="QjsGYI6pbMyt"
# ### Item (b)
#
# With the help of the matrices $\mathbf{U}$, $\boldsymbol{\Sigma}$, and $\mathbf{V}$ computed in the last item, map the `test_encoding` from the encoding space back to the image space, obtaining `test_reconstruction` $\in \mathbb{R}^{10000 \times 784}$, which consists of the images in the MNIST test data set reconstructed from only the two principal components.
# + id="eNv8RcWWxh7k"
test_reconstruction = torch.matmul(test_encoding, np.transpose(V[:,0:2])) + train_mean
# + [markdown] id="QjtEPRQnbMyv"
# Plot some test images and their reconstructed counterparts. You can use the function `plot_reconstruction` below to do the plotting. Which digits can be reconstructed and decoded quite well, and which ones seem to be more challenging?
# + id="yaF1Pr4Sxh7k"
# plot at most 8 random reconstructions
def plot_reconstruction(originals, reconstructions, labels, nrow=2):
# indices of displayed samples
n = originals.size(0)
indices = range(n) if n <= 8 else np.random.choice(n, size=8, replace=False)
# compute number of rows and columns of grid
num_samples = len(indices)
num_cols = min(nrow, num_samples)
num_rows = int(math.ceil(float(num_samples) / nrow))
fig = plt.figure(figsize=(5 * num_cols, 2.5 * num_rows))
for (i, idx) in enumerate(indices):
# extract original, reconstruction, and label
original = originals[idx]
reconstruction = reconstructions[idx]
label = labels[idx]
# configure subplot
plt.subplot(num_rows, num_cols, i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.title(f"Label: {label.item()}", fontweight='bold')
# plot original and reconstructed image
# design a grid, similar to torchvision.utils.make_grid
grid = torch.ones(1, 1, 32, 62)
grid.narrow(2, 2, 28).narrow(3, 2, 28).copy_(
original.view(-1, 1, 28, 28))
grid.narrow(2, 2, 28).narrow(3, 32, 28).copy_(
reconstruction.view(-1, 1, 28, 28))
plt.imshow(grid.squeeze().cpu().numpy(),
vmin=0.0, vmax=1.0, cmap='gray_r')
return fig
# + id="pVhQvVPmbMyy" tags=["keep"] colab={"base_uri": "https://localhost:8080/", "height": 594} executionInfo={"status": "ok", "timestamp": 1633499492731, "user_tz": -120, "elapsed": 288, "user": {"displayName": "Marcello Vendruscolo", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "10393122258563683124"}} outputId="a637e4b8-1bff-43be-a9b4-c4ebc5e59ac8"
plot_reconstruction(test_images, test_reconstruction, test_labels)
plt.show()
# + [markdown] id="bNR9-T54xh7l"
# ### Item (c)
#
# The comparison of the original images and their reconstructions provides us with some intuition for how much information is lost by the compression of the images to the two-dimensional latent space. As a less subjective measure we calculate the average squared reconstruction error. An advantage of an objective measure such as the average squared reconstruction error is that it enables us to compare the PCA with other models for dimensionality reduction.
# \begin{equation*}
# \mathrm{sqerr} := \frac{1}{10000} \sum_{i=1}^{10000} \|\mathbf{x}_i - \tilde{\mathbf{x}}_i\|^2_2
# \end{equation*}
# of the images $\mathbf{x}_i \in {[0,1]}^{784}$ and their reconstruction $\tilde{\mathbf{x}}_i \in \mathbb{R}^{784}$ ($i = 1,\ldots, 10000$) in the MNIST test data set.
#
# What average squared reconstruction error do you get with PCA?
# + id="Ho3lBFIyaKg6" tags=["keep"] colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1633500299300, "user_tz": -120, "elapsed": 308, "user": {"displayName": "Marcello Vendruscolo", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "10393122258563683124"}} outputId="036db55b-b5fb-4a94-db20-e25dee04c9de"
sqerr = torch.sum((test_images - test_reconstruction)**2)/len(test_images)
print(f'Average squared reconstruction error: {sqerr}')
# + [markdown] id="rqr1928yo9M0"
# ### Item (d)
#
# For each digit from 0 to 9, compute the mean of the training data points in the encoding space.
# + id="z-lL95Elxh7i" tags=["keep"]
train_mean_encodings = torch.zeros(10,2)
repetitions = torch.zeros(10)
for (encoding, label) in zip(train_encoding, train_labels):
repetitions[label] += 1
    # Running-mean update of the per-digit centroid in the encoding space.
    train_mean_encodings[label,:] += (encoding - train_mean_encodings[label,:]) / repetitions[label]
# + [markdown] id="WZawT13Jxh7j"
# Generate the images corresponding to each of the means obtained in the encoding space. Use the SVD matrices to convert from the encoding space back to the image space. Use the function `plot_images` below to generate a plot of those images.
# + id="MfZfs9rZxh7j" tags=["keep"]
def plot_images(images, labels=None, nrow=5):
# compute number of columns of grid
num_samples = images.size(0)
num_cols = min(nrow, num_samples)
num_rows = int(math.ceil(float(num_samples) / nrow))
fig = plt.figure(figsize=(2 * num_cols, 2 * num_rows))
for i in range(num_samples):
# extract image and labels if provided
image = images[i]
label = None if labels is None else labels[i]
# configure subplot
plt.subplot(num_rows, num_cols, i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
if label is not None:
plt.title(f"Label: {label.item()}", fontweight='bold')
# plot image
plt.imshow(image.view(28, 28).cpu().numpy(),
vmin=0.0, vmax=1.0, cmap='gray_r')
return fig
# + id="wXhNi4p9xh7j" tags=["keep"] colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"status": "ok", "timestamp": 1633502791157, "user_tz": -120, "elapsed": 966, "user": {"displayName": "Marcello Vendruscolo", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "10393122258563683124"}} outputId="28789621-78a1-4282-db0e-97166f7ab9f9"
# compute mean images
train_mean_images = train_mean + train_mean_encodings.mm(V[:, :2].t())
plot_images(train_mean_images, torch.arange(10))
plt.show()
# + [markdown] id="kXOdBYQm1R29"
# ## Exercise 11.2 - Derivations for probabilistic PCA
#
#
# In constrast to (regular) PCA, the so-called probabilistic PCA (PPCA) allows a probabilistic
# interpretation of the principal components. The probabilistic formulation
# of PCA also allows us to extend the method and alter its underlying assumptions
# quite easily, as we will learn during the course of this laboratory.
#
#
# As before, let $\mathbf{x} \in \mathbb{R}^D$ represent a data sample that we
# want to decode from a lower dimensional representation
# $\mathbf{z} \in \mathbb{R}^M$ with $M < D$. The PPCA model assumes that
# $\mathbf{z}$ is standard normally distributed and $\mathbf{x}$
# can be decoded by a noisy linear transformation of $\mathbf{z}$.
# Mathematically, the model is given by
# \begin{align*}
# p(\mathbf{x} \,|\, \mathbf{z}) &= \mathcal{N}\left(\mathbf{x}; \mathbf{W}\mathbf{z} + \boldsymbol{\mu}, \sigma^2 \mathbf{I}_D\right), \\
# p(\mathbf{z}) &= \mathcal{N}(\mathbf{z}; \boldsymbol{0}, \mathbf{I}_M),
# \end{align*}
# with parameters $\mathbf{W} \in \mathbb{R}^{D \times M}$,
# $\boldsymbol{\mu} \in \mathbb{R}^D$, and $\sigma^2 > 0$.
# [Michael E. Tipping and Christopher M. Bishop show in "Probabilistic Principal Component Analysis"](https://www.jstor.org/stable/2680726)
# that for $\sigma^2 \to 0$
# the model recovers the standard PCA (but the components of $\mathbf{z}$ might
# be permuted).
#
# We assume that the data $\mathbf{x}_1, \ldots, \mathbf{x}_N$ is identically
# and independently distributed according to the PPCA model. In a maximum
# likelihood setting, one determines the parameters $\mathbf{W}$, $\boldsymbol{\mu}$,
# and $\sigma^2$ that maximize the likelihood
# \begin{equation*}
# p(\mathbf{x}_1, \ldots, \mathbf{x}_N ; \mathbf{W}, \boldsymbol{\mu}, \sigma^2)
# = \prod_{n=1}^N p(\mathbf{x}_n; \mathbf{W}, \boldsymbol{\mu}, \sigma^2),
# \end{equation*}
# or equivalently the log-likelihood
# \begin{equation*}
# \log p(\mathbf{x}_1, \ldots, \mathbf{x}_N; \mathbf{W}, \boldsymbol{\mu}, \sigma^2)
# = \sum_{n=1}^N \log p(\mathbf{x}_n; \mathbf{W}, \boldsymbol{\mu}, \sigma^2).
# \end{equation*}
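# A tiny sketch (with made-up parameter values) of sampling from the PPCA generative
# model above: z ~ N(0, I_M) and x | z ~ N(Wz + mu, sigma^2 I_D).
rng_ppca = np.random.RandomState(1)
D_demo, M_demo, N_demo = 4, 2, 500
W_demo = rng_ppca.randn(D_demo, M_demo)
mu_demo = rng_ppca.randn(D_demo)
sigma2_demo = 0.1
z_ppca = rng_ppca.randn(N_demo, M_demo)
x_ppca = z_ppca @ W_demo.T + mu_demo + np.sqrt(sigma2_demo) * rng_ppca.randn(N_demo, D_demo)
x_ppca.shape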
# + [markdown] id="xQffYFcTy3jI"
# ### Item (a)
#
# *(Pen and paper exercise)*
#
# Show that for the model of the probabilistic PCA
# \begin{equation*}
# p(\mathbf{x}) = \mathcal{N}(\mathbf{x}; \boldsymbol{\mu}, \mathbf{C}),
# \end{equation*}
# where $\mathbf{C} = \mathbf{W}\mathbf{W}^\intercal + \sigma^2 \mathbf{I}_D$.
#
#
# + [markdown] id="3UCseyss1Vsu"
# ### Item (b)
# *(Pen and paper exercise)*
#
# Show that the distribution of the latent variable $\mathbf{z}$ conditioned on
# $\mathbf{x}$ is Gaussian as well and given by
# \begin{equation*}
# p(\mathbf{z} \,|\, \mathbf{x}) = \mathcal{N}\left(\mathbf{z}; \mathbf{M}^{-1} \mathbf{W}^\intercal (\mathbf{x} - \boldsymbol{\mu}), \sigma^2 \mathbf{M}^{-1} \right),
# \end{equation*}
# where $\mathbf{M} = \mathbf{W}^\intercal \mathbf{W} + \sigma^2 \mathbf{I}_M$.
#
#
#
# + [markdown] id="GR9ls8C-s_dd"
# ## Exercise 11.3 - Gaussian Mixture Models
#
#
# Consider the dataset generated below. It was generated using a Gaussian mixture; that is, the dataset is generated as follows:
#
# - We sample a variable `pp` from a Bernoulli distribution with probability `pi = 0.7`.
# - When the result is equal to one (`pp == 1`) we sample from the normal:
# $$\mathcal{N}\left(\mu_1 = \begin{bmatrix} 1 \\ 2\end{bmatrix}, \Sigma_1 = \begin{bmatrix} 2 & 0 \\ 0 & 1 \end{bmatrix}\right)$$
#
# - When the result is equal to zero (`pp == 0`) we sample from the normal:
# $$\mathcal{N}\left(\mu_2 = \begin{bmatrix} 5 \\ 7\end{bmatrix}, \Sigma_2 = \begin{bmatrix} 2 & 1 \\ 1 & 2 \end{bmatrix}\right)$$
#
#
# In this exercise, you will use an expectation-maximization algorithm to estimate the parameters of a Gaussian mixture model fitted to this dataset. Using what you implement, you will then apply this model to the two-dimensional encodings from Exercise 11.1.
# + id="YHHYEKlcs_TX"
from scipy.stats import multivariate_normal, uniform, bernoulli
def plot_2dnormal(ax, vnormal, color='red'):
# Plot center
plt.plot(vnormal.mean[0], vnormal.mean[1], '*', ms=5, color=color)
# Get countour grid
x, y = np.mgrid[-3:3:.01, -3:3:.01]
# rescale x, y
x = vnormal.cov[0,0] * x + vnormal.mean[0]
y = vnormal.cov[1,1] * y + vnormal.mean[1]
# Plot countour
pos = np.dstack((x, y))
ax.contour(x, y, vnormal.pdf(pos), colors=color, levels=3)
# First normal distribution
mu1 = [1, 2]
cov1 = [[2, 0], [0, 1]]
n1 = multivariate_normal(mean=mu1, cov= cov1)
# Parameters of the second normal distribution
mu2 = [5, 7]
cov2 = [[2, 1], [1, 2]]
n2 = multivariate_normal(mean=mu2, cov=cov2)
# Probability of belonging to each
pi = 0.7
pp = bernoulli(p=pi)
odds = [pi, 1 - pi]
# Create a list with all the normals
vnormals = [n1, n2]
# Sample N=300 samples from each
N = 300
v1 = n1.rvs(N)
v2 = n2.rvs(N)
p = pp.rvs(N)
gmm_data = np.where(p[:, None] == 1, v1, v2)
fig, ax = plt.subplots()
ax.scatter(gmm_data[:,0], gmm_data[:,1])
plot_2dnormal(ax, n1, color='red')
plot_2dnormal(ax, n2, color='blue')
plt.show()
# + [markdown] id="fluE4c4ZCRx1"
# ### Item (a)
#
# Implement a function `e_step` that receives as input:
# - the `gmm_data`: which is an `300 x 2` np array generated above;
# - the `odds`: which is a list containing as elements the `pi` and `1 - pi`; and
# - the `vnormals`: a list `[n1, n2]` of variable generated using: (`multivariate_normal`)[https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.multivariate_normal.html]. See `vnormals` above.
#
# and outputs an array of dimension `2 x 300` whose columns contain, for each point, the posterior probabilities of belonging to each of the two normals (the responsibilities). For each point this is:
# $$\left(\frac{\pi_1\mathcal{N}\left(x; \mu_1 , \Sigma_1\right)}{\sum_i \pi_i\mathcal{N}\left(x; \mu_i , \Sigma_i\right)}, \frac{\pi_2\mathcal{N}\left(x; \mu_2 , \Sigma_2\right)}{\sum_i \pi_i\mathcal{N}\left(x; \mu_i , \Sigma_i\right)}\right) $$
#
# This computation corresponds to the expectation step of the expectation-maximization algorithm.
# + id="pamMsGZ3Cdhx"
def e_step(gmm_data, odds, vnormals):
return ## TODO
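# One possible implementation of the expectation step described above (a sketch; it
# overrides the stub so that the cells below can run). It returns the responsibilities
# with one row per mixture component, matching how `ws` is indexed further down.
def e_step(gmm_data, odds, vnormals):
    # Unnormalized responsibilities: pi_i * N(x; mu_i, Sigma_i) for each component i.
    ws = np.array([pi_i * rv.pdf(gmm_data) for pi_i, rv in zip(odds, vnormals)])
    # Normalize over the components so that, for every data point, the responsibilities sum to one.
    return ws / ws.sum(axis=0, keepdims=True)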
# + [markdown] id="Ngfkjp6cBGzM"
# The code below uses the values produced by this function to decide which normal each data point is associated with.
# + id="TlmAnsuLCfBR"
# appy the function to the dataset
ws = e_step(gmm_data, odds, vnormals)
# Use it to decide to which normal it belongs to
fig, ax = plt.subplots()
plot_2dnormal(ax, n1, color='black')
plot_2dnormal(ax, n2, color='black')
belong_to = ws.argmax(axis=0)
ax.scatter(gmm_data[belong_to==0,0], gmm_data[belong_to==0,1], color='red')
ax.scatter(gmm_data[belong_to==1,0], gmm_data[belong_to==1,1], color='blue')
plt.show()
# + [markdown] id="5a8a_4ny7IwG"
# ### Item (b)
#
# Implement a function `m_step` that receives as input:
# - the `gmm_data`: which is an `300 x 2` np array generated above;
# - the `ws`: a `2 x 300` np array whose first row gives the probability of each point belonging to the first normal of the mixture, and whose second row gives the probability of each point belonging to the second normal, i.e. the same format as the output of the function `e_step` implemented above.
#
# It outputs the parameters of the mixture:
# - the `odds`: which is a list containing as elements the `pi` and `1 - pi`; and
# - the `vnormals`: a list `[n1, n2]` of variable generated using: (`multivariate_normal`)[https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.multivariate_normal.html]. See `vnormals` above.
#
# This can be done using Eq. 10.16 from http://smlbook.org/.
# + id="aaqRa9mDFpxP"
def m_step(gmm_data, ws):
return ### TODO
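# One possible implementation of the maximization step described above (a sketch that
# overrides the stub; it follows the standard EM updates, cf. Eq. 10.16 of smlbook.org):
# mixture weights, weighted means and weighted covariances computed from `ws`.
def m_step(gmm_data, ws):
    n_components, n_points = ws.shape
    odds = []
    vnormals = []
    for i in range(n_components):
        w = ws[i]
        n_i = w.sum()
        odds.append(n_i / n_points)
        # Responsibility-weighted mean of the data for component i.
        mu = (w[:, None] * gmm_data).sum(axis=0) / n_i
        # Responsibility-weighted covariance of the data for component i.
        diff = gmm_data - mu
        cov = (w[:, None, None] * (diff[:, :, None] * diff[:, None, :])).sum(axis=0) / n_i
        vnormals.append(multivariate_normal(mean=mu, cov=cov))
    return odds, vnormals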
# + [markdown] id="rvqfsNgWCt7N"
# You can use the values computed in the last few items as a sanity check:
#
#
# + id="JbLA6_i_GDQP"
odds, vnormals = m_step(gmm_data, ws)
print('odds:')
print(odds)
print('')
for i, rv in enumerate(vnormals):
print('Normal {}'.format(i+1))
print('- mean:')
print(rv.mean)
print('- cov:')
print(rv.cov )
print('')
# + [markdown] id="dq-x0rL8GPE5"
# If everything went right, you should obtain values close to the ones used to generate the dataset:
#
# - `odds` $\approx$ [0.7 0.3]
# - The first normal: $$\mu_1 = \begin{bmatrix} 1 \\ 2\end{bmatrix}, \Sigma_1 = \begin{bmatrix} 2 & 0 \\ 0 & 1 \end{bmatrix}$$
# - The second normal: $$\mu_2 = \begin{bmatrix} 5 \\ 7\end{bmatrix}, \Sigma_2 = \begin{bmatrix} 2 & 1 \\ 1 & 2 \end{bmatrix}$$
#
# + [markdown] id="5c31XGxiLb1D"
# ### Item (c)
# Use the code below to plot 3 iterations of the Expectation-Maximization algorithm on the dataset generated above.
# + id="moydTocLvWbQ"
# Initialization
mu1 = [0, 0]
cov1 = [[1, 0], [0, 1]]
n1_estimated = multivariate_normal(mean=[0, 0], cov= cov1)
# Parameters of the second normal distribution
mu2 = [10, 10]
cov2 = [[1, 0], [0, 1]]
n2_estimated = multivariate_normal(mean=mu2, cov=cov2)
vnormals_estimated = [n1_estimated, n2_estimated]
# Probability of belonging to each
odds_estimated = [0.5, 0.5]
for i in range(3):
fig, ax = plt.subplots()
ws = e_step(gmm_data, odds_estimated, vnormals_estimated)
belong_to = ws.argmax(axis=0)
ax.scatter(gmm_data[:, 0], gmm_data[:, 1], c=belong_to)
for j in range(ws.shape[0]):
plot_2dnormal(ax, vnormals_estimated[j], color='black')
odds_estimated, vnormals_estimated = m_step(gmm_data, ws)
plt.show()
# + [markdown] id="gIk96nSiPy1E"
# ### Item (d)
#
# Adapt the code above and apply the method to the two-dimensional MNIST encoding you generated in Exercise 11.1. You can choose how many centers you want to use and which initialization parameters.
# + id="mWSxgXA0P2wX"
| 25,482 |
/.ipynb_checkpoints/NBA_GET_TEAMS-checkpoint.ipynb | 8d0048be0cfc6d51b22ba94e4716fea5126ac2ff | [] | no_license | Peilin-D/NBA_Stats | https://github.com/Peilin-D/NBA_Stats | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 3,297 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <small><i>This notebook was prepared by [Rishi Rajasekaran](https://github.com/rishihot55). Source and license info is available on [Github](https://github.com/donnemartin/interactive-coding-challenges).</i></small>
# # Challenge Notebook
# ## Problem: Print all valid combinations of n-pairs of parentheses
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# * [Solution Notebook](#Solution-Notebook)
# ## Constraints
# * None
# ## Test Cases
#
# * 0 -> []
# * 1 -> [()]
# * 2 -> [(()), ()()]
# * 3 -> [((())), (()()), (())(), ()(()), ()()()]
# ## Code
# +
def parentheses_util(no_left, no_right, pair_string, result):
# TODO: implement parentheses pairing here
pass
def pair_parentheses(n):
result_set = set()
if n == 0:
return result_set
parentheses_util(n, n, '', result_set)
return result_set
# -
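# One possible implementation of `parentheses_util` (a sketch, not the official solution
# from the linked solution notebook). It recursively places '(' while some remain, places
# ')' only when doing so keeps the string valid, and adds complete strings to `result`.
def parentheses_util(no_left, no_right, pair_string, result):
    # no_left / no_right: number of '(' and ')' still to place.
    if no_left == 0 and no_right == 0:
        result.add(pair_string)
        return
    if no_left > 0:
        parentheses_util(no_left - 1, no_right, pair_string + '(', result)
    if no_right > no_left:
        parentheses_util(no_left, no_right - 1, pair_string + ')', result)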
# ## Unit Test
# +
# # %load test_n_pairs_parentheses.py
from nose.tools import assert_equal
class TestPairParentheses(object):
def test_pair_parentheses(self, solution):
assert_equal(solution(0), set([]))
assert_equal(solution(1), set(['()']))
assert_equal(solution(2), set(['(())', '()()']))
assert_equal(solution(3), set(['((()))','(()())', '(())()', '()(())', '()()()']))
print('Success: test_pair_parentheses')
def main():
test = TestPairParentheses()
test.test_pair_parentheses(pair_parentheses)
if __name__ == '__main__':
main()
# -
# ## Solution Notebook
#
# Review the [Solution Notebook](#) for a discussion on algorithms and code solutions.
| 1,920 |
/newbridge/particlebox.ipynb | d6701ed7348b884ecebe7a5730ad33d4414ca499 | [] | no_license | hbhat4000/pathsamp | https://github.com/hbhat4000/pathsamp | 1 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 7,564 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import spacy
from spacy.tokens import Span
from spacy.matcher import PhraseMatcher
nlp = spacy.load("pt_core_news_sm")
doc = nlp("Eu gosto muito de jogar a bola.")
print(doc.text)
animals = ["Golden Retriever", "cat", "turtle", "Rattus norvegicus"]
matcher = PhraseMatcher(nlp.vocab)
# -
def animal_component(doc):
    animals = ["Golden Retriever", "cat", "turtle", "Rattus norvegicus"]
    matcher = PhraseMatcher(nlp.vocab)
    # The matcher needs phrase patterns before it can match anything
    # (spaCy v2-style API, consistent with the function-based pipeline component used here).
    matcher.add("ANIMAL", None, *[nlp.make_doc(animal) for animal in animals])
    matches = matcher(doc)
    span = [Span(doc, start, end, label="ANIMAL") for match_id, start, end in matches]
    doc.ents = span
    return doc
# +
nlp.add_pipe(animal_component, last=True)
time_spent = {}  # maps function name -> elapsed time in seconds
# +
from time import time
def timer(func):
def f(*args, **kwargs):
global time_spent
before = time()
rv = func(*args, **kwargs)
after = time()
        time_spent[func.__name__] = format(after - before, '.2f')
return rv
return f
# +
@timer
def get_entities(doc):
return [(ent.text, ent.label_) for ent in doc.ents]
def get_verbs(doc):
return [(token.text, token.pos_) for token in doc if token.pos_ == "VERB" or token.pos_ == "AUX"]
@timer
def get_countries(doc):
return [(ent.text, ent.label_) for ent in doc.ents if ent.label_ == 'GPE']
def get_persons(doc):
    return [(ent.text, ent.label_) for ent in doc.ents if ent.label_ == 'PER']
# +
TEXT = """
Eu gosto muito de jogar a bola com o meu amigo Manuel e o João.
"""
doc = nlp(TEXT)
print('PERSONS :', get_persons(doc))
print('VERBS :', get_verbs(doc))
print(time_spent)
# +
from transformers import pipeline
classifier = pipeline("sentiment-analysis")
# +
results = classifier(["We are very happy to show you the 🤗 Transformers library", "We hope you don't hate it."])
for result in results:
print(f"label: {result['label']}, with score: {round(result['score'], 4)}")
# +
import csv
p = '../dataset/datasets_73978_166769_fb_sentiment.csv'
tweets = []
with open(p, newline='') as f:
spamreader = csv.DictReader(f)
for row in spamreader:
tweets.append(row["FBPost"])
tweets[0]
# +
import pandas as pd
import collections
from collections import Counter
import numpy as np
review_subset = pd.read_csv('/home/yanick/KRAKEN/PIP/NLP/data/Yelp Dataset/yelp.csv')
start = 10
end = 20
print(len(review_subset)/10)
by_rating = [] #collections.defaultdict(list)
for _, row in review_subset[start:end].iterrows():
by_rating.append({
"review_id": row.review_id,
"text": row.text
})
by_rating
# +
results = classifier(" really I cant believe this place has received such high reviews from people\n\nmy lady and i walked in and were greeted rather rudely by a pretentious bitch at the front with a monotone name please instead of a warm friendly french welcome to this rather charming looking place we didnt have a reservation which from the looks of how dead empty the place was didnt seem like a problem until the hostess whisked through her reservation list and explained shed try to fit us in it was 6pm the place had one other couple dining and we were informed that the next party was arriving at 730 we werent really looking to hang out it would have be awfully lovely if yelpers would have informed of the need to MAKE RESERVATIONS before going in here because if you dont youre automatically a piece of shit according to the staff sorry i dont plan very much ahead next time i will and it wont be here\n\nanyways after debating for a minute or two with herself the hostess decided we could squeeze in and sat us right on top of the other couple inside the restaurant she informed us that we could cork our own wine for their low low price of 9 fucking dollars in words only office space could adequately describecoup des tartes what is it exactly that you do \n\nthe food was good nothing spectacular for 25 a plate we got a salad with apples and nuts and bleu cheese which was served warm now this is the first warm salad ive had all of my life and by no means am i some sort of wordly food expert but i believe salad should be cold and crisp not soggy and warm yuck the cordon bleu was tasty and filling however the wobbly table was an extreme annoyance when cutting through the chicken but it was solved with no thanks to the staff by placing a jackinthebox gift card under the leg i will admit the mashed potatoes are some of the creamiest and best ive ever had my lady got the pork tenderloin which had a strange pepperysweet pairing which i didnt particularly care for we were broke for desert so unfortunately we didnt get to try any tarts you would think for a place that promotes itself as being so poise would get some fancier menus other than a 15 piece of cardstock copied in bulk at kinkos\n\nthis place was rather disappointing i honestly thought it would be way more comfy and welcoming inside but it really wasnt it presents itself as being so high class and french and ohlala wee wee but when it comes down to it its still located in a tiny shack of a house at 16th street and highland maybe if it were located in the biltmore id give it a bit more cred ")
print("RESULT: ", results)
# +
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased-finetuned-mrpc")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased-finetuned-mrpc")
# +
from transformers import pipeline
from progress.bar import Bar
context = r"""
Extractive Question Answering is the task of extracting an answer from a text given a question. An example of a
question answering dataset is the SQuAD dataset, which is entirely based on that task. If you would like to fine-tune
a model on a SQuAD task, you may leverage the examples/question-answering/run_squad.py script.
"""
bar = Bar('Processing', max=20)
loading = True
while loading:
bar.next()
nlp = pipeline("question-answering")
loading = False
bar.finish()
print(type(nlp))
result = nlp(question="What is extractive question answering?", context=context)
print(result)
# -
nlp = pipeline("ner")
# +
from transformers import pipeline
summarizer = pipeline("summarization")
# +
ARTICLE = """ New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York.
A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband.
Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared "I do" five more times, sometimes only within two weeks of each other.
In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her "first and only" marriage.
Barrientos, now 39, is facing two criminal counts of "offering a false instrument for filing in the first degree," referring to her false statements on the
2010 marriage license application, according to court documents.
Prosecutors said the marriages were part of an immigration scam.
On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, Christopher Wright, who declined to comment further.
After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective
Annette Markowski, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002.
All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say.
Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages.
Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted.
The case was referred to the Bronx District Attorney\'s Office by Immigration and Customs Enforcement and the Department of Homeland Security\'s
Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt, Turkey, Georgia, Pakistan and Mali.
Her eighth husband, Rashid Rajput, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force.
If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18.
"""
#s = summarizer(ARTICLE, max_length=118, min_length=30, do_sample=False)
#print(s[0]["summary_text"])
# +
ARTICLE = """
In order to analyze what has been done in Cape Verde with regard to Intellectual Property
and also Digital Signature, we interviewed the competent entities related to each particular subject
and conducted interviews and applied questionnaires that we sent through electronic mails
and social networks to our artists and authors both in Cape Verde and some who are living
abroad. With the data collected it was possible to verify that the protection of Intellectual
Property is still in a phase of maturation in our archipelago and having been given more
attention in recent years.
"""
#print(len(ARTICLE))
#print(summarizer(ARTICLE, max_length=118, min_length=30, do_sample=False))
# +
import string
s = "@Maria: Do you mean the Nook? Be careful, books you buy for the Kindle are for that piece of electronics, and vice versa. I love my Kindle, there are people that swear by the Nook. They like the color screen.Me? I want an ereader that is a reader-- so I dont need color. The kindle battery lasts longer, and the unit isnt as heavy, which can make a difference after reading for a few hours. :) " # Sample string
out = s.translate(str.maketrans('', '', string.punctuation))
doc = nlp(s.replace("@", ""))
print()
print('Entities: ', get_entities(doc))
print('Countries :', get_countries(doc))
print('PERSON :', get_persons(doc))
# +
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
import torch
tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
model = AutoModelForQuestionAnswering.from_pretrained("bert-large-uncased-whole-word-masking-finetuned-squad")
# -
def answer_question(question):
inputs = tokenizer(question, ARTICLE, add_special_tokens=True, return_tensors="pt")
input_ids = inputs["input_ids"].tolist()[0]
text_tokens = tokenizer.convert_ids_to_tokens(input_ids)
answer_start_scores, answer_end_scores = model(**inputs)
answer_start = torch.argmax(
answer_start_scores
)
answer_end = torch.argmax(answer_end_scores) + 1
answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]))
return answer
answer_question("How we did to analyze what has been done?")
# +
import numpy as np
from annoy import AnnoyIndex
p = '../dataset/glove.6B.200d.txt'
class PreTrainedEmbeddings(object):
def __init__(self, word_to_index, word_vectors):
"""
Args:
word (str)
"""
self.word_to_index = word_to_index
self.word_vectors = word_vectors
self.index_to_word = { v: k for k, v in self.word_to_index.items() }
self.index = AnnoyIndex(len(word_vectors[0]), metric='euclidean')
for _, i in self.word_to_index.items():
self.index.add_item(i, self.word_vectors[i])
self.index.build(100)
@classmethod
def from_embeddings_file(cls, embedding_file):
word_to_index = {}
word_vectors = []
with open(embedding_file) as fp:
for line in fp.readlines():
line = line.split(" ")
word = line[0]
vec = np.array([ float(x) for x in line[1:] ])
word_to_index[word] = len(word_to_index)
word_vectors.append(vec)
return cls(word_to_index, word_vectors)
def get_embedding(self, word):
"""
Args:
word (str)
Returns:
numpy.array
"""
return self.word_vectors[self.word_to_index[word]]
def get_closest_to_vector(self, vector, n=1):
"""Given a vector, return its n nearest neighbors
Args:
vector (np.array)
n (int) -> the number of neighbors to return
Returns:
[str,str...] -> words nearest to the given vector
"""
nn_indices = self.index.get_nns_by_vector(vector, n)
return [self.index_to_word[neighbor] for neighbor in nn_indices]
def compute_analogy(self, word1, word2, word3):
"""
Args:
word1 (str)
word2 (str)
word3 (str)
Returns:
closest_words (list) -> a list of nearest neighbors
"""
vec1 = self.get_embedding(word1)
vec2 = self.get_embedding(word2)
vec3 = self.get_embedding(word3)
# Symple hypothesis: Analogy is a spatial relationship
spatial_relationship = vec2 - vec1
vec4 = vec3 + spatial_relationship
closest_words = self.get_closest_to_vector(vec4, n=4)
existng_words = set([word1, word2, word3])
closest_words = [word for word in closest_words if word not in existng_words]
return closest_words
def compute_missing_word(self, text):
"""
Args:
text (str) -> Text containing blank spaces
"""
pass
# -
embeddings = PreTrainedEmbeddings.from_embeddings_file(p)
# +
word1, word2, word3 = 'man', 'doctor', 'woman'
closest_words = embeddings.compute_analogy(word1, word2, word3)
print(closest_words)
for word4 in closest_words:
print("{} : {} :: {} : {}".format(word1, word2, word3, word4))
# +
word1, word2, word3 = 'fast', 'fastest', 'young'
closest_words = embeddings.compute_analogy(word1, word2, word3)
for word4 in closest_words:
    print("{} : {} :: {} : {}".format(word1, word2, word3, word4))
# +
word1, word2, word3 = 'talk', 'communicate', 'read'
closest_words = embeddings.compute_analogy(word1, word2, word3)
for word4 in closest_words:
    print("{} : {} :: {} : {}".format(word1, word2, word3, word4))
# +
from tqdm.notebook import trange, tqdm
from time import sleep
for i in trange(3, desc='1st loop'):
embeddings = PreTrainedEmbeddings.from_embeddings_file(p)
# +
from nltk.tokenize import TweetTokenizer
tokenizer = TweetTokenizer()
#tokenizer.tokenize("A Jupyter widget could not be displayed because the widget state could not be found. This could happen if the kernel storing the widget is no longer available, or if the widget state was not saved in the notebook. You may be able to create the widget by running the appropriate cells.")
# +
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
# -
class TransformerModel(nn.Module):
def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
super(TransformerModel, self).__init__()
from torch.nn import TransformerEncoder, TransformerEncoderLayer
self.model_type = 'Transformer'
self.src_mask = None
self.pos_encoder = PositionalEncoding(ninp, dropout)
encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
self.encoder = nn.Embedding(ntoken, ninp)
self.ninp = ninp
self.decoder = nn.Linear(ninp, ntoken)
self.init_weights()
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, src):
if self.src_mask is None or self.src_mask.size(0) != src.size(0):
device = src.device
mask = self._generate_square_subsequent_mask(src.size(0)).to(device)
self.src_mask = mask
src = self.encoder(src) * math.sqrt(self.ninp)
src = self.pos_encoder(src)
output = self.transformer_encoder(src, self.src_mask)
output = self.decoder(output)
return output
# +
import torchtext
from torchtext.data.utils import get_tokenizer
TEXT = torchtext.data.Field(tokenize=get_tokenizer("basic_english"),
init_token='<sos>',
eos_token='<eos>',
lower=True)
train_txt, val_txt, test_txt = torchtext.datasets.WikiText2.splits(TEXT)
TEXT.build_vocab(train_txt)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# +
def batchify(data, bsz):
data = TEXT.numericalize([data.examples[0].text])
nbatch = data.size(0) // bsz
data = data.narrow(0, 0, nbatch * bsz)
data = data.view(bsz, -1).t().contiguous()
return data.to(device)
batch_size = 20
eval_batch_size = 10
train_data = batchify(train_txt, batch_size)
val_data = batchify(val_txt, eval_batch_size)
test_data = batchify(test_txt, eval_batch_size)
# -
bptt = 35
def get_batch(source, i):
seq_len = min(bptt, len(source) - 1 - i)
data = source[i:i+seq_len]
target = source[i+1:i+1+seq_len].view(-1)
return data, target
# +
ntokens = len(TEXT.vocab.stoi) # the size of vocabulary
emsize = 200 # embedding dimension
nhid = 200 # the dimension of the feedforward network model in nn.TransformerEncoder
nlayers = 2 # the number of nn.TransformerEncoderLayer in nn.TransformerEncoder
nhead = 2 # the number of heads in the multiheadattention models
dropout = 0.2 # the dropout value
model = TransformerModel(ntokens, emsize, nhead, nhid, nlayers, dropout).to(device)
criterion = nn.CrossEntropyLoss()
lr = 5.0 # learning rate
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
import time
def train():
model.train() # Turn on the train mode
total_loss = 0.
start_time = time.time()
ntokens = len(TEXT.vocab.stoi)
for batch, i in enumerate(range(0, train_data.size(0) - 1, bptt)):
data, targets = get_batch(train_data, i)
optimizer.zero_grad()
output = model(data)
loss = criterion(output.view(-1, ntokens), targets)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optimizer.step()
total_loss += loss.item()
log_interval = 200
if batch % log_interval == 0 and batch > 0:
cur_loss = total_loss / log_interval
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d}/{:5d} batches | '
'lr {:02.2f} | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f}'.format(
epoch, batch, len(train_data) // bptt, scheduler.get_last_lr()[0],
elapsed * 1000 / log_interval,
cur_loss, math.exp(cur_loss)))
total_loss = 0
start_time = time.time()
def evaluate(eval_model, data_source):
eval_model.eval() # Turn on the evaluation mode
total_loss = 0.
ntokens = len(TEXT.vocab.stoi)
predictions = []
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, bptt):
data, targets = get_batch(data_source, i)
output = eval_model(data)
output_flat = output.view(-1, ntokens)
total_loss += len(data) * criterion(output_flat, targets).item()
return total_loss / (len(data_source) - 1)
# +
best_val_loss = float("inf")
epochs = 3
best_model = None
for epoch in range(1, epochs + 1):
epoch_start_time = time.time()
train()
val_loss = evaluate(model, val_data)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} |'
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
print('-' * 89)
if val_loss < best_val_loss:
best_val_loss = val_loss
best_model = model
scheduler.step()
# -
test_loss = evaluate(best_model, test_data)
print("=" * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
test_loss, math.exp(test_loss)))
print("=" * 89)
# +
from string import punctuation
# `vocab` is assumed to be defined earlier in the notebook (e.g. the list of words in the corpus vocabulary).
vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}
def tokenize_review(test_review):
test_review = test_review.lower() # lowercase
# get rid of punctuation
test_text = ''.join([c for c in test_review if c not in punctuation])
# splitting by spaces
test_words = test_text.split()
print(test_words)
# tokens
test_ints = []
test_ints.append([vocab_to_int[word] for word in test_words])
return test_ints
def predict(input_word):
    best_model.eval()
    word_ints = tokenize_review(input_word)
    # Only the tokenization step is implemented here; feeding `word_ints`
    # through the model is left incomplete in this notebook.
    return word_ints
predict("Hi, there how")
| 22,194 |
/Task - 5 (Python Programming)/Task - 5 (Python Programming).ipynb | fb91f466cdd9a3eef808e2b423afa7decb02579a | [] | no_license | Soyel-Shaikh/Innomatics_Internship_APR_21 | https://github.com/Soyel-Shaikh/Innomatics_Internship_APR_21 | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 16,460 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Question 1 Detect Floating Point Number
import re
for _ in range(int(input())):
print(bool(re.match(r'^[-+]?[0-9]*\.[0-9]+$', input())))
# +
# Question 2 Re.split()
regex_pattern = r"[.,]+" # Do not delete 'r'.
import re
print("\n".join(re.split(regex_pattern, input())))
# +
#Question 3 Group(), Groups() & Groupdict()
import re
m = re.search(r'([A-Za-z0-9])\1+',input())
if m:
print(m.group(1))
else:
print(-1)
# +
# Question 4 Re.findall() & Re.finditer()
import re
k = re.findall(r'(?<=[^aeiouAEIOU ])[aeiouAEIOU]{2,}(?=[^aeiouAEIOU ])',input())
if k:
for i in k:
print(i)
else:
print(-1)
# +
# Question 5 Re.start() & Re.end()
import re
S, k = input(), input()
matches = re.finditer(r'(?=(' + k + '))', S)
anymatch = False
for match in matches:
anymatch = True
print((match.start(1), match.end(1) - 1))
if anymatch == False:
print((-1, -1))
# +
# Question 6 Regex Substitution
import re
T = []
def change(match):
if match.group(0) == '&&':
return 'and'
elif match.group(0) == '||':
return 'or'
N = int(input())
for i in range(N):
T.append(input())
t = '\n'.join(T)
print(re.sub(r"(?<= )(&&|\|\|)(?= )", change, t))
# +
#Question 7 Validating Roman Numerals
regex_pattern = r"^(?=[MDCLXVI])M{0,3}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$" # Do not delete 'r'.
import re
print(str(bool(re.match(regex_pattern, input()))))
# +
#Question 8 Validating phone numbers
import re
N = int(input())
for i in range(N):
number = input()
if(len(number)==10 and number.isdigit()):
output = re.findall(r"^[789]\d{9}$",number)
if(len(output)==1):
print("YES")
else:
print("NO")
else:
print("NO")
# +
# Question 9 Validating and Parsing Email Addresses
import re
N = int(input())
for i in range(N):
name, email = input().split()
pattern="<[a-z][a-zA-Z0-9\-\.\_]+@[a-zA-Z]+\.[a-zA-Z]{1,3}>"
if bool(re.match(pattern, email)):
print(name,email)
# +
# Question 10 Hex Color Code
import re
n = int(input().strip())
inside = False
for _ in range(n):
line = input()
for el in line.split(' '):
if el == "{":
inside = True
continue
elif el == "}":
inside = False
continue
elif inside:
found = re.search(r'\#[0-9a-fA-F]{3,6}', el)
if found:
print(found.group(0))
# +
# Question 11 HTML Parser - Part 1
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
print ('Start :',tag)
for attr in attrs:
print ('->',' > '.join(map(str,attr)))
def handle_endtag(self, tag):
print ('End :',tag)
def handle_startendtag(self, tag, attrs):
print ('Empty :',tag)
for attr in attrs:
print ('->',' > '.join(map(str,attr)))
html = ""
for i in range(int(input())):
html += input()
parser = MyHTMLParser()
parser.feed(html)
parser.close()
# +
# Question 12 HTML Parser - Part 2
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
def handle_comment(self, data):
if '\n' not in data:
print('>>> Single-line Comment')
print(data)
elif '\n' in data:
print('>>> Multi-line Comment')
print(data)
def handle_data(self, data):
if data != '\n':
print('>>> Data')
print(data)
html = ""
for i in range(int(input())):
html += input().rstrip()
html += '\n'
parser = MyHTMLParser()
parser.feed(html)
parser.close()
# +
# Question 13 Detect HTML Tags, Attributes and Attribute Values
from __future__ import print_function
import sys
if sys.version_info[0]>=3:
input=input
from html.parser import HTMLParser
from html.parser import HTMLParser
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
print(tag)
for e in attrs: print("-> "+e[0]+" > "+str(e[1]))
def handle_endtag(self, tag):
#print("End : "+tag)
pass
def handle_startendtag(self, tag, attrs):
print(tag)
for e in attrs: print("-> "+e[0]+" > "+str(e[1]))
parser = MyHTMLParser()
for _ in range(int(input())):
parser.feed(input())
# +
# Question 14 Validating UID
import re
for i in range(int(input())):
N = input().strip()
if N.isalnum() and len(N) == 10:
if bool(re.search(r'(.*[A-Z]){2,}',N)) and bool(re.search(r'(.*[0-9]){3,}',N)):
if re.search(r'.*(.).*\1+.*',N):
print ('Invalid')
else:
print ('Valid')
else:
print ('Invalid')
else:
print ('Invalid')
# +
# Question 15 Validating Credit Card Numbers
import re
for i in range(int(input())):
S = input().strip()
pre_match = re.search(r'^[456]\d{3}(-?)\d{4}\1\d{4}\1\d{4}$',S)
if pre_match:
processed_string = "".join(pre_match.group(0).split('-'))
final_match = re.search(r'(\d)\1{3,}',processed_string)
if final_match:
print('Invalid')
else :
print('Valid')
else:
print('Invalid')
# +
# Question 16 Validating Postal Codes
regex_integer_in_range = r"^[1-9]\d{5}$" # Do not delete 'r'.
regex_alternating_repetitive_digit_pair = r"(\d)(?=\d\1)" # Do not delete 'r'.
import re
P = input()
print (bool(re.match(regex_integer_in_range, P))
and len(re.findall(regex_alternating_repetitive_digit_pair, P)) < 2)
# +
# Question 17 Matrix Script
import math
import os
import random
import re
import sys
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
m = int(first_multiple_input[1])
matrix = []
for _ in range(n):
matrix_item = input()
matrix.append(matrix_item)
matrix = list(zip(*matrix))
sample = str()
for words in matrix:
for char in words:
sample += char
print(re.sub(r'(?<=\w)([^\w\d]+)(?=\w)', ' ', sample))
# -
| 6,329 |
/content/00_prerequisites/00_introduction_to_python/colab.ipynb | 3d727fa5a8e3601a250497a0dc1bc86db76a2ccb | [] | no_license | sultanalnahian/Coursera_MachineLearning-Regression | https://github.com/sultanalnahian/Coursera_MachineLearning-Regression | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 95,470 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sultanalnahian/Coursera_MachineLearning-Regression/blob/master/content/00_prerequisites/00_introduction_to_python/colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="copyright"
# #### Copyright 2020 Google LLC.
# + id="vUL-MxtcowtQ"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + id="RPzkgaO1IzxD"
"TEST"
# + [markdown] id="ZutSxI49o8Jh"
# # Introduction to Python
# + [markdown] id="ZutSxI49o8J5"
# Python is the most common language used for machine learning. It is an approachable yet versatile language that's used for a variety of applications.
#
# It can take years to learn all the intricacies of Python, but luckily you can learn enough Python to become proficient in machine learning in a much shorter period of time.
#
# This Colab is a quick introduction to the core attributes of Python that you'll need to know to get started. This is only a brief peek into the parts of the language that you'll commonly encounter as a data scientist. As you progress through this course, we'll introduce you to the Python concepts you'll need along the way.
#
# If you already know Python, this lesson should be a quick refresher. You might be able to simply skip to the exercises at the bottom of the document.
#
# If you know another programming language, you will want to pay close attention because Python is markedly different than most popular languages in use today. If you are new to programming, welcome! Hopefully this lesson will give you the tools you need to get started with data science.
# + [markdown] id="nitMt74ZpjSN"
# ## Variables
#
# One of the most important notions in any programming language is the **variable**. Variables are how we store data we want to use later.
#
# When we have an expression like `2 + 3`, Python calculates the value of `5` and then forgets that the value ever existed. We need some way to store the values of expressions so that they can be used later in the program. To do that, we use variables.
#
# In the example below, the value of `2 + 3` is stored in the variable `x`.
# + id="rNMTYmdOuS1t"
x = 2 + 3
x
# + [markdown] id="eixDMWWW1oDo"
# Variables store values, and those values can be used in expressions.
# + id="mgUEjQbk1yEV"
x = 2
y = 3
x + y
# + [markdown] id="jC17KacSEvdZ"
# Variable value can also be changed, hence the name *variable*.
# + id="2wYs4zbuE6rA"
x = 123
x = 456
x
# + [markdown] id="mT_0fsbuFses"
# There are very few rules regarding what you can name a variable. The first character needs to be an alphabetic character. After the first character, any alphanumeric character can be used. Underscores are also okay to use anywhere in a variable name, but stay away from naming a variable with two underscores at the beginning, since Python uses leading double-underscores for internal things. (This will be seen in a later lesson.)
#
# Here are a few valid variable names:
# + id="_EVPT2D-GbUp"
number = 1
my_number = 2
YourNumber = 3
_the_number_four = 4
n5 = 5
NUMBER = 6
number + my_number + YourNumber + _the_number_four + n5 + NUMBER
# + [markdown] id="6LYdiJ4DGyy5"
# Notice that `number` and `NUMBER` are different variables. Case matters.
#
# Although Python will accept other styles, it is common convention to name constants in all caps (e.g. `THE_NUMBER`) and variables using lower_with_underscore syntax (e.g. `a_number`). See the [Python Style Guide](https://www.python.org/dev/peps/pep-0008/#naming-conventions) for more on naming conventions. Adhere to the guide when you can. It will help others understand your code and it will train you to be able to read other programmers' Python code.
# + [markdown] id="n6232TxSHCdA"
# Naming variables is an important aspect of computer programming. Variables serve as a form of documentation within your code. Good names will help your teammates and your future self understand what your code is doing when they are trying to modify it. Take some time to think about your variable names as you create new variables.
#
# It is also important to keep your variable naming style consistent. Don't use different naming styles (such as `this_variable` and `thisVariable`) in the same file/package unless you have a valid reason to do so.
# + [markdown] id="EpzRj0xf7_Qf"
# ## Printing and Strings
# + [markdown] id="z953Qlkk7RvE"
# ### Strings
#
# The **string** data type can contain zero or more characters. In order for Python to know that a string is a value and not part of the code, strings have to be wrapped in quotes.
#
# Some example strings are:
# + id="bkdwYrWTDdSu"
"Python is a "
'useful programming language'
# + [markdown] id="oG8rzEjCIrFN"
# Single and double-quotes are interchangeable, and both occur frequently. In general, try to pick one style of quotation marks and stick with it, unless you need to use that type of quote in a string.
#
# The following string requires the `'` character within the string, so it makes sense to use `"` as the quotation marks.
# + id="7i7kcmjWJOiq"
"I've been learning Python"
# + [markdown] id="s23P6KyYJXmB"
# But what if you need to use both `'` and `"` in the same string? In that case you can *escape* the embedded quote using the `\` character. The `\` tells Python to read the character `'` as it is, and not as the end of the quotation.
# + id="4ko6NWgQJet7"
'They\'ve had a "good" trip'
# + [markdown] id="2oQvnltzJ0ii"
# You probably noticed that the escape character, `\`,appears in the output. This is just a side effect of how Colab is printing the string. We'll learn a cleaner method of printing a string soon.
# + [markdown] id="S57KwUN6KYlc"
# The triple-quote (`"""` or `'''`) is another type of quote that you can use to create a string. The triple-quote allows you to have a multi-line string. It is often used when writing documentation directly in your code.
# + id="kENH5-B_Ky3j"
"""This is a string
surrounded by three double-quotes
and spanning multiple lines.
"""
'''This is a string
surrounded by three single-quotes
and spanning multiple lines.
'''
# + [markdown] id="W2DnBnAZK2Hh"
# You can see in the output that the string shows up on one line with `\n` added where the line breaks were. `\n` is a special escape sequence that means "line feed", which is typewriter-speak for moving to the next line. `\t` is another common escape sequence that represents a tab. `\\` adds a backslash `\` to the string.
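# + [markdown]
# For instance, here is a quick sketch of those escape sequences in action (it uses the `print` function, which is covered in more detail below):
# +
print("column A\tcolumn B")   # \t inserts a tab
print("line one\nline two")   # \n starts a new line
print("a backslash: \\")      # \\ produces a single backslash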
# + [markdown] id="CqrJdWZMLzYt"
# Strings can be stored in variables. The `+` operator also works on strings, by concatenating them together.
# + id="ATDV6CX_MDD6"
s1 = "The Python "
s2 = "programming"
s3 = " language is easy to learn"
s1 + s2 + s3
# + [markdown] id="f6yO48xYMQ8h"
# The `*` operator also works with strings, resulting in the string repeated multiple times. However, other arithmetic operators such as `-` and `/` don't work with strings.
# + id="tFEZNL-UMMuS"
'ABC ' * 5
# + [markdown] id="bmqwHLl6Mng8"
# Python has a handy built-in way to find the *length* of a string, which is the number of characters in the string.
# + id="L-aLHVrANalL"
len("pneumonoultramicroscopicsilicovolcanoconiosis")
# + [markdown] id="dG3TxrOxOEry"
# If you need to extract a specific character from a string, you can specify that character by *indexing* it. Notice that in Python, the first character of a string is at index 0. Most popular programming languages start counting at 0.
# + id="fym2JmR4O2O1"
"abcdefghijklmnopqrstuvwxyz"[1]
# + [markdown] id="mdsjwVSBO_kN"
# You can also extract a *slice* of a string. A slice is a portion of a string referenced by the starting and ending index. The starting index is inclusive, and the ending index is exclusive. The slice below returns a four-character string.
# + id="Catcl26sPPpK"
alphabet = "abcdefghijklmnopqrstuvwxyz"
alphabet[1:5]
# + [markdown] id="bv18nj19P0y6"
# Slices have some handy shortcuts if you want to start at the beginning or go all the way to the end of a string.
# + id="s4CTYF86P_8c"
alphabet[:3]
# + id="8PNkhfs-QHPg"
alphabet[23:]
# + [markdown] id="zMRmvX9qQkON"
# Strings are *objects* in Python. We won't get into the details of what an object is in this tutorial, other than to mention that objects can have functions and methods called on them using **dot notation**. Below is an example object call on the `alphabet` string, that converts the entire string to uppercase. Notice that the function is called `alphabet.upper()` instead of `upper(alphabet)`, as we saw with the `len()` function.
# + id="zQCvFzGUQg4r"
alphabet.upper()
# + [markdown] id="WXBznfKiRcsb"
# More information on string functionality in Python can be found in the [Python string](https://docs.python.org/3.7/library/string.html) documentation.
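# + [markdown]
# Before moving on, here is a quick sketch of a few other commonly used string methods (`lower`, `replace`, and `split` are all built into Python strings; `print`, used to show several results at once, is introduced in the next section):
# +
print("PYTHON".lower())                      # lowercase copy of the string
print(alphabet.replace("abc", "xyz"))        # replace one substring with another
print("data science with python".split())   # split a string into a list of words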
# + [markdown] id="7TXyYyUBv77C"
# ### Printing
#
# So far, we have relied on Colab to print out the data that we have been working with. This works fine for simple examples, but it doesn't work if you want to print multiple times in a code snippet.
#
# The **`print`** function allows you to print any data structure to the screen.
# + id="UeZabHvqv769"
my_variable = "I'm a variable"
print(my_variable)
print("Hello class!")
print(12345)
print(123.45)
print(['a', 3, 'element list'])
print(('a', 3, 'element tuple'))
print({"my": "dictionary"})
# + [markdown] id="1wopo_J1v766"
# You'll notice that `print` adds a new line to the output every time it is called. We can add an `end=''` argument to the `print` to take away the new line.
# + id="mrmKTVDXv760"
print("The magic number is ", end='')
print(42)
# + [markdown] id="IkXnWJ3Pv76y"
# There is another Python feature that, though not strictly just for printing, is commonly used when printing: **text formatting**.
#
# Text formatting allows you to print multiple values in a single `print` statement and to mix strings and numbers.
# + id="qDCoQSCFv76s"
print("%s says that the numbers %d and %f sum to %f" % ("Bob", 3, 5.1, 3 + 5.1))
# + [markdown] id="1Nnhnon_v76p"
# There is quite a bit going on in the code above, so let's break it down into pieces:
#
# * The first string we see in the `print` call is `"%s says that the numbers %d and %f sum to %f"`. This string contains `%s`, `%d`, and `%f` placeholders. These placeholders tell Python that we expect a string (`%s`), integer (`%d`), or floating-point number (`%f`) to be substituted into the string. (A floating-point number is a number with a decimal part, such as 5.1.)
#
# * Next we see a percentage sign `%` after the string. This is the string formatting operator. It comes after a string and precedes the tuple containing the data that will populate the placeholders in the string.
#
# * Finally comes the tuple containing the data that will populate the placeholders. The data in the tuple should be in the same order that the placeholders appear in the string. For example, the second placeholder is `%d`, and the second item in the tuple is the integer `3`.
# + [markdown] id="PihhIEJSv76m"
# There is a more modern and object-oriented formatting function that can be used to achieve the same goal. Notice the placeholders in the string passed to the `print` function are all curly braces. Also, the floating-point values are truncated to the appropriate number of decimal points in the output.
# + id="44loLjJZv76W"
print("{} says that the numbers {} and {} sum to {}".format("Bob", 3, 5.1, 3 + 5.1))
# + [markdown] id="n0pk26GVMdAf"
# When the values you want to print are saved as variables, you can also pass them in the `print` function by typing `f` before the quotation marks and putting the variable names in the curly braces.
# + id="iSrBG3s8MtJz"
name = "Bob"
value1 = 3
value2 = 5.1
print(f"{name} says that the numbers {value1} and {value2} sum to {value1 + value2}")
# + [markdown] id="ED0MmXO9Ytkl"
# #### Exercise 1
# + cellView="form" id="AGaNNvtlHtA6"
#@markdown Run this cell before starting the exercise.
#@markdown It initializes the values of some hidden variables.
name = "Alex"
favorite_food = "chocolate"
# + [markdown] id="OvfsalseMDaq"
# We have hidden the values of variables `name` and `favorite_food` in the cell above. Use a `print` statement to find out their values.
# + [markdown] id="Pb4rwUm6qXA4"
# #####**Student Solution**
# + id="bOsixW2-MCfK"
# Use a print statement to show the values of the hidden variables
# "name" and "favorite_food".
print("My name is ____. I love eating ____.")
# + [markdown] id="d4veL73wGHpO"
# ---
# + [markdown] id="I7KYyR11pR7G"
# ## Basic Data Types
#
# This section will introduce you to some of the most common data types that you'll encounter. Data types are the foundational building blocks for any Python code.
# + [markdown] id="4ee4By1ppGSW"
# ### Integers
#
# Integers are just whole numbers. This includes positive whole numbers (1, 2, 3, ...), 0, and negative whole numbers (-1, -2, -3, ...). The cell below demonstrates the addition of two integers.
# + id="Y6ENY1AaCe-R"
42+8
# + [markdown] id="xa2r21GQDyX_"
# Python can do all the common operations you are familiar with.
# + [markdown] id="JL759v61C8WB"
# **Subtraction**
# + id="bmpIeEdCC0qy"
4-2
# + [markdown] id="MRcuhLf3DBYK"
# **Multiplication**
# + id="rFTc9fNiDLBO"
2*3
# + [markdown] id="MsSSofZeDUMQ"
# **Exponents**
# + id="CZrY6jcKTZ-g"
2**3
# + [markdown] id="iQ7VTcjQTwL0"
# **Division** is a bit more complicated. If a number doesn't divide evenly, such as `13 / 5`, we get what is called a floating-point number. We will talk about floating-point numbers shortly, but for now we can use `//`, known as "floor division", which rounds the result down to the nearest whole number (e.g. quotients of 14.1 and 14.9 both become 14). To find the remainder of a division, we can use the modulo operator, `%`.
# + id="5YSFvvusWbM5"
quotient = 14 // 4
remainder = 14 % 4
print(f"Quotient: {quotient}, Remainder: {remainder}")
# + [markdown] id="X2TdlbVkD8KL"
# Multiple mathematical operations can be combined, as below:
# + id="WeEzppbcXmcu"
7 ** 7 + 7 * 7 - 7 * 7 // 7
# + [markdown] id="soWd9ICNXksr"
# The code above can be a bit difficult to read. How does Python know which numbers to process first?
#
# Python enforces an order of precedence where operations like taking the exponent come before multiplication and division, which come before addition and subtraction. (See this guide to [order of operations](https://www.mathsisfun.com/operation-order-pemdas.html).) There are actually more operators than what we've seen so far and it can be hard to remember the order of operations, so when in doubt you can wrap parentheses around operations to make things clearer.
# + id="WUHW2o4BEeeg"
(7 ** 7) + (7 * 7) - ((7 * 7) // 7)
# + [markdown] id="kC10wZPyjLyC"
# You can change the order of the operations from the standard order by wrapping different expressions in parentheses. Run the code snippets above and below, and notice the different results despite having the same numbers and operators.
# + id="BZpQixwpFjFZ"
(7 ** (7 + 7)) * 7 - (7 * (7 // 7))
# + [markdown] id="fqQQ2FVTN0__"
# #### Exercise 2
# + [markdown] id="IAVzd_nmGRK2"
# **Four Fours Problem**
#
# Don't spend more than 15 minutes on this problem, but give it a few tries.
#
# Change the operations and add parentheses as necessary to try to come up with as many numbers between 1 and 42 as you can.
# + [markdown] id="KoJgYJnAFv8G"
# ##### **Student Solution**
# + id="UiTK_43yGbRL"
4 + 4 + 4 + 4
# + [markdown] id="_MuiR_mXGCeH"
# ---
# + [markdown] id="sN83BIYvMlbJ"
# ### Floating-Point Numbers
# + [markdown] id="hE54jmP2LS1p"
# It might seem complicated, but Python has a few ways of representing numbers. You have already seen the integer data type. This section is about floating-point numbers.
#
# The different data types representing numbers have a different place in the world of computing. In some languages, it is quite challenging to move from one type of number to another (e.g. from an integer to a floating-point). In Python, the mixing of integers and floating-point numbers is more fluid. This is great most of the time, but can be problematic at times. It is important to be aware, especially when doing division, when you are working with floating-point numbers or integers.
# + [markdown] id="L-axSTVAfdPX"
# Earlier we looked at floor division using `//`. The cell below uses regular division using `/`.
# + id="5Xl53vhnDP04"
50 / 10
# + [markdown] id="StSlr3ENHczu"
# In this case we can see that there is a decimal point (`.`) in the output. This is generally how we'll know that we're working with floating-point numbers.
#
# You can force operations to be floating-point operations by including one floating-point number in the equation. Note the difference between the following two numbers.
# + id="HnX7JoF7IjL-"
print(2 + 3)
print(2.0 + 3)
# + [markdown] id="gcMocTbXQu4z"
# The cell below demonstrates a standard division of two floating-point values.
# + id="BI-TEL_ARVvM"
322.231 / 0.03
# + [markdown] id="DAdljqYvRfif"
# The result `10741.033333333333` is clearly a floating-point number, with a decimal portion of `.033333333333`. Here is what happens when we apply floor division to two floating-point values:
# + id="RMUAi_QgRl7V"
322.231 // 0.03
# + [markdown] id="eGF6o-DSRp8Q"
# Note that the result `10741.0` has a decimal portion of `.0` now, but the decimal point indicates that it is a floating-point value.
# + [markdown] id="uzHSFAAzqKgC"
# Python has a very useful package with more advanced math tools. To import this package, all you need to do is add the following line to your code.
# + id="qXY1A9pPqdzI"
import math
# + [markdown] id="9YCJ7qnbqjFH"
# This package gives us access to a bunch of useful things, such as trigonometric functions like sines and cosines, and mathematical constants like $\pi$ and $e$. To use them, we just type `math.` followed by the specific function we want to use. In Colab, if you've already imported the library, typing `math.` should show a list of available functions.
#
# If you get an error running the following cell, make sure you run the cell above which imports the `math` package.
# + id="RWGjupl9rIho"
math.pi
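# + [markdown]
# A quick sketch of a couple more things the `math` package provides (`math.sqrt`, `math.cos`, and `math.e` are all part of the standard library):
# +
print(math.sqrt(16))      # square root of 16 -> 4.0
print(math.cos(math.pi))  # cosine of pi radians -> -1.0
print(math.e)             # Euler's number, another floating-point constant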
# + [markdown] id="Qg3Om6uqSHYq"
# Here, `math.pi` ($\pi$) is represented as a floating-point number. Since $\pi$ is an irrational number, its value cannot be written down exactly as a finite decimal, so the value of `math.pi` is not *exactly* the value of $\pi$. However, for most use cases it is close enough.
# + [markdown] id="_8UyzFRlPTZe"
# While computers cannot generate truly random numbers, computer scientists have come up with some very clever ways to get pseudo-random numbers. These numbers aren't perfectly random, but are usually close enough such that it doesn't matter. In Python, it is very easy to get a variety of pseudo-random values using the `random` library. It is imported using the same syntax as seen earlier to import the `math` library.
# + id="_cTqHwQRQ0kD"
import random
# + [markdown] id="XGe7XRfHRfqA"
# As we saw with the `math` library, any time we want to use a function from the library, we need to preface the function name with `random.`. To get a random floating-point value uniformly chosen between 0 and 10, we can type the following command.
#
# If you get an error running the following cell, make sure you run the cell above which imports the `math` package.
# + id="BWoP6GmDVseg"
random.uniform(0, 10)
# + [markdown] id="w9_mB1ZNar_K"
# Alternatively, we can get a random value between 0 and 1 using `random.random()` and simply multiply it by 10 to achieve the same thing. There are cases where one method might be preferable over the other, so it is useful to know them both.
# + id="hz9bF9bRbISv"
random.random()*10
# + [markdown] id="BRrRfDjdbR_A"
# There are a lot more kinds of random numbers that we can use, but we'll talk about that more a bit later.
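# + [markdown]
# For example, `random.randint` returns a random integer from an inclusive range, and `random.choice` picks a random element from a sequence. A quick sketch:
# +
print(random.randint(1, 6))                           # like rolling a six-sided die
print(random.choice(["rock", "paper", "scissors"]))   # a random element from the list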
# + [markdown] id="ZbyMX1f-UFxj"
# Don't be intimidated by the differences and interactions between floating-point numbers and integers. Most of the time you won't need to be concerned about it. However, if you see surprising results when doing math in Python, be sure to double-check the data types of the numbers in your operations.
# + [markdown] id="xhWi4A7G9sxP"
# ### Booleans
#
# The boolean is another core data type in Python. Booleans, often abbreviated to "bools", can be thought of as switches, with an "on" and an "off". Bools are represented simply by **True** and **False**.
# + id="8kK7X0KTh9cT"
True
# + id="yb8R0X5piBmy"
False
# + [markdown] id="GjxWzrHGiLvS"
# Bools are often combined with logical operators such as **`and`** and **`or`**. Suppose you have two boolean variables, `a` and `b`.
#
# - `a and b` is `True` if and only if both `a` and `b` are `True`, otherwise it is `False`.
# - `a or b` is `True` if and only if at least one of `a` or `b` is `True`, otherwise it is `False`.
# + id="ku0QRmgOiwqi"
True and True
# + id="n5jaK8mAi1kr"
True and False
# + id="1QsUSB_HjFQi"
False and True
# + id="kbFv2tF2i5uC"
False and False
# + id="TvaiLtfIjCHK"
True or True
# + id="fNC2DXODjO1j"
True or False
# + id="E4Vsi4yyjRIU"
False or True
# + id="dPHZfa0SjTpi"
False or False
# + [markdown] id="CTFYOYm-jl8s"
# These expressions of *truthiness* can be expanded beyond two operands:
# + id="dHDN9rlHju1K"
False and True or True and False or True
# + [markdown] id="EWds8NZcjzLm"
# Just like with numbers, you can change the order in which the boolean operators are conducted, with parentheses.
# + id="bmNET_M0j8n6"
False and (True or True and False or True)
# + [markdown] id="a6qGkJawkKOy"
# Truthiness can be flipped with the **`not`** operator:
# + id="0QuMqZr-kNuq"
not True
# + id="ktrkaFPvkP3C"
not False
# + [markdown] id="MAMZiWVKkRyj"
# In most cases, you won't be working directly with **`True`** and **`False`**. Most of the time, these values will be returned from other expressions. Take for instance the greater than ($>$), less than ($<$), greater than or equal to ($\geq$), and less than or equal to ($\leq$) expressions below:
# + id="kvrU-MbntJxF"
2 > 1
# + id="jC3Tu27etMz9"
2 < 1
# + id="dyAbguAZtStV"
1 >= 1
# + id="lV4LB8yltXX0"
2 <= 1
# + [markdown] id="iM0WzNIzt2e6"
# There are also checks for equality and inequality:
# + id="aWh_nf2Ct6BF"
1 == 2
# + id="DqD57oCjt9Fc"
1 != 2
# + [markdown] id="bPxW9UbyuOM8"
# Why is "equals" represented as `==` instead of just `=`? As you saw earlier with variables, you can use `=` to assign a value to a variable. Therefore, Python needs a different symbol to check for equality. If you want to assign the variable `a` to the value 3, you can use `a = 3`. Now, if you want to check if it is equal to 3, you can use `a == 3`.
# + id="LvdgS_NdNfah"
a = 3
print(a)
print(a == 3)
print(a == 2)
a = 2
print(a)
print(a == 2)
# + [markdown] id="xCtBPbjZteEN"
# You can combine the logical **`and`**, **`or`**, and **`not`** expressions and the **`>`**, **`>=`**, **`<`**, **`<=`**, **`==`**, **`!=`** expressions. Parentheses are again used to change the order of operation evaluation.
# + id="31SI26qkutT9"
(1 < 2) and (3 == 3) or ((4 > 1) and (not 1 < 2))
# + [markdown] id="v7Ia_42jhGza"
#
# We can also use the **`>`**, **`>=`**, **`<`**, **`<=`**, **`==`**, **`!=`** expressions with two strings to determine whether strings are in alphabetical order.
# + id="LKzFY0yohd2V"
'apple' < 'banana'
# + [markdown] id="OQ0u4X2Mhs3I"
# Of note is that capital letters are sorted before lowercase e.g. `'A' < 'a'`.
# + id="SlrWHza2Of1e"
'Apple' < 'apple'
# + [markdown] id="l7ev4qLUWRT6"
# #### Exercise 3
# + cellView="form" id="QFOF5-zrWRUB"
#@markdown Run this cell before starting the exercise.
#@markdown It initializes the values of some hidden variables.
favorite_letter = 'K'
favorite_number = 42
# + [markdown] id="y8vtK6YbWRUI"
# We've hidden the values of variables `favorite_letter` and `favorite_number` (an integer between 0 and 100). Use boolean expressions to find the values of the letter and the number.
#
# + [markdown] id="81jAhKnrWRUI"
# ##### **Student Solution**
# + id="PRR6zg7CWRUJ"
# Edit the expression below until you discover the hidden letter
favorite_letter == 'A'
# + id="chMvU3wSWRUM"
# Edit the expression below until you discover the hidden number
favorite_number <= 100
# + [markdown] id="4M-iD5qGWRUQ"
# ---
# + [markdown] id="j0CX7jke7lqR"
# ## Conditional Decisions
#
# One of the most common patterns in computer science and data science is the `if` statement. You can use `if` to check if a condition is met, and do different things based on whether it is met or not. The `if` statement looks at a boolean value and if that value is `True`, runs some code.
# + id="zuF7LVOS7FhQ"
if 1 > 3:
print("One is greater than three")
if 1 < 3:
print("One is less than three")
# + [markdown] id="Cv30H9ZrB1cI"
# Only the second `print` statement is run, since it is not `True` that `1 > 3` but it is `True` that `1 < 3`.
#
# Notice that the two `print` statements are indented beneath each `if` statement. This isn't by accident. Python creates "blocks" of code using the code's indentation level. This indentation can be done with tabs or with spaces, but it must be consistent throughout your code file.
#
# ```
# block 1
# block 1.1
# block 1.1.1
#
# block 1.1.1
#
# block 1.1
# ```
#
# The code below shows blocks in action.
# + id="gBDfIw-bErkp"
if False:
print("This shouldn't print.")
print("But this will always print.")
# + [markdown] id="RXYHdR-JFiET"
# In many situations, you will want to run some code if a condition is met, and some different code when it is not. For this, you can use `else`.
# + id="5SsJLEarF3kZ"
if 1 > 3:
print("Math as we know it is broken!")
else:
print("Everything looks normal.")
# + [markdown] id="HyMZNuRjF_J6"
# You might also want to check many if conditions and only execute the code if one condition passes. For this, you can use the `elif` clause (short for "else if").
#
# For example, these would be useful if we wanted to do a simple rock, paper, scissors game.
# + id="nTi0_DBTGJsK"
# Choose a random option from the list for the computer player.
import random
computer = random.choice(["rock", "paper", "scissors"])
my_choice = "paper" # Feel free to change this.
print(f"You chose {my_choice}!")
print(f"The computer chose {computer}!")
if my_choice == computer:
print("Draw! Go again!")
elif my_choice == "rock" and computer == "paper":
print("The computer wins. Try again?")
elif my_choice == "rock" and computer == "scissors":
print("You smashed the computer's scissors!")
elif my_choice == "paper" and computer == "rock":
print("You wrapped up the computer's rock!")
elif my_choice == "paper" and computer == "scissors":
print("The computer wins. Try again?")
elif my_choice == "scissors" and computer == "rock":
print("The computer wins. Try again?")
elif my_choice == "scissors" and computer == "paper":
print("You sliced up the computer's paper!")
# + [markdown] id="jlkFACKQoKyL"
# ## Basic Data Structures
# + [markdown] id="4aeUPXVUpoVb"
# ### Lists
# + [markdown] id="hvrg40h6Sacg"
# So far, the data types we've seen can be thought of as singular entities. We've seen:
# - strings
# - integers
# - floating-points
# - booleans
#
# Often, you'll find yourself needing to work with multiple data elements together. There are several options for organizing a collection of data into a data-structure. One option is to use a list.
#
# A list is just a list of multiple values.
# + id="0c7EWoTkTBXc"
[9, 8, 7, 6, 5]
# + [markdown] id="ffB5qW6iTEkp"
# The values in a list don't need to have the same data type.
# + id="d9UaR3X8THH-"
[True, "Shark!", 3.4, False, 6]
# + [markdown] id="RaFVKpWYTMoV"
# You can assign a list to a variable.
# + id="GxoedcOvTdBb"
my_list = [True, "Shark!", 3.4, False, 6]
my_list
# + [markdown] id="UzZ3LCjiUnj-"
# You can also index a list and take slices from it, just like you can from a string. Conceptually you can think of "a string" to be a sequence of characters similar to a list.
# + id="1FGtpudFU9FV"
print(my_list[3])
print(my_list[3:])
# + [markdown] id="tLH5kW4pXodm"
# Indexing can also be used to selectively replace items in a list.
# + id="aVhNNk3EXtAX"
print(my_list)
my_list[1] = "Wolf!"
print(my_list)
# + [markdown] id="rXjH5eZ0VDGo"
# Lists have other interesting features. For example, there is a built-in method to sort a list in place.
# + id="NsDXdr4OVINd"
number_list = [4, 2, 7, 9 ,3, 5, 3, 2, 9]
number_list.sort()
number_list
# + [markdown] id="0VK9yibsdHpl"
# You can append an item to a list using `append()`.
# + id="rUs-ifRpdLmY"
letter_list = ["a", "b", "c"]
letter_list.append("d")
letter_list
# + [markdown] id="NBMEjCAmd1C3"
# You can append multiple items to a list using `extend()`.
# + id="YOMz3t0Fd5E_"
letter_list.extend(["e", "f", "g"])
letter_list
# + [markdown] id="lZTvQem9VUQk"
# You can even have lists within lists!
# + id="oLhCs9bUVXtD"
["List 1", ["List 2", 3, 4], False]
# + [markdown] id="z1igsdg_Vfaa"
# Lists-of-lists come in really handy, especially in data science since much of the data that you'll work with will be in a tabular format. In these cases, the internal lists are typically the same size. For example, you might have a list of data points about a customer, such as their age, income, and the amount they spent at your company last month.
# + id="vCbbTrx8WCRb"
customers = [
["C0", 42, 56000, 12.30],
["C1", 19, 15000, 43.21],
["C2", 35, 123000, 45.67],
]
customers
# + [markdown] id="RJjfjKPXW3_b"
# You can use multiple indexing to get data out of a nested list. In the example below, we pull out the income of our second customer.
# + id="m3tVFhSpXOxU"
customers[1][2]
# + [markdown] id="np3pCakUWqzH"
# We will explore lists more deeply and other data structures in future tutorials.
# + [markdown] id="DaHEjvmwpqzT"
# ### Tuples
#
# Tuples look and feel a whole lot like lists in Python. They can contain a sequence of data of any type, including lists and other tuples. The primary difference between lists and tuples is that you can't modify a tuple like you can a list.
#
# Before we get too deep into immutability (whether you can change an object's value), let's take a look at a tuple.
# + id="s2wbBKB2YYr9"
my_tuple = (1, "dog", 3.987, False, ["a", "list", "inside", 1.0, "tuple"])
my_tuple
# + [markdown] id="5gTQ6Gx6Yp1g"
# The visible difference between a list and a tuple is that we create a tuple with parentheses instead of square brackets.
#
# You can index a tuple and take a slice from a tuple just like you can from a list.
#
# The only difference is that you can't change the values inside a tuple, like you can with a list. This is useful because Python can perform some optimizations when it knows a data structure can't change. This gives tuples a few powerful properties that lists don't have. We'll take a peek at one of these properties now, and we'll also learn more later in this tutorial.
#
# We will use a property of a tuple to swap the values of two variables. In most languages, you need three variables to swap the value of two variables. Here is an example.
# + id="0Ufn_-6vZfws"
var1 = "Python"
var2 = "Perl"
tmp = var1
var1 = var2
var2 = tmp
var1, var2
# + [markdown] id="e86S6sL9ZzwO"
# We had to introduce the `tmp` variable to perform the swap, and needed three lines of code. With tuples, we can do this more cleanly.
#
# **Note:** You might have noticed that when we put `var1, var2` at the bottom of the last code section a tuple was printed out. Any sequence of variables separated by commas in Python automatically creates a tuple.
# + id="kmbYg8KdaVNW"
var1 = "Perl"
var2 = "Python"
(var1, var2) = (var2, var1)
var1, var2
# + [markdown] id="UMvM-x4baz6o"
# As you can see, swapping variables using tuples is much easier to read and less error-prone than having to use three variables. It works through tuple **packing and unpacking**: the right-hand side is packed into a tuple, which is then unpacked into the variables on the left.
# + [markdown] id="OUI-5GqHbB5L"
# Tuples come up everywhere when programming in Python. Sometimes you won't even realize that you are working with a tuple, since they are so integrated with the language.
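# + [markdown]
# For example, returning several comma-separated values from a function actually returns a single tuple, which you can then unpack on assignment. (This short example is an addition to the original text.)
# +
def divide(a, b):
    # Returns the quotient and the remainder together, as a tuple.
    return a // b, a % b

print(divide(17, 5))               # prints the whole tuple: (3, 2)
quotient, remainder = divide(17, 5)
print(quotient, remainder)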
# + [markdown] id="AM2MY7EPpsS5"
# ### Dictionaries
#
# Dictionaries are another fundamental data structure in Python. If you have experience with other programming languages, you might have encountered a similar data structure with a different name such as a map, a hashmap, or a hashtable.
#
# Dictionaries contain key/value pairs. The reason this data structure is called a dictionary is because you can "look up" keys and find the corresponding value, just like you can look up a word in the Oxford Dictionary and find its definitions.
#
# Let's take a look at some code that creates a dictionary and accesses a value in the dictionary by key.
# + id="GkvFhLkguhln"
my_dictionary = {
"pet": "cat",
"car": "Tesla",
"lodging": "apartment",
}
my_dictionary["pet"]
# + [markdown] id="jhS92kCxu2N-"
# Notice that we use the same *indexing* notation that should be familiar to you from strings, lists, and tuples. However, instead of a numeric index, the lookup is done by key.
#
# A key can be any non-mutable data value. Keys can be numbers, strings, and even tuples. You can't use a dictionary or list as a key, but you can use them as values.
#
# The data types of the keys do not need to be the same.
# + id="F7H45vAkvTc-"
the_dictionary = {
57: "the sneaky fox",
"many things": [1, "little list", " of ", 5.0, "things"],
(8, "ocho"): "Hi there",
"KEY_ONE": {
"a": "dictionary",
"as a": "value"
},
}
the_dictionary[(8, "ocho")]
# + [markdown] id="Ljuhxa7sv0Ze"
# The dictionary above is much more unstructured than dictionaries that you'll typically encounter in practice. However, it illustrates the broad range of key types and value types that a dictionary can store.
#
# You can also index many levels down in a dictionary. For example, in `the_dictionary` above, there is a sub-dictionary at the `KEY_ONE` key. Let's pull something out of this dictionary within a dictionary.
# + id="-9d8N3kxwgp_"
the_dictionary["KEY_ONE"]['as a']
# + [markdown] id="5faIdUYRwrZp"
# We can also use indexing to access the values of the list that is the value in `the_dictionary` for the key `"many things"`.
# + id="8QI8rJKMwtyH"
the_dictionary["many things"][1]
# + [markdown] id="gBwk3lyzxFlg"
# Dictionaries, lists, tuples, and other data structures can contain as much nesting as you need.
# + [markdown] id="FyKt13WvxO0K"
# Dictionaries store their values by key. Only one value can exist per key, so if you write a new value to a key, the old value goes away.
# + id="XjvSiO5pxaQb"
my_dictionary = {
"k1": "name",
"k2": "age"
}
my_dictionary["k1"] = "surname"
my_dictionary
# + [markdown] id="dySuvUTfxn-h"
# You can add entries to a dictionary by assigning them to a key.
# + id="KbMBUHKgxtWl"
my_dictionary["k3"] = "rank"
my_dictionary
# + [markdown] id="BAi6X1T9xznZ"
# And you can remove entries from a dictionary using the **`del`** operator.
# + id="DzeaUA_lx7Vh"
del my_dictionary["k2"]
my_dictionary
# + [markdown] id="DF9ipqh2yDwM"
# To see if a key exists in a dictionary, you can use the **`in`** operator. Notice that it returns a boolean value.
# + id="7-M5xn9DyJcS"
"k2" in my_dictionary
# + [markdown] id="mGe0UEq4yUaI"
# It is advisable to check if a key exists in a dictionary before trying to index that key. If you try to access a key that doesn't exist using square brackets, your program will raise a `KeyError` and possibly crash.
#
# There is also a safer **`get`** method on the dictionary object. You provide `get` with a key and a default value to return if the key isn't present.
# + id="gm5yy399y2Hk"
my_dictionary.get("k2", "There is no 'k2' key value")
# + [markdown] id="N4eyTWNPy7Fc"
# For more on dictionaries, check out the [official Python dictionary documentation](https://docs.python.org/3/tutorial/datastructures.html#dictionaries).
# + [markdown] id="KiF3xtSI40Tw"
# We've learned about the most fundamental data structures in Python:
# - numbers
# - booleans
# - lists
# - tuples
# - dictionaries
#
# We've learned how to store data in variables and how to change data in variables, dictionaries and lists. Each of these data types has more functionality than we have gone over in this tutorial, so please take some time to get a broader idea of what can be done with these data types in Python.
#
# There are also many data types that we did not cover. As we encounter the need for other types of data in our study of machine learning and data science, we will introduce and explain some of them.
# + [markdown] id="qFPc3YgQ7ocD"
# ## `for` Loops
#
# The `for` loop is a powerful tool that lets us look at every item in a data structure in order and perform operations on each element.
# + id="sk8Aw0yZJCUf"
my_list = ['a', 'b', 'c']
for item in my_list:
print(item)
# + [markdown] id="dirxZstuJQnI"
# As you can see, the `for` loop executes `print` three times, once for each item in the list.
#
# The `for` loop works for tuples too.
# + id="p4Q9TFIEJZdK"
my_tuple = (5, 3, 1, -1, -3, -5)
for x in my_tuple:
print(x)
# + [markdown] id="-VfTc3TBJk5r"
# Dictionaries are a little more interesting. By default, the loop iterates over the dictionary's keys.
# + id="gSZist0eJrVY"
my_dictionary = {
"first_name": "Jane",
"last_name": "Doe",
"title": "Dr."
}
for k in my_dictionary:
print(f"{k}: {my_dictionary[k]}")
# + [markdown] id="Ir9HX9bSJ827"
# If only the dictionary's values are of concern to you, it is possible to ask the dictionary to return its values by using the `values()` method.
# + id="ktslT2NVKD3o"
for v in my_dictionary.values():
print(v)
# + [markdown] id="n4ohtpYDKRPY"
# If you want both the keys and the values without needing to look up `my_dictionary[k]`, you can ask the dictionary for its `items()`.
# + id="bGGDRbzPKIf9"
for (k, v) in my_dictionary.items():
print(f"{k}: {v}")
# + [markdown] id="ivbCdgpXKn0z"
# You can also use `for` to operate on a string character by character. Each item in a string is a single character.
# + id="gt5Ru7HjKkjp"
for c in "this string":
print(c)
# + [markdown] id="H2y1EkjmK6eL"
# If you want to iterate over a list or tuple and need the index of each item, you can use the `range` function along with the `len` function to get the indices of the list or tuple.
# + id="rGqDXpv6KwYy"
for i in range(len(my_list)):
print(f"{i}: {my_list[i]}")
# + [markdown] id="7vazK2DeLIxU"
# `range` is a function that returns a sequence of numbers. It can take one argument, two arguments, or three arguments.
#
# When you give one argument to `range`, it is considered to be the end of the range (exclusive).
# + id="7n2oLEzGLXw7"
for i in range(5):
print(i)
# + [markdown] id="JNHvCaZcLbOF"
# When you give two arguments to `range`, they are considered to be the start (inclusive) and end (exclusive) of the sequence.
# + id="C8dmRqSPLqQV"
for i in range(6, 12):
print(i)
# + [markdown] id="RdmqeIAdLtDK"
# When you give three arguments to `range`, they are considered to be the start (inclusive), end (exclusive), and step size of the sequence.
# + id="tu933OcnL3eq"
for i in range(20, 100, 10):
print(i)
# + [markdown] id="bxhdbYLYL7tY"
# Ranges are lazily evaluated so even very large ranges will not occupy a significant amount of memory.
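# + [markdown]
# As a quick added illustration, creating an enormous range is instantaneous and uses almost no memory, because the numbers are only produced when you ask for them.
# +
big_range = range(1_000_000_000)
print(len(big_range))        # the length is known without generating a billion numbers
print(big_range[123456789])  # individual values are computed on demand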
# + [markdown] id="rLKvNuQlxkFg"
# `for` loops can also be useful for making a list. For example, if we want to generate a list of random numbers, we could use the `random` library within a `for` loop.
# + id="goP7QXiSiu1v"
import random
random_numbers = []
for i in range(10):
random_numbers.append(random.randint(0, 10))
print(random_numbers)
# + [markdown] id="iogbsWBcMdbr"
# ## `while` Loops
#
# The `while` loop allows you to repeat a block of code until some arbitrary condition is met.
# + id="Lo1YXGn8M6XT"
counter = 0
while counter < 5:
print("Not done yet, counter = %d" % counter)
counter += 1
print("Done, counter = %d" % counter)
# + [markdown] id="HhN1wqr1NAQx"
# `while` loops can be useful in many situations, especially those when you don't know for sure how many times you might need to loop.
#
# **Note:** You might have also noticed the `+=` operator in the example above. This is a shortcut that Python provides so that we don't have to write out `counter = counter + 1`. There are equivalents for subtraction, multiplication, division, and more.
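# + [markdown]
# Here are a few of those augmented assignment operators in action (an added example).
# +
counter = 10
counter += 3   # same as counter = counter + 3
counter -= 1   # same as counter = counter - 1
counter *= 2   # same as counter = counter * 2
counter /= 4   # same as counter = counter / 4
print(counter)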
# + [markdown] id="TwPCggdfN8GP"
# ### `break`
#
# There are times when you might want to exit a loop before it is complete. For this case, you can use the `break` statement.
#
# In the example below, the `break` statement causes the loop to exit after only five iterations, despite having a range of 1,000,000 numbers to iterate.
# + id="CHhiRnYTOCqG"
for x in range(1000000):
if x >= 5:
break
print(x)
# + [markdown] id="MldYIF3jOTF4"
# ### `continue`
#
# `continue` is similar to `break`, but instead of exiting the loop entirely, it just skips the current iteration.
#
# Let's see this in action with a loop that prints the numbers from 0 to 9, except 4 and 6.
# + id="awLAZUqCOfCo"
for x in range(10):
if x == 4 or x == 6:
continue
print(x)
# + [markdown] id="u9DuFDG_p2rz"
# ## Functions
#
# Functions are a way to organize and re-use your code. Functions allow you to take a block of your code, give it a name, and then call that code by name as many times as you need to.
#
# Functions are defined in Python using the `def` statement.
# + id="BVpNQ4swQr3u"
def my_function():
print("I wrote a function")
my_function()
my_function()
my_function()
# + [markdown] id="6NcFKBi2Q8De"
# Standard function definitions always begin with the `def` keyword followed by the name of the function. Function naming follows the same rules as variable naming.
#
# All function definitions are composed of:
#
# - The `def` keyword which tells Python you are about to define a function
# - The function's name
# - Any arguments that you want to pass to the function, wrapped in parentheses (more details on this below)
# - A colon to end the statement
#
# The function's code is indented under the function definition.
# + [markdown] id="upFwmsk3R5R4"
# The arguments that come between the parentheses hold the names of variables that can be used in the function. Function arguments, also called parameters, are used to provide the function with data.
#
# **Note:** The reason `my_function` above has nothing within the parentheses is because that particular function has no arguments. A function can have zero or more arguments.
# + id="m0Cm8vbzSOxo"
def doubler(n):
print(n*2)
doubler(2)
# + [markdown] id="tPhUa3P7T9W3"
# Instead of just printing an output, functions can also return data.
# + id="CB1b7Q-dUJkc"
def doubler(n):
return n*2
print(doubler(2))
# + [markdown] id="dECmjCN-UU0d"
# Functions can return multiple values as a tuple. The following function returns the minimum and maximum (in that order) of the numbers in a list or tuple.
# + id="umXslWytUXkT"
def min_max(numbers):
    # Start from the first element so the result is also correct for all-positive
    # or all-negative sequences (starting from 0 would be wrong in those cases).
    min = numbers[0]
    max = numbers[0]
for n in numbers:
if n > max:
max = n
if n < min:
min = n
return min, max
print(min_max([-6, 78, -102, 45, 5.98, 3.1243]))
# + [markdown] id="3sFqKkIuVCfv"
# It is important to note that when you pass data to a function, the function gets a *copy* of the data. For numeric, boolean, and string data types, that means that the function can't directly modify the data you passed in. For lists and dictionaries, it is a little more complicated. The function gets a *copy of the location/address* of the data structure. While the function can't change that address, it *can* modify the data structure.
#
# Let's see some examples to solidify the point. In this first example we can see that the number changer can't make any changes to the variable `my_number`.
# + id="FMpdW55IVWCJ"
def number_changer(n):
n = 42
my_number = 24
number_changer(my_number)
print(my_number)
# + [markdown] id="rGdDthSaWLGt"
# The same is true for booleans. The function below can't modify `my_bool`.
# + id="0IKK0XRaWPxT"
def bool_changer(b):
b = False
my_bool = True
bool_changer(my_bool)
print(my_bool)
# + [markdown] id="8cyusJcMWbD9"
# The same is true for strings.
# + id="BiIbX1b2WeGm"
def string_changer(s):
s = "Got you!"
my_string = "You can't get me"
string_changer(my_string)
print(my_string)
# + [markdown] id="2yJ03-RCWrcs"
# However, lists can be modified. See the example below.
# + id="7Wd-GGGeWwBY"
def list_changer(list_parameter):
list_parameter[0] = "changed!"
my_list = [1, 2, 3]
list_changer(my_list)
print(my_list)
# + [markdown] id="wGqGcQvbXAKJ"
# What do you think the code below will do?
# + id="D-qahKSCXJj6"
def list_changer(list_parameter):
list_parameter = ["this is my list now"]
my_list = [1, 2, 3]
list_changer(my_list)
print(my_list)
# + [markdown] id="E7qbZizOwl1k"
# Reassigning the parameter to a new list only rebinds the function's local name, so the caller's list is untouched. Functions cannot replace the entire list that was passed in; they can only change individual values within it.
# + [markdown] id="mgArUXxCXT_6"
# Dictionaries interact with functions exactly like lists do.
# + id="-hbfpZx5Xb2E"
def dictionary_changer(d):
d["my_entry"] = 100
my_dictionary = {"a": 100, "b": "bee"}
dictionary_changer(my_dictionary)
print(my_dictionary)
# + id="xcpPFrnZXtwM"
def dictionary_changer(d):
d = {"this is": "my dictionary"}
my_dictionary = {"a": 100, "b": "bee"}
dictionary_changer(my_dictionary)
print(my_dictionary)
# + [markdown] id="i7dfFTnqX2O2"
# So, how can you get a function to modify a number, bool, or string? You can simply assign the return value of the function to the original variable.
# + id="xM5nDmNuYA2N"
def number_changer(n):
return n + 1
def boolean_changer(b):
return not b
def string_changer(s):
return s.upper()
my_number = 42
my_bool = False
my_string = "Python"
my_number = number_changer(my_number)
my_bool = boolean_changer(my_bool)
my_string = string_changer(my_string)
print(my_number)
print(my_bool)
print(my_string)
# + [markdown] id="w1CW590FbAy4"
# ### Pass
#
# `pass` is a Python keyword that is used as a placeholder when code hasn't been written yet. You'll see `pass` often in your exercises as a placeholder for the code you'll need to write.
# + id="_NA64T75bXAU"
def do_nothing_function():
pass
do_nothing_function()
# + [markdown] id="o9rgbpgRNRpy"
# ### Exercise 4
# + [markdown] id="LXjVsAFwKl0s"
# Write a function that implements rock, paper, scissors below. Your function should take in the player's choice of rock, paper, or scissors and play a game against the computer, which chooses randomly. Feel free to copy code from the "Conditional Decisions" section.
# + [markdown] id="CB_xwusBKC4I"
# #### **Student Solution**
# + id="toyYdLMWRT68"
def rock_paper_scissors(player_choice):
# Add code here that takes in the players choice of rock, paper, or scissors
# and plays a game against the computer, which chooses randomly.
# You can copy code from the "Conditional Decisions" section if you like.
pass
# + id="Vk_s5QMnTf-Y"
rock_paper_scissors("rock")
rock_paper_scissors("paper")
rock_paper_scissors("scissors")
# + [markdown] id="dnKNAm9bJt4a"
# ---
# + [markdown] id="D_bKEdYtZdFd"
# ## Comments
#
# Comments are simply pieces of your code that will be skipped over when the program is running. While this may sound meaningless, comments are a crucial way for you to communicate with readers of your code (which often includes your future self) about what your code does. Do not underestimate the importance of good code commenting!
#
# Python considers the hash (`#`) to be the start of a comment. This hash can be anywhere in a line. Anything after the hash on the same line won't be executed.
#
# Let's look at an example.
# + id="y6OIz3kWaL5a"
# This is a comment used to document.
# If I need more than one line,
# then I need to add more hashes.
print("Hello") # Comments don't have to be at the start of a line
# print("This won't run")
# + [markdown] id="rgjTJDgFahSC"
# # More Exercises
# + [markdown] id="FwMbxGGgfqU3"
# ## Practice Problems
#
# In case you want to go back and look at the previous exercises, we've made some links here to make it easy to go back and find them.
#
# * [Printing](#scrollTo=ED0MmXO9Ytkl)
# * [Integers](#scrollTo=fqQQ2FVTN0__)
# * [Booleans](#scrollTo=l1kunoRTl5GQ)
# * [Functions](#scrollTo=o9rgbpgRNRpy)
#
# Once you feel comfortable with the concepts we've covered, you can move on to the challenge problems below.
# + [markdown] id="5CORN6WcalLz"
# ## Exercise 5
# + [markdown] id="KlzvSZ13rW19"
# In the code block below, complete the function by making it return the number cubed.
# + [markdown] id="31lkzGYRxu9P"
# ### Student Solution
# + id="-9DYcdrWa3cJ"
def cube(n):
pass # your code goes here
print(cube(5))
# + [markdown] id="DSwxHrmpVok3"
# ---
# + [markdown] id="vCd361qEbiRB"
# ## Exercise 6
# + [markdown] id="TuYoUIixrY7T"
# In the code block below, complete the function by making it return the sum of the even numbers of the provided sequence (list or tuple).
# + [markdown] id="XUvoqKWNx5hV"
# ### Student Solution
# + id="TYCBS1xXbsoW"
def sum_of_evens(seq):
pass # your code goes here
print(sum_of_evens([5, 14, 6, -2, 0, 45, 66]))
# + [markdown] id="jxJbGl3xVtW2"
# ---
# + [markdown] id="SNzbvnMYbwff"
# ## Exercise 7
# + [markdown] id="VPx_YEOkrbHJ"
# We've provided a helper function for you that will take a random step, returning either -1 or 1. It is your job to use this function in another function that takes in a starting value `start` and an ending value `end`, and goes on a random walk from `start` until it reaches `end`. Your function should return the number of steps required to reach `end`. (Note that it may help for debugging to print the value of the random walk at each step.)
# + id="mFVVIgnneQeX"
import random
def random_step():
# Returns either -1 or 1 at random
return random.choice([-1, 1])
# + [markdown] id="xfOuW44xx_6C"
# ### Student Solution
# + id="p4ZsxD5vcbsL"
def random_walks(start, end):
pass # your code goes here
print(random_walks(0, 5))
# + [markdown] id="csOdifBNWH0o"
# ---
| 51,099 |
/Ex2_4_Thompson_Beta.ipynb | 2e737608f4ff127432d655d938d4af2bcd9565a9 | [] | no_license | skywalker0803r/Microsoft-DAT257x-Reinforcement-Learning-Explained | https://github.com/skywalker0803r/Microsoft-DAT257x-Reinforcement-Learning-Explained | 1 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 79,663 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## TensorFlow
#
#
# #### What is it?
# * A system for working with computational graphs over Tensor objects (analogous to numpy ndarrays).
# * Automatic differentiation (backpropagation) for Variable objects.
#
# #### Why?
# * Naive implementation of basic layers is easy, but an optimized implementation is harder.
# * GPU implementation is even harder.
# * In academia, industry, and PSIML projects, you are far more likely to use a framework than to code ConvNets from scratch.
#
# #### How to learn it?
# * This notebook will try to cover a wide range of stuff.
# * References to additional resources will be given where needed.
# * You're welcome to use many excellent tutorials on the web, including the [one from Google](https://www.tensorflow.org/get_started/get_started).
#
# First import TensorFlow module, as we will be using it throughout. This also verifies that TensorFlow installation is OK.
import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt
# %matplotlib inline
import math
from utils.vis_utils import visualize_grid
from utils.data_utils import load_CIFAR10
# ### Dataset
#
# We will use CIFAR-10 dataset from https://www.cs.toronto.edu/~kriz/cifar.html. The dataset contains 50,000 training images and 10,000 test images. All images are 32 x 32 x 3. Each image is classified as one of 10 classes.
#
# We call a utility function to load CIFAR-10 data.
# Then we divide the data into training, validation, and test set.
# Finally we normalize data by subtracting mean image from each sample.
# Note that the mean image is computed from training set only.
def get_CIFAR10_data(num_training = 49000, num_validation = 1000, num_test = 10000):
# Load the raw CIFAR-10 data
cifar10_dir = 'cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# Subsample the data
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
# Normalize the data: subtract the mean image
mean_image = np.mean(X_train, axis = 0)
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
return X_train, y_train, X_val, y_val, X_test, y_test, mean_image
# Invoke the above function to get data.
X_train, y_train, X_val, y_val, X_test, y_test, mean_image = get_CIFAR10_data()
# Check that the data has expected shape.
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Let's look at some random data samples.
def visualize_CIFAR10_sample(X, y, sample_count = 10, class_count = 10):
set_size = X.shape[0]
# Randomize dataset.
data = np.ndarray([set_size, 2], dtype = np.int32)
data[:, 0] = list(range(set_size))
data[:, 1] = y
data[:, :] = data[np.random.permutation(set_size)]
# Select samples.
selection = { i : [] for i in range(class_count) }
count = 0
for (ind, cls) in data:
if len(selection[cls]) < sample_count:
selection[cls] += [ind]
count += 1
if count == class_count * sample_count:
break
# Ensure that we found enough samples.
assert count == class_count * sample_count
# Flatten list.
selection_flat = [item for cls in range(class_count) for item in selection[cls]]
# Visualize samples.
plt.figure(figsize = (12, 12))
plt.imshow(visualize_grid((X[selection_flat, :, :, :] + np.reshape(mean_image, [1, 32, 32, 3]))))
plt.axis("off")
plt.show()
visualize_CIFAR10_sample(X_train, y_train)
# Now that we know what the classes represent, we can create an array of human-readable names.
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# ## Defining and examining a simple network
#
# We define a simple network consisting of basic layers:
# * convolutional layer,
# * max-pooling layer,
# * fully connected layer, and
# * ReLU activation function.
#
# TensorFlow supports many other layer types and activations. See https://www.tensorflow.org/api_guides/python/nn for official API documentation.
#
# The following line clears any network that might already exist in memory.
tf.reset_default_graph()
# ### Create TensorBoard log file
#
# We can use the TensorBoard to visualize our training data. TensorBoard parses log files (also called event files) generated by TensorFlow. We will be placing those files in a separate dir.
log_dir = './logs/'
# A new event file is created by instantiating a `tf.FileWriter` class.
writer = tf.summary.FileWriter(log_dir)
# ### Placeholders for data
# First we define placeholders for input data (input image and its label) using `tf.placeholder`.
# We will eventually bind these to actual numerical data values.
#
# We choose to represent input data as 4D tensors whose shape is N x H x W x C, where:
# * N is the number of examples in a batch (batch size)
# * H is the height of each image in pixels
# * W is the width of each image in pixels
# * C is the number of channels (usually 3: R, G, B)
#
# This is the right way to represent the data for spatial operations like convolution. For fully connected layers, however, all dimensions except batch size will be collapsed into one.
#
# In `tf.placeholder`, if a dimension has value `None`, it will be set automatically once actual data is provided.
def setup_input():
X = tf.placeholder(tf.float32, [None, 32, 32, 3], name = 'X')
y = tf.placeholder(tf.int64, [None], name = 'y')
is_training = tf.placeholder(tf.bool, name = 'is_training')
return X, y, is_training
X, y, is_training = setup_input()
# ### Convolutional and pooling nodes
# Next we start defining define the main "body" of the network.
# We start by adding a single convolutional layer with bias and ReLU activation.
#
# Convolutional layer is created by calling `tf.layers.conv2d`.
# Returned object is of type `tf.Tensor` and represents output activations of the layer.
#
# Bias is enabled by default, so it is not explicitly specified.
#
# `padding = 'SAME'` means that we allow padding of roughly half the kernel size (TensorFlow computes this value automatically), to avoid reduction in output size due to boundary effects. The other option is `padding = 'VALID'`, where padding is disabled.
#
# We use [tf.layers API](https://www.tensorflow.org/api_docs/python/tf/layers) to generate a whole layer by a single function call. It is also possible to create each parameter and operation node separately, and connect them together, but that quickly becomes cumbersome for bigger networks. See [this tutorial](https://www.tensorflow.org/tutorials/layers) for how to use `tf.layers`.
conv1 = tf.layers.conv2d(inputs = X, filters = 32, kernel_size = [7, 7], strides = 2, padding = 'SAME', activation=tf.nn.relu, name = 'conv1')
# Next we add a max-pooling node.
pool1 = tf.layers.max_pooling2d(inputs = conv1, pool_size = [2, 2], strides = 2, padding = 'SAME', name = 'pool1')
# ### View default graph in TensorBoard
#
# We can write graph data to event file we create above. A graph can be passed to `FileWriter` constructor as well, in which case it is written to file immediately after the file is created.
writer.add_graph(tf.get_default_graph())
# Now you should be able to run `tensorboard --logdir=./logs` from console (with your Python environment activated), and see the graph visualized in browser at `http://localhost:6006`.
#
# For more details please see official tutorial on [graph visualization](https://www.tensorflow.org/get_started/graph_viz).
#
# Note: graph visualization seems to work best in Google Chrome.
# ### Examining parameters and activations
# Parameter tensors are hidden inside the convolution layer we created, and we don't have a handle on them. To get one, we have to know their names. For that we can either consult TensorBoard visualization, or list all variables in the graph
tf.global_variables()
# and pick the one we need by name. The `conv1` prefix in both names refers to the `name` parameter specified when creating the layer, and NOT the Python variable `conv1`.
#
# Then we access the tensor itself using `get_tensor_by_name`. For example, we can get the shapes of kernel and bias as follows
conv1_kernel = tf.get_default_graph().get_tensor_by_name('conv1/kernel:0')
conv1_bias = tf.get_default_graph().get_tensor_by_name('conv1/bias:0')
print(conv1_kernel.shape)
print(conv1_bias.shape)
# Shapes of activation tensors are computed automatically, and can also be accessed using `shape`. Note that these tensors may have unknown dimensions which become known only when actual input is presented.
print(conv1.shape)
print(pool1.shape)
# ### Fully connected layers
# Next we append a fully connected layer with 1024 output neurons and ReLU activation.
# In order to determine the shape of its parameter tensor, we need to know the number of input neurons, which depends on the shape of the `pool1` activation tensor.
fc1_input_count = int(pool1.shape[1] * pool1.shape[2] * pool1.shape[3])
fc1_output_count = 1024
print([fc1_input_count, fc1_output_count])
# In order to append a fully connected layer, we need to flatten the spatial dimensions of `pool1`.
pool1_flat = tf.reshape(pool1, [-1, fc1_input_count])
# Now we are ready to add a fully connected layer.
fc1 = tf.layers.dense(inputs = pool1_flat, units = 1024, activation = tf.nn.relu, name = 'fc1')
# Finally, we add another fully connected layer with bias to output scores for 10 output classes. This layer has no nonlinearity following it, but it will be followed by a softmax function to convert scores to probabilities.
class_count = 10
fc2 = tf.layers.dense(inputs = fc1, units = class_count, name = 'fc2')
# ### Final classification
# We append a softmax layer to convert the scores coming from `fc2` into probabilities, as well as a "top-k" layer to get the three most probable guesses.
prob = tf.nn.softmax(fc2)
(guess_prob, guess_class) = tf.nn.top_k(prob, k = 3)
# ### Visualizing parameters and activations
# TensorBoard supports visualizing tensors as images using `tf.summary.image` function.
# We add a subnetwork that computes images from `conv1_kernel` and `conv1`.
with tf.variable_scope('conv1_visualization'):
# Normalize to [0 1].
x_min = tf.reduce_min(conv1_kernel)
x_max = tf.reduce_max(conv1_kernel)
normalized = (conv1_kernel - x_min) / (x_max - x_min)
# Transpose to [batch_size, height, width, channels] layout.
transposed = tf.transpose(normalized, [3, 0, 1, 2])
    # Display the first 3 of the 32 filters in conv1 (max_outputs limits how many are written).
conv1_kernel_image = tf.summary.image('conv1/kernel', transposed, max_outputs = 3)
# Do the same for output of conv1.
sliced = tf.slice(conv1, [0, 0, 0, 0], [1, -1, -1, -1])
x_min = tf.reduce_min(sliced)
x_max = tf.reduce_max(sliced)
normalized = (sliced - x_min) / (x_max - x_min)
transposed = tf.transpose(normalized, [3, 1, 2, 0])
conv1_image = tf.summary.image('conv1', transposed, max_outputs = 3)
# ### Update graph visualization
# We have added some new nodes, and we need to check if the new graph is OK.
# To update TensorBoard visualization, we just add a new graph to the event file.
# The visualizer will pick up the latest graph when its browser tab is refreshed.
writer.add_graph(tf.get_default_graph())
# ### Forward pass
# Next we run one CIFAR-10 frame through the network.
def choose_random_image():
index = np.random.randint(0, X_train.shape[0])
image = X_train[[index], :, :, :]
label = y_train[[index]]
return index, image, label
random_index, random_image, random_label = choose_random_image()
# A TensorFlow graph is executed by creating a `tf.Session` object and calling its `run` method.
# A session object encapsulates the control and state of the TensorFlow runtime.
# The `run` method requires a list of output tensors that should be computed, and a mapping of input tensors to actual data that should be used. For more information, see the TensorFlow [Getting started](https://www.tensorflow.org/get_started/get_started) guide.
#
# Optionally we can also specify a device context such as `/cpu:0` or `/gpu:0`. For documentation on this see [this TensorFlow guide](https://www.tensorflow.org/tutorials/using_gpu). The default device is a GPU if available, and a CPU otherwise, so we can skip the device specification from now on.
#
# Note: if GPU is explicitly specified, but not available, a Python exception is thrown; current graph is invalidated, and needs to be cleared and rebuilt.
with tf.Session() as sess:
with tf.device("/cpu:0") as dev: #"/cpu:0" or "/gpu:0"
# Initialize weights.
sess.run(tf.global_variables_initializer())
# Map inputs to data.
feed_dict = { X : random_image, y : random_label }
# Set up variables we want to compute.
variables = [guess_prob, guess_class, conv1_kernel_image, conv1_image]
# Perform forward pass.
guess_prob_value, guess_class_value, conv1_kernel_value, conv1_value = sess.run(variables, feed_dict = feed_dict)
# First let's see the image that was chosen, and the network's predictions for it.
def visualize_classification(image, guess_class, guess_prob):
plt.imshow(image)
plt.axis("off")
plt.show()
for i in range(3):
ind = guess_class[0, i]
prob = guess_prob[0, i]
print("Class: {0}\tProbability: {1:0.0f}%".format(class_names[ind], prob * 100))
print("Ground truth: {0}".format(class_names[random_label[0]]))
visualize_classification(random_image[0, :, :, :] + mean_image, guess_class_value, guess_prob_value)
# We write generated images to file. After running the next cell the images should be visible in TensorBoard.
writer.add_summary(conv1_kernel_value)
writer.add_summary(conv1_value)
# ### Loss and metric(s)
#
# We append more nodes to compute loss value, and the number of correctly predicted pixels.
# For loss we use `tf.nn.softmax_cross_entropy_with_logits`. For other loss functions available out of the box in TensorFlow, see https://www.tensorflow.org/api_guides/python/nn#Losses. Of course, you can always build your own custom loss functions from simpler operations.
# Setup metrics (e.g. loss and accuracy).
def setup_metrics(y, y_out):
# Define loss function.
total_loss = tf.nn.softmax_cross_entropy_with_logits(labels = tf.one_hot(y, 10), logits = y_out)
mean_loss = tf.reduce_mean(total_loss)
# Add top three predictions.
prob = tf.nn.softmax(y_out)
(guess_prob, guess_class) = tf.nn.top_k(prob, k = 3)
# Compute number of correct predictions.
is_correct = tf.equal(tf.argmax(y_out, 1), y)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
return mean_loss, accuracy, guess_prob, guess_class
# We will be reusing this function later for other architectures.
# Now we create metrics for our current network.
mean_loss, accuracy, guess_prob, guess_class = setup_metrics(y, fc2)
# ### Visualizing loss and metric(s)
# We would like to use TensorBoard to visualize loss value and correct count.
# We add special nodes that generate those logs.
#
# We also add a special node that collects all summary nodes in the network. Evaluating this node in a call to `tf.Session.run` causes all summaries to be computed.
def setup_scalar_summaries():
tf.summary.scalar('mean_loss', mean_loss)
tf.summary.scalar('accuracy', accuracy)
all_summaries = tf.summary.merge_all()
return all_summaries
all_summaries = setup_scalar_summaries()
# ### Optimizer
#
# Finally, we define the optimization algorithm to be used for training. We use the Adam optimizer with learning rate 5e-4. For other choices see https://www.tensorflow.org/api_guides/python/train#Optimizers.
#
# Optimizer's `minimize` method essentially generates a network that performs backward pass based on the forward pass network that we defined, and passed to the optimizer via argument to `minimize`.
# The result of this method is a dummy node `train_step` which, when evaluated triggers execution of backward pass.
def setup_optimizer(loss, learning_rate):
optimizer = tf.train.AdamOptimizer(learning_rate)
# Batch normalization in TensorFlow requires this extra dependency
extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(extra_update_ops):
train_step = optimizer.minimize(loss)
return train_step
# We will be reusing this function for other architectures. Now we create optimizer for our current network.
train_step = setup_optimizer(mean_loss, 5e-4)
# ### Adding an optional backward pass
# Above we saw how to execute forward pass using `tf.Session.run`. Now we wrap that into a function (since we will be calling it in a loop to train the network). We also add an option to execute a backward pass by passing the extra argument `training`. That way we can use the same function for both training (forward + backward), and evaluation (forward only).
def run_iteration(session, X_data, y_data, training = None):
# Set up variables we want to compute.
variables = [mean_loss, accuracy, guess_prob, guess_class, all_summaries]
if training != None:
variables += [training]
# Map inputs to data.
feed_dict = { X: X_data, y: y_data, is_training: training != None }
# Compute variable values, and perform training step if required.
values = session.run(variables, feed_dict = feed_dict)
# Return loss value and number of correct predictions.
return values[:-1] if training != None else values
# ### Main training/evaluation loop
# The following is a simple function which trains or evaluates current model for a given number of epochs by repeatedly calling the `run_iteration` function defined above. It also takes care of:
# * aggregating loss and accuracy values over all minibatches
# * plotting loss value over time.
def run_model(session, predict, loss_val, Xd, yd,
epochs = 1, batch_size = 64, print_every = 100,
training = None):
dataset_size = Xd.shape[0]
batches_in_epoch = int(math.ceil(dataset_size / batch_size))
# Shuffle indices.
train_indices = np.arange(dataset_size)
np.random.shuffle(train_indices)
# Count iterations since the beginning of training.
iter_cnt = 0
for e in range(epochs):
# Keep track of performance stats (loss and accuracy) in current epoch.
total_correct = 0
losses = []
# Iterate over the dataset once.
for i in range(batches_in_epoch):
# Indices for current batch.
start_idx = (i * batch_size) % dataset_size
idx = train_indices[start_idx : (start_idx + batch_size)]
# Get batch size (may not be equal to batch_size near the end of dataset).
actual_batch_size = yd[idx].shape[0]
loss, acc, _, _, summ = run_iteration(session, Xd[idx,:], yd[idx], training)
# Update performance stats.
losses.append(loss * actual_batch_size)
total_correct += acc * actual_batch_size
# Add summaries to event file.
if (training is not None):
writer.add_summary(summ, e * batches_in_epoch + i)
# Print status.
if (training is not None) and (iter_cnt % print_every) == 0:
print("Iteration {0}: with minibatch training loss = {1:.3g} and accuracy of {2:.2f}%"\
.format(iter_cnt, loss, acc * 100))
iter_cnt += 1
# Compute performance stats for current epoch.
total_accuracy = total_correct / dataset_size
total_loss = np.sum(losses) / dataset_size
print("Epoch {2}, Overall loss = {0:.3g} and accuracy of {1:.2f}%"\
.format(total_loss, total_accuracy * 100, e + 1))
return total_loss, total_correct
# ### Training the model for one epoch
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print('Training')
run_model(sess, fc2, mean_loss, X_train, y_train, 1, 64, 100, train_step)
print('Validation')
_ = run_model(sess, fc2, mean_loss, X_val, y_val, 1, 64)
# ### View summaries in TensorBoard log
# Now you should be able to refresh your TensorBoard tab and see the summaries.
# For more details please see [official tutorial](https://www.tensorflow.org/get_started/summaries_and_tensorboard) on summaries.
# TensorFlow also supports other kinds of summaries, such as [histograms](https://www.tensorflow.org/get_started/tensorboard_histograms).
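# As an added one-line illustration (not part of the original notebook), a histogram summary of the
# conv1 kernel could be added to the current graph like this; it would appear under TensorBoard's
# Histograms tab once it is evaluated and written with the FileWriter:
conv1_kernel_histogram = tf.summary.histogram('conv1/kernel_histogram', conv1_kernel)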
# ### Visualize some predictions
# Accuracy should be somewhat better now.
random_index, random_image, random_label = choose_random_image()
_, _, guess_prob_value, guess_class_value, _ = run_iteration(sess, random_image, random_label)
visualize_classification(random_image[0, :, :, :] + mean_image, guess_class_value, guess_prob_value)
# ## Add batch normalization
#
# We modify the simple model by adding batch normalization after the convolution layer. We expect this network to train faster, and achieve better accuracy for the same number of weight updates.
#
# For convenience, we collect previous network definition code into a function `bn_model`. One difference from the above code is the line that defines the batch normalization layer. Parameters of `bn_model` are input placeholders, and its return value is the output tensor (plus any other tensors that are needed outside the function, i.e. for inspection or visualization). Another input parameter `is_training` is used to select between training and inference version of the network, because batch normalization behaves differently during training and inference.
#
# All parameter and activation shapes are the same as before, since batch normalization does not modify the shape of its input. Hence `fc1_input_count` and `fc1_output_count` computed above are valid.
#
# API reference for batch normalization is at https://www.tensorflow.org/api_docs/python/tf/layers/batch_normalization.
def bn_model(X, y, is_training):
# Convolution layer.
conv1 = tf.layers.conv2d(inputs = X, filters = 32, kernel_size = [7, 7], strides = 2, padding = 'SAME', activation=tf.nn.relu, name = 'conv1')
# Batch normalization layer.
bn1 = tf.layers.batch_normalization(conv1, training = is_training)
# Pooling layer.
pool1 = tf.layers.max_pooling2d(inputs = bn1, pool_size = [2, 2], strides = 2, padding = 'SAME', name = 'pool1')
# First fully connected layer.
pool1_flat = tf.reshape(pool1,[-1, fc1_input_count])
fc1 = tf.layers.dense(inputs = pool1_flat, units = 1024, activation = tf.nn.relu, name = 'fc1')
# Second fully connected layer.
fc2 = tf.layers.dense(inputs = fc1, units = class_count, name = 'fc2')
return fc2
# Input, metrics and optimizer are the same as before, so we can assemble the whole network.
tf.reset_default_graph()
writer = tf.summary.FileWriter(log_dir)
X, y, is_training = setup_input()
y_out = bn_model(X, y, is_training)
mean_loss, accuracy, guess_prob, guess_class = setup_metrics(y, y_out)
all_summaries = setup_scalar_summaries()
train_step = setup_optimizer(mean_loss, 5e-4)
# Now we are ready to train and validate the network with batch normalization as before.
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print('Training')
run_model(sess, y_out, mean_loss, X_train, y_train, 1, 64, 100, train_step)
print('Validation')
run_model(sess, y_out, mean_loss, X_val, y_val, 1, 64)
# ## Exercise: build given architecture
#
# Your task is now to build the architecture contained in event file `cifar10_net_log\cifar10_net_log`, and train it on CIFAR-10. You should train for 8 epochs with batch size 100 and learning rate 0.001.
def cifar10_net(X, y, is_training):
# TODO
pass
tf.reset_default_graph()
writer = tf.summary.FileWriter(log_dir)
X, y, is_training = setup_input()
y_out = cifar10_net(X, y, is_training)
mean_loss, accuracy, guess_prob, guess_class = setup_metrics(y, y_out)
all_summaries = setup_scalar_summaries()
train_step = setup_optimizer(mean_loss, 1e-3)
writer.add_graph(tf.get_default_graph())
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print('Training')
run_model(sess, y_out, mean_loss, X_train, y_train, 8, 100, 100, train_step)
print('Validation')
run_model(sess, y_out, mean_loss, X_val, y_val, 1, 100)
# ## Saving and loading models
#
# Saving is done using `tf.train.Saver` class:
# * `save` method saves both network definition and weights.
# * `export_meta_graph` method saves only network definition.
#
# Loading is done in two stages:
# * `tf.train.import_meta_graph` function loads network definition, and returns a saver object that was used to save the model.
# * `restore` method of the returned saver object loads the weights.
#
# Note that since weights are available only inside a session, `save` and `restore` methods above require a session object as a parameter.
#
# Official TensorFlow documentation: [Saving and Restoring Variables](https://www.tensorflow.org/api_guides/python/state_ops#Saving_and_Restoring_Variables), [tf.train.Saver class](https://www.tensorflow.org/api_docs/python/tf/train/Saver), [tf.train.import_meta_graph function](https://www.tensorflow.org/api_docs/python/tf/train/import_meta_graph).
#
# Useful unofficial tutorial on saving and loading: http://cv-tricks.com/tensorflow-tutorial/save-restore-tensorflow-models-quick-complete-tutorial/
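# The added sketch below illustrates the calls described above; it is not wired into the rest of
# the notebook, and the checkpoint path is an arbitrary assumption.
def save_and_restore_example(sess, checkpoint_path = './checkpoints/model.ckpt'):
    # Saving: a Saver stores the weights, and save() also writes a .meta file with the graph definition.
    saver = tf.train.Saver()
    saver.save(sess, checkpoint_path)
    # Loading happens in two stages: first the graph definition, then the weights.
    tf.reset_default_graph()
    restored_saver = tf.train.import_meta_graph(checkpoint_path + '.meta')
    with tf.Session() as new_session:
        restored_saver.restore(new_session, checkpoint_path)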
# ## Transfer learning
#
# In this section we will start from a model which is pretrained on ImageNet, and finetune it for our CIFAR-10 recognition task.
#
# Pretrained model is given by meta-graph file (containing network definition), and checkpoint file (containing weights).
pretrained_meta_graph = r"inception_resnet_v2\inception_resnet_v2_2016_08_30.meta"
pretrained_checkpoint = r"inception_resnet_v2\inception_resnet_v2_2016_08_30.ckpt"
# For the CIFAR-10 classification task we need to perform these two modifications to the pretrained model at the very minimum:
# * Process CIFAR-10 images so that their size becomes what pretrained model expects
# * Adapt the last fully connected layer (which does final classification) so that the number of output neurons is 10 (the number of classes in the CIFAR-10 classification task)
#
# ### Get names of relevant nodes
#
# Modifying the input part of a pretrained network is somewhat cumbersome. It has to be done simultaneously with loading the network definition, by passing to `tf.train.import_meta_graph` a mapping from input tensors of the pretrained network to new input tensors.
#
# First we load pretrained network definition only to get the names of input placeholder nodes that we want to replace. This step can be skipped if these names are already known.
tf.reset_default_graph()
writer = tf.summary.FileWriter(log_dir)
_ = tf.train.import_meta_graph(pretrained_meta_graph)
# We can get the nodes' names using TensorBoard.
writer.add_graph(tf.get_default_graph())
# Alternatively, we can do it programmatically.
for op in tf.get_default_graph().get_operations():
if op.type == 'Placeholder':
print(op.outputs[0].name + '\t' + str(op.outputs[0].shape))
# Get the name of the fully connected layer that does final classification.
for op in tf.get_default_graph().get_operations():
if op.type == "MatMul":
print('Operation: ' + op.name)
print('Inputs: ')
for inp in op.inputs:
print(inp.name + '\t' + str(inp.shape))
# ### Modify input and output
# Next we clear the default graph, and start creating a new one, with a modified input subnetwork which upsamples the input image to match the size expected by the pretrained network.
tf.reset_default_graph()
writer = tf.summary.FileWriter(log_dir)
X, y, is_training = setup_input()
X_upsampled = tf.image.resize_images(X, [299, 299])
# Finally, we reload pretrained network definition, replacing pretrained input placeholders with new tensors we just created.
saver = tf.train.import_meta_graph(pretrained_meta_graph,
input_map = { 'Placeholder:0' : X_upsampled, 'Placeholder_2:0' : is_training })
# Get a handle to the tensor containing classification features.
feat = tf.get_default_graph().get_tensor_by_name("InceptionResnetV2/Logits/Dropout/cond/Merge:0")
# Attach a new fully connected layer for modified task.
class_count = 10
fc1 = tf.layers.dense(feat, class_count)
# ### Complete network definition
# Add metrics and optimizer as before.
mean_loss, accuracy, guess_prob, guess_class = setup_metrics(y, fc1)
all_summaries = setup_scalar_summaries()
train_step = setup_optimizer(mean_loss, 5e-4)
# Once again write out the graph to make sure surgery succeeded.
writer.add_graph(tf.get_default_graph())
# ### Train and validate network (at your own risk!)
# Only now we can restore weights from checkpoint, because weights exist only inside a session.
with tf.Session() as sess:
saver.restore(sess, pretrained_checkpoint)
print('Training')
    run_model(sess, fc1, mean_loss, X_train, y_train, 1, 64, 100, train_step)
    print('Validation')
    run_model(sess, fc1, mean_loss, X_val, y_val, 1, 64)
| 29,957 |
/lectures/05-design.ipynb | 3eb28195787be8c2d05110529d4ee55a6ca446db | [
"MIT"
] | permissive | MelLain/mipt-python | https://github.com/MelLain/mipt-python | 94 | 80 | MIT | 2022-05-07T10:51:15 | 2022-04-16T11:04:20 | Jupyter Notebook | Jupyter Notebook | false | false | .py | 46,549 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # `Python Programming Practicum`
# <br>
#
# ## `Lesson 5: OOP Basics: Code Design, Design Patterns`
# <br><br>
#
# ### `Murat Apishev (mel-lain@yandex.ru)`
#
# #### `Moscow, 2022`
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Reminder: OOP principles`
#
# - **Abstraction** - selecting the important properties of an object and ignoring the rest<br><br>
#
# - **Encapsulation** - keeping data and the methods that work with it inside a single class, with access to the data only through methods<br><br>
#
# - **Inheritance** - the ability to create descendants that receive all the properties of their parents, with the option to override and extend them<br><br>
#
# - **Polymorphism** - the ability to use objects of different types through a common interface without knowing their internal structure
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Reminder: class, object, interface`
#
# - A __class__ is a data type (like int or str)
# - It is a way to describe some entity, its state, and its possible behavior
# - The behavior depends on the state and may change it<br><br>
# - An __object__ is a concrete instance of a class (like a variable of that type)
# - An object has its own state, which is changed by its behavior
# - The behavior is fully defined by the rules described in the class<br><br>
# - An __interface__ is a class that describes only behavior, without state
# - An object of an interface type cannot be created (in languages that support interfaces at the language level)
# - Instead, one describes classes that implement the interface and, at the same time, have state
# <br><br>
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Kinds of relationships between classes`
#
# - __Inheritance__ - a class inherits from a class<br><br>
# - __Realization__ - a class implements an interface<br><br>
# - __Association__ - a horizontal link between objects of two classes (may be "one to many")<br><br>
# - __Composition__ - an object of one class is nested inside another (the owner controls the lifetime of the dependent object)<br><br>
# - __Aggregation__ - an object of one class is nested inside another (the objects remain independent)
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Object-oriented design`
#
# - Design means defining the sets of interfaces, classes, and functions, their properties, and the relationships between them<br><br>
#
# - A system solving the same task can be designed in many different ways<br><br>
#
# - The goal is to design a system that is<br><br>
# - understandable and maintainable
# - free of redundancy
# - easy to modify and extend
# - efficient<br><br>
#
# - This requires your own experience, knowledge built on the experience of your predecessors, and command of the language's capabilities<br><br>
#
# __IMPORTANT:__
# - patterns that rely on typing and interfaces can work in Python even without a strict inheritance hierarchy, thanks to duck typing
# - with type annotations and static checking under nominal typing this is no longer possible
# - for this reason, the code below always defines and inherits proper interfaces
# + [markdown] slideshow={"slide_type": "slide"}
# ### `The SOLID rules`
#
# - __Single responsibility principle__ - an object of a class is responsible for one and only one task<br><br>
#
# - __Open/closed principle__ - a class should be open for extension of its functionality and closed for modification<br><br>
#
# - __Liskov substitution principle__ - a subclass should only extend the parent class, not change anything in it<br><br>
#
# - __Interface segregation principle__ - interfaces should not cover too many tasks (a class should not have to implement methods it does not need because the base interface is too large)<br><br>
#
# - __Dependency inversion principle__ - what is lower in the hierarchy depends on what is higher, not the other way around<br><br>
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Single responsibility principle`
#
# An object of a class is responsible for one and only one task (it has no more than one reason to change)
# -
class Analyzer:
def read_data(self, input_path):
pass
def process_data(self):
pass
def print_results(self, output_path):
pass
# - The class bundles three independent pieces of functionality, so there are three reasons for it to change.
# - It is better to split it into three classes combined at the interface level, for example as sketched below
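#
# A possible split (an added sketch; the class names here are illustrative, not from the slides):
# +
class AnalyzerReader:
    def read_data(self, input_path): pass

class AnalyzerProcessor:
    def process_data(self): pass

class AnalyzerPrinter:
    def print_results(self, output_path): pass
# -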
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Open/closed principle`
#
# - A class should be open for extension of its functionality and closed for modification
#
# - All subsequent changes should be made by adding new code, not by rewriting existing code
# -
class Croupier:
def create_game(self):
print('Deal two cards')
print('Open five cards one by one')
print('Count the winnings')
# - This croupier can only run a game of Texas hold'em
# - If we want to use it for a game of preferans, we would have to change the logic of `create_game`
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Open/closed principle`
#
# The situation can be fixed in different ways, for example like this:
# -
class Croupier:
def create_game(self, game):
game.startSteps()
game.middleSteps()
game.finishSteps()
def startSteps(self): pass
def middleSteps(self): pass
def finishSteps(self): pass
class TexasHoldemGame(Croupier):
def startSteps(self): print('Deal two cards')
def middleSteps(self): print('Open three cards one by one')
def finishSteps(self): print('Count the winnings')
class PreferansGame(Croupier):
def startSteps(self): print('Deal ten cards')
def middleSteps(self): print('Deal buy-in')
def finishSteps(self): print('Count the winnings')
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Liskov substitution principle (LSP)`
#
# - A subclass should only extend the parent class, not change anything in it<br><br>
# - It must be possible to substitute any subtype wherever the base type is expected<br><br>
# - The principle does not mean that you should avoid overriding virtual methods<br><br>
#
# - It requires that the subclass code:
# - does not change the state of the parent class
# - does not strengthen preconditions
# - does not weaken postconditions
# +
class Base:
def method(self, value):
if not isinstance(value, int):
raise Exception
return abs(value)
class Deriv(Base):
def method(self, value):
if not isinstance(value, int) and value < 0:
raise Exception # 1st error (more pre-conditions)
return value # 2st error (less post-conditions)
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Interface segregation principle`
#
# - Interfaces should not cover too many tasks
# - A class should not have to implement methods it does not need because the base interface is too large
# -
class CookingMachine:
def prepare_ingredients(self): pass
def heat_meal(self): pass
# - This approach works for hot dishes
# - Suppose we want to make a salad - the `heat_meal` method is no longer needed
# - If we want to make juice instead, the ingredients have to be squeezed - we need a new `squeeze` method, which is superfluous for dishes and salads
#
# The problem can be solved by building the right set of interfaces - one for each kind of dish or drink being prepared; a possible sketch follows below
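#
# An added sketch of such a split (the interface and class names are illustrative):
# +
class IngredientsPreparer:
    def prepare_ingredients(self): raise NotImplementedError

class Heater:
    def heat_meal(self): raise NotImplementedError

class Squeezer:
    def squeeze(self): raise NotImplementedError

class SoupMachine(IngredientsPreparer, Heater): pass
class SaladMachine(IngredientsPreparer): pass
class JuiceMachine(IngredientsPreparer, Squeezer): pass
# -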
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Dependency inversion principle`
#
# - What is lower in the hierarchy depends on what is higher, not the other way around<br><br>
#
# - The principle consists of two points:<br><br>
# - High-level modules should not depend on low-level modules. Both kinds of modules should depend on abstractions
# - Abstractions should not depend on details. Details should depend on abstractions<br><br>
#
# - The main things to take away from this:<br><br>
# - program against interfaces
# - get rid of nested dependencies
# - objects in the system should be loosely coupled
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Dependency inversion principle`
# +
class Printer:
@staticmethod
def print_text(text):
print(text)
class Class:
def __init__(self, value):
self.value = value
self.printer = Printer()
def print_value(self):
self.printer.print_text(self.value)
# -
# - `Class` depends on `Printer`
# - `Class` can only print via `Printer`, i.e. to standard output
#
# The principle is violated - the coupling between the classes is rigid without any need for it
# + [markdown] slideshow={"slide_type": "slide"}
# ### `The Dependency Inversion Principle`
#
# Let us modify the code to remove the dependencies:
# +
class PrinterInterface:
@staticmethod
def print_text(text):
raise NotImplementedError
class StdoutPrinter(PrinterInterface):
@staticmethod
def print_text(text):
print(text)
class Class:
def __init__(self, value, printer):
self.value = value
self.printer = printer
def print_value(self):
self.printer.print_text(self.value)
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Example: a report sender`
#
# - All the principles are interrelated, and violating one often leads to violating several at once
# - Consider the example below (source: https://blog.byndyu.ru/2009/12/blog-post.html)
# -
class Reporter:
    def send_reports(self):
report_builder = ReportBuilder()
reports = report_builder.create_reports()
if len(reports) == 0:
raise Exception
report_sender = EmailReportSender()
for report in reports:
report_sender.send(report)
# What problems can you see here?
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Example: a report sender`
#
# There are plenty of problems with such code:
#
# - hard to test: how do we check the behaviour of `Reporter`, which depends on other classes?<br><br>
# - high coupling: `Reporter`<br><br>
#     - requires that the reports are produced exactly by `ReportBuilder`
#     - requires that the sending is done exactly by `EmailReportSender`
#     - creates objects of both classes inside itself<br><br>
#
# - Three principles are violated at once:<br><br>
#     - the single responsibility principle (the class should simply send reports)
#     - the open/closed principle (to send reports by something other than e-mail we would have to modify `Reporter`)
#     - the dependency inversion principle (the classes rigidly depend on each other)
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Example: a report sender`
#
# Let us change the code to fix these problems:
# +
class ReportBuilderInterface:
def create_reports(self):
raise NotImplementedError
class ReportSenderInterface:
def send(self, report):
raise NotImplementedError
class Reporter:
def __init__(self, report_builder, report_sender):
self.report_builder = report_builder
self.report_sender = report_sender
    def send_reports(self):
        reports = self.report_builder.create_reports()
        if len(reports) == 0:
            raise Exception
        for report in reports:
            self.report_sender.send(report)
# + [markdown] slideshow={"slide_type": "slide"}
# ### `OO design patterns`
#
# - A set of practices and recommendations for organising code, obtained empirically<br><br>
# - They optimise solutions to typical problems and simplify developing and maintaining code<br><br>
# - Dozens of patterns exist; we will briefly look at a few basic ones:<br><br>
#     - Singleton
#     - Strategy
#     - Mixin
#     - Facade
#     - Adapter, DAO
#     - Simple Factory
#     - Factory Method
#     - Abstract Factory
#     - Decorator
# + [markdown] slideshow={"slide_type": "slide"}
# ### `The Singleton pattern`
#
# - Sometimes we need a class that can have at most one instance (for example, a global config)
# - There are several ways to implement such behaviour
#
# One of the options:
# -
class Singleton:
def __new__(cls): # is called before init to create object
if not hasattr(cls, 'instance'):
cls.instance = super().__new__(cls)
return cls.instance
# +
s = Singleton()
s.some_attr = 'some_attr'
s_1 = Singleton()
s_1.some_attr
# -
# Singleton is also convenient to implement with a static method, for example as sketched below
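# A possible sketch of that variant (not from the original slides): clients call `get_instance()` instead of the constructor; note that the constructor itself is not protected in this simple version.
class StaticSingleton:
    _instance = None
    @staticmethod
    def get_instance():
        # lazily create the single instance on first access
        if StaticSingleton._instance is None:
            StaticSingleton._instance = StaticSingleton()
        return StaticSingleton._instance
print(StaticSingleton.get_instance() is StaticSingleton.get_instance())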
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Strategy`
#
# - A system may contain several different algorithms that solve the same task in different ways
# - The __wrong__ way: put them all into one class
# - The __right__ way - the _strategy_ pattern:
#     - every algorithm is implemented by its own class, all implementing a common interface
#     - a shared context is responsible for swapping algorithms and calling the common execution procedure
# +
class StrategyInterface:
def execute(self, value): raise NotImplementedError
class StrategyA(StrategyInterface):
def execute(self, value): return value
class StrategyB(StrategyInterface):
def execute(self, value): return value * 2
class Context:
    def set_strategy(self, strategy):
        self.strategy = strategy
    def execute(self, value):
        return self.strategy.execute(value)
# + [markdown] slideshow={"slide_type": "slide"}
# ### `The Mixin pattern`
#
# - Mixin is a pattern based on multiple inheritance<br><br>
# - It is used in one of two main cases:<br><br>
#     - we want one class to have many optional features
#     - we want to reuse the same feature in many classes<br><br>
#
# - It is implemented as a class containing the required functionality<br><br>
#
# - That class is then inherited by any class that knows how to use this functionality<br><br>
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Example of using a Mixin`
# +
class VersionMixin:
def set_version(self, version):
self._version = version
@property # One more example of decorator
def version(self):
return self._version if hasattr(self, '_version') else 'Unk. version'
class SomeBaseClass:
def __init__(self, value):
self.value = value
class SomeClass(SomeBaseClass, VersionMixin):
def __init__(self, value):
super().__init__(value)
sc = SomeClass(10)
print(sc.value)
sc.set_version(1.0)
print(sc.version)
# + [markdown] slideshow={"slide_type": "slide"}
# ### `The Facade pattern`
#
# Intended to hide complex behaviour behind a simple external interface
# -
class ATMImpl:
def check_pin(self, pin): pass
def make_db_request(self, user): pass
def check_money(self, user, amount): pass
def send_money(self, amount): pass
...
class ATM:
def __init__(self, atm_impl):
self.atm_impl = atm_impl
def get_money(self, pin, user, amount):
pass # use self.atm_impl methods to proceed operation
# + [markdown] slideshow={"slide_type": "slide"}
# ### `The Adapter pattern`
#
# - Intended to connect two systems that could work together but have incompatible interfaces
# - Especially useful when working with external APIs
# +
class ValueProvider:
def __init__(self, value):
if not isinstance(value, int):
raise ValueError
self._value = value
def get_value(self):
return self._value
class ValueUser:
def use_value(self, value_with_name):
if not isinstance(value_with_name, tuple):
raise ValueError
print(value_with_name[0])
# +
class ValueAdapter:
def transform_value(self, value):
return (value, '')
vp = ValueProvider(10)
va = ValueAdapter()
vu = ValueUser()
vu.use_value(va.transform_value(vp.get_value()))
# + [markdown] slideshow={"slide_type": "slide"}
# ### `The Data Access Object pattern`
#
# - Provides a single interface for accessing a data source (usually a database)
# - A DAO hides the details of establishing connections, sending queries and processing their results, etc.
# - In essence, it is an adapter between the client application and the database
# +
class DbDao:
def request(self, query): raise NotImplementedError
class DbOneDao(DbDao):
def __init__(self, db_one_config):
self._db_one_impl = DbOne(db_one_config)
def request(self, query):
return self._db_one_impl.request(query)
class DbTwoDao(DbDao):
def __init__(self, db_two_config):
self._db_two_impl = DbTwo(db_two_config)
def request(self, query):
return self._db_two_impl.proceed_request(query)
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Factories`
#
# - A factory is the general idea of creating an object with the help of some other entity<br><br>
# - Objects can be created by (see the small sketch after this list)<br><br>
#     - ordinary functions
#     - methods of the same class (including static ones)
#     - methods of other classes<br><br>
# - Depending on the situation, both primitive solutions and complex factories can be used<br><br>
# - Below we will look at three basic factory patterns:<br><br>
#     - Simple Factory
#     - Factory Method
#     - Abstract Factory
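# For instance, a factory can simply be a class method that builds an object from another representation (an illustrative sketch, not from the original slides):
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y
    @classmethod
    def from_string(cls, text):
        # parse a string like "3;4" and build a Point out of it
        x, y = map(float, text.split(';'))
        return cls(x, y)
print(Point.from_string('3;4').x)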
# + [markdown] slideshow={"slide_type": "slide"}
# ### `The Simple Factory pattern`
#
# - There is an object of class `VendingMachine` that describes a vending machine
# - The object has a `get_item` method that, given the product name, returns an object of that product
# -
class VendingMachine:
def get_item(self, item):
if item == 'cola':
            return Cola()  # skip amount checks
        elif item == 'chocolate':
            return Chocolate()
else:
raise ValueError('Unknown item')
# - The created products may be completely unrelated to each other
# - The `VendingMachine` class is a simple factory
# + [markdown] slideshow={"slide_type": "slide"}
# ### `The Factory Method pattern`
#
# - There is an object of class `CottageBuilder` that produces cottage objects `Cottage`<br><br>
# - Our whole software system is built around handling the production of cottages<br><br>
# - At some point it is decided to start producing multi-storey buildings (`MultiStoreyBuilding`)<br><br>
# - A wrong architecture will force us to rewrite code in many places<br><br>
# - The Factory Method pattern lets us avoid this
# + [markdown] slideshow={"slide_type": "slide"}
# ### `The Factory Method pattern: general scheme`
#
# - There is a producer class `ConcreteFactory` and a product class `ConcreteProduct`
# - We introduce a producer interface `FactoryInterface` and a product interface `ProductInterface`
# - Every concrete producer gets the ability to create its concrete product via one and the same method
#
# For our example the code looks like this:
# +
class BuilderInterface:
def build(self):
raise NotImplementedError
class BuildingInterface:
def open_door(self, door_number): raise NotImplementedError
def open_window(self, window_number): raise NotImplementedError
...
# +
class CottageBuilder(BuilderInterface):
def build(self):
return Cottage()
class MultiStoreyBuilder(BuilderInterface):
def build(self):
return MultiStoreyBuilding()
# + [markdown] slideshow={"slide_type": "slide"}
# ### `The Factory Method pattern: general scheme`
#
# It remains to describe the code of the product classes (i.e. the buildings):
# -
class Cottage(BuildingInterface):
def open_door(self, door_number): self._open_door(door_number)
def open_window(self, window_number): self._open_window(window_number)
...
class MultiStoreyBuilding(BuildingInterface):
def open_door(self, door_number): self._open_door(door_number)
def open_window(self, window_number): self._open_window(window_number)
...
# - now the code does not care which building we are working with, they all share a common interface
# - the type of building to construct is decided only inside the concrete builder; a short usage sketch is given below
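# A sketch of hypothetical client code that works with any builder purely through the interfaces (the call is left commented out because the `_open_door`/`_open_window` helpers above are only stubs):
def open_all_front_doors(builders):
    for builder in builders:
        building = builder.build()   # BuilderInterface.build
        building.open_door(1)        # BuildingInterface.open_door
# open_all_front_doors([CottageBuilder(), MultiStoreyBuilder()])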
# + [markdown] slideshow={"slide_type": "slide"}
# ### `The Abstract Factory pattern`
#
# - The pattern is intended for creating families of related objects without specifying their concrete classes<br><br>
# - Easiest to explain with an example (source: https://refactoring.guru/ru/design-patterns/abstract-factory)<br><br>
# - The task:<br><br>
#     - there is a fixed set of GUI elements (CheckBox, Button, TextField and others)
#     - every operating system must display all of these elements
#     - yet each OS has its own display style<br><br>
#
# - Let us describe how an abstract factory is used in such a case
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Factory and product interfaces`
# -
class AbstractGui: # more declarative than necessary, remember about duck-typing
def create_check_box(self): raise NotImplementedError
def create_button(self): raise NotImplementedError
def create_text_field(self): raise NotImplementedError
...
class AbstractCheckBox:
def set_state(self): raise NotImplementedError
...
class AbstractButton:
def on_press(self): raise NotImplementedError
...
class AbstractTextField:
def is_empty(self): raise NotImplementedError
...
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Concrete factory and product classes`
# -
class WindowsGui(AbstractGui):
def create_check_box(self): return WindowsCheckBox()
def create_button(self): return WindowsButton()
def create_text_field(self): return WindowsTextField()
...
class WindowsCheckBox(AbstractCheckBox):
    def set_state(self): self._set_state_impl()
...
class WindowsButton(AbstractButton):
def on_press(self): self._on_press_impl()
...
class WindowsTextField(AbstractTextField):
def is_empty(self): self._is_empty_impl()
...
# We define the classes for Mac OS and Ubuntu (Unity) in the same way; for illustration, the Linux family might look like the sketch below
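# An illustrative sketch (not from the original slides) of the Linux family; the `_impl` helpers are placeholders, just as in the Windows classes above:
class LinuxCheckBox(AbstractCheckBox):
    def set_state(self): self._set_state_impl()
class LinuxButton(AbstractButton):
    def on_press(self): self._on_press_impl()
class LinuxTextField(AbstractTextField):
    def is_empty(self): self._is_empty_impl()
class LinuxGui(AbstractGui):
    def create_check_box(self): return LinuxCheckBox()
    def create_button(self): return LinuxButton()
    def create_text_field(self): return LinuxTextField()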
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Using the abstract factory`
# -
class GuiApplication:
def __init__(self):
import sys
self.gui = None
if sys.platform == 'win32':
self.gui = WindowsGui()
elif sys.platform == 'linux':
self.gui = LinuxGui()
elif sys.platform == 'darwin':
self.gui = MacOsGui()
else:
raise SystemError(f'Unsupported OS type {sys.platform}')
def draw_window(self):
# write cross-platform code, based on interface, not implementations
self.gui.create_button()
self.gui.create_check_box()
self.gui.create_text_field()
...
# + [markdown] slideshow={"slide_type": "slide"}
# ### `The Decorator pattern`
#
# - A design pattern that allows additional behaviour to be attached to an object dynamically<br><br>
# - The implementation includes an interface, component objects implementing it, and decorator objects<br><br>
# - A real-life task: collecting a set of metrics to send to a monitoring service<br><br>
#     - several hundred different metrics are collected
#     - metrics can be aggregated in several ways (mean/median/mode/max/min over an interval)
#     - metrics can be transformed in several ways (positive clipping, filtering of values)<br><br>
# - The modifiers can be applied in arbitrary combinations, numbers and orders<br><br>
#
# - How can such a system be implemented without writing a pile of conditionals?
# + [markdown] slideshow={"slide_type": "slide"}
# ### `The Decorator is ideal for such a task`
#
# Let us describe the metric interface and the classes implementing it:
# -
class MetricInterface(): # again for Python it's unnecessary declaration
def calculate(self): raise NotImplementedError
class MetricA(MetricInterface):
    def calculate(self):
        self.values = self._calculate_a()  # returns list
        return self.values
class MetricB(MetricInterface):
    def calculate(self):
        self.values = self._calculate_b()  # returns list
        return self.values
# + [markdown] slideshow={"slide_type": "slide"}
# ### `The decorators themselves`
#
# Now let us describe the classes of operations over metrics, which also implement the metric interface:
# -
class CalculateMeanDecorator(MetricInterface):
    def __init__(self, metric):
        self.metric = metric
    def calculate(self):
        self.values = [self._calculate_mean(self.metric.calculate())]  # [scalar] == list
        return self.values
class FilterZerosDecorator(MetricInterface):
    def __init__(self, metric):
        self.metric = metric
    def calculate(self):
        self.values = list(filter(lambda x: x != 0.0, self.metric.calculate()))
        return self.values
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Using the decorator`
#
# Now we can easily create metrics and immediately equip them with the required properties:
# +
metric_1 = MetricA()
metric_2 = MetricB()
metric_3 = FilterZerosDecorator(MetricA())
metric_4 = CalculateMeanDecorator(FilterZerosDecorator(MetricB()))
for metric in [metric_1, metric_2, metric_3, metric_4]:
metric.calculate()
MetricSender.send(metric.values)
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Decorators in Python syntax, revisited`
#
# - The "Decorator" pattern and a Python decorator are different things!
# - The pattern is usually used in statically typed languages to add new behaviour to objects dynamically
# - Decorators in Python are a language feature for adding new behaviour to functions, classes and methods at the moment they are defined
# - In Python this is especially easy thanks to duck typing
# - This is the moment to recall closures (functions that return functions)
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Example of a function decorator`
#
# Let us write a decorator that measures how long a function runs:
# -
def timed(callable_obj):
import time
def __timed(*args, **kw):
time_start = time.time()
result = callable_obj(*args, **kw)
time_end = time.time()
print('{} {:.3f} ms'.format(callable_obj.__name__,
(time_end - time_start) * 1000))
return result
return __timed
@timed
def func():
for i in range(1000000):
pass
func()
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Example of a function decorator`
#
# Let us see what this function actually is:
# -
import inspect
lines = inspect.getsource(func)
print(lines)
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Example of a class decorator`
#
# Let us write a class decorator that takes a function decorator as input and wraps it around every public method of the class:
# -
def decorate_class(decorator):
def _decorate(cls):
for f in cls.__dict__:
if callable(getattr(cls, f)) and not f.startswith("_"):
setattr(cls, f, decorator(getattr(cls, f)))
return cls
return _decorate
# +
@decorate_class(timed)
class Cls:
a = 10
def method(self): pass
def _method_2(self): pass
Cls().method()
Cls().a # not callable
Cls()._method_2() # not public
# -
# When the class is defined, `decorate_class._decorate` is called and returns the updated class.
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Metaclasses in Python`
#
# - Recall that in Python everything, including classes, is an object <br><br>
# - This makes it possible to create classes and change their properties (another way besides decorators)<br><br>
# - The class definition determines the properties of its objects<br><br>
# - The metaclass definition determines the properties of the class<br><br>
# - The built-in metaclass in Python is `type`: every class, including `type` itself, is an instance of it
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Metaclasses in Python`
#
# `type` can be used directly to create classes:
# +
Class = type('MyClass', (object, ), {'field': lambda self: 'value'})
c = Class()
print(type(c))
print(c.field())
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Metaclasses in Python`
#
# - We can also define our own metaclasses based on `type`
#
# - Let us write a simple metaclass that adds a `hello` method to the class being created
#
# - Source of the example: https://gitjournal.tech/metaklassy-i-metaprogrammirovanie-v-python/
# +
class HelloMeta(type): # always inherit from type or its subclasses
def hello(cls):
print("greetings from %s, a HelloMeta type class" % (type(cls())))
# call meta-class
def __call__(self, *args, **kwargs):
cls = type.__call__(self, *args, **kwargs) # create class as usual
setattr(cls, "hello", self.hello) # add 'hello' attribute
return cls
class TryHello(object, metaclass=HelloMeta):
def greet(self):
self.hello()
greeter = TryHello()
greeter.greet()
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Why work with metaclasses at all?`
#
# - In practice this is almost never needed; decorators are usually a better choice<br><br>
# - It can be useful when you need to stamp out classes with given properties, or when the details of a class are not known until the code actually runs<br><br>
# - **Example**:
#     - you defined a data class with processing methods that depend on a format
#     - the class has a metaclass that is chosen by the program's arguments
#     - the arguments supply various formats and processing methods
#     - depending on them, the metaclass redefines the methods of your class<br><br>
#
# - **Example**: API generation; in the current context it may be required that all methods of your classes are in upper case (a small sketch follows below)<br><br>
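# A rough illustration of the last example (not from the original slides): a metaclass that renames public methods to upper case.
class UpperCaseMethodsMeta(type):
    def __new__(mcs, name, bases, namespace):
        new_namespace = {}
        for attr_name, attr_value in namespace.items():
            # rename public callables, keep dunders and plain attributes as they are
            if callable(attr_value) and not attr_name.startswith('__'):
                new_namespace[attr_name.upper()] = attr_value
            else:
                new_namespace[attr_name] = attr_value
        return super().__new__(mcs, name, bases, new_namespace)
class Api(metaclass=UpperCaseMethodsMeta):
    def get_user(self): return 'user'
print(Api().GET_USER())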
# + [markdown] slideshow={"slide_type": "slide"}
# ### `Thank you for your attention!`
| 29,779 |
/tp2/TP2_bandits_sinRespuestas.ipynb | 89745e0f98f388933bbfb064a7ac03c4e228f8e5 | [] | no_license | LecJackS/Procesos-Markovianos-Para-el-Aprendizaje-Automatico-2019-1C | https://github.com/LecJackS/Procesos-Markovianos-Para-el-Aprendizaje-Automatico-2019-1C | 1 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 6,853 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
class BernoulliBanditEnv(object):
    # Class that defines the environment, where the reward is 0 or 1 depending on a probability p.
def __init__(self, num_arms=10, p=None):
self.num_arms = num_arms
self.actions = np.arange(num_arms) # define set of actions
        if p is None or len(p) == 1:
            self.p = np.random.beta(0.5, 0.5, size=num_arms)
        elif len(p) == num_arms:
            self.p = p
        else:
            raise Exception('Number of probabilities ({}) does not correspond to number of arms ({}).'.format(len(p), num_arms))
        self.best_action = np.argmax(self.p) # The best action given the environment
def reward(self, action):
return np.random.binomial(1, p=self.p[action])
class Agent(object):
    # Class that defines the agent. Each agent has a decision rule and a learning rule.
def __init__(self, learning_rule, decision_rule, param=None):
self.decision_rule = decision_rule
self.learning_rule = learning_rule
if decision_rule == "epsilon-greedy":
self.epsilon = param["epsilon"]
if decision_rule == "UCB":
self.UCB_param = param["UCB_param"]
def environment(self, env, init_q):
        # initialize the environment
self.env = env
self.k = env.num_arms
self.actions = np.arange(self.k)
self.act_count = np.zeros(self.k)
self.iteration = 0
if self.learning_rule == "BayesianBetaPrior":
self.alpha = np.random.uniform(size=self.k)
self.beta = np.random.uniform(size=self.k)
if len(init_q) == self.k:
self.q_estimate = init_q
else:
raise Exception('Number of initial values ({}) does not correspond to number of arms ({}).'.format(len(init_q), self.k))
def learn(self, a, r):
        # given an action and a reward, update the value function.
if self.learning_rule == "averaging":
self.q_estimate[a] += 1/self.act_count[a] * (r - self.q_estimate[a])
if self.learning_rule == "BayesianBetaPrior":
self.alpha[a] += r
self.beta[a] += 1 - r
def act(self):
        # take an action.
self.iteration += 1
if self.decision_rule == "greedy":
# COMPLETAR
pass
if self.decision_rule == "epsilon-greedy":
# COMPLETAR
pass
if self.decision_rule == "UCB":
# COMPLETAR
pass
if self.decision_rule == "Thompson":
# COMPLETAR
pass
self.act_count[selected_action] += 1
return selected_action
def simulateBandits(agents, narms, initp=None, initq=None, repetitions=1000, N=100):
    # Function that runs the simulations of the agents. It takes the number of repetitions that will be
    # averaged and the number of steps N. agents is a list of agents.
rewards = np.zeros((len(agents), repetitions, N))
bestarm = np.zeros((len(agents), repetitions, N))
for i, agent in enumerate(agents):
for j in np.arange(repetitions):
environment = BernoulliBanditEnv(num_arms=narms, p=initp)
            agent.environment(environment, initq if initq is not None else np.zeros(narms))
for n in np.arange(N):
a = agent.act()
r = environment.reward(a)
agent.learn(a, r)
rewards[i, j, n] = r
bestarm[i, j, n] = 1 if a == environment.best_action else 0
return np.squeeze(np.mean(rewards, axis=1)), np.squeeze(np.mean(bestarm, axis=1))
def plot_results(agents, actions, rewards):
    # TO COMPLETE
pass
# # Exercises:
#
# 1) Fill in the code wherever it says "TO COMPLETE". (An optional, rough sketch of the decision rules is given below, after the exercises.)
#
# 2) Run simulations with a 2-armed bandit (P = [0.4, 0.8]) for each of the decision rules and plot the average reward, the cumulative reward and the percentage of times the best arm was chosen as a function of the steps. Interpret the results.
#
# 3) Run simulations with a 10-armed bandit (P = [0.2, 0.2, 0.4, 0.2, 0.2, 0.2, 0.2, 0.8, 0.2, 0.2]) for each of the decision rules and plot the average reward, the cumulative reward and the percentage of times the best arm was chosen as a function of the steps. Interpret the results.
#
# 4) Study the dependence on the hyperparameter epsilon in the epsilon-greedy decision rule.
#
| 4,978 |
/user-churn-prediction-EMR.ipynb | 11e7a6b4b652b88de467493ff8bd9baa7dd79675 | [] | no_license | LanmingMa/user-churn-prediction | https://github.com/LanmingMa/user-churn-prediction | 0 | 0 | null | null | null | null | Jupyter Notebook | false | false | .py | 88,489 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MAP and MPE Inference
#
# **COMP9418-19T3, W09 Tutorial**
#
# - Instructor: Gustavo Batista
# - School of Computer Science and Engineering, UNSW Sydney
# - Last Update 5th November at 18:00, 2020
# $$
# % macros
# \newcommand{\indep}{\perp \!\!\!\perp}
# $$
# In this week's tutorial, we will implement MPE and MAP algorithms with variable elimination. We will extend our factor representation to store MAP and MPE instantiations. We will build our code using implementations from previous tutorials, in particular, inference with variable elimination and Markov chain models.
# ## Technical prerequisites
#
# You will need certain packages installed to run this notebook.
#
# If you are using ``conda``'s default
# [full installation](https://conda.io/docs/install/full.html),
# these requirements should all be satisfied already.
# Once we have done all that, we
# import some useful modules for later use.
# +
# Make division default to floating-point, saving confusion
from __future__ import division
from __future__ import print_function
# combinatorics
from itertools import product, combinations, permutations
# ordered dictionaries are useful for keeping ordered sets of variables
from collections import OrderedDict as odict
# table formatting for screen output
from tabulate import tabulate
# math library to get access to math.inf
import math
# -
# ## The Medical Conditional Network
#
# In this tutorial, we will implement MPE and MAP inference using variable elimination (VE). To test our implementations, we will use the same example of a Bayesian network from the lecture. Therefore, we will be able to compare our results with those obtained in the slides.
#
# The Bayesian network has the following structure:
#
# ![](./img/Simple_net.png)
#
# In summary, it describes a problem in which we have a population of males and females ($S$) that can suffer from a medical condition ($C$) that is more likely in males than in females. There are two diagnostic tests for $C$, named $T_1$ and $T_2$. The $T_2$ test is more effective on females, and both tests are equally effective on males. Finally, we have a deterministic variable ($A$) indicating whether tests $T_1$ and $T_2$ have the same outcome.
#
# The next cell defines the factors for each CPT and the outcome space for all five variables.
# +
factors = {
'S': {
'dom': ('S'),
'table': odict([
(('male',), 0.55),
(('female',), 0.45),
]),
},
'C': {
'dom': ('S', 'C'),
'table': odict([
(('male', 'yes'), 0.05),
(('male', 'no'), 0.95),
(('female', 'yes'), .01),
(('female', 'no'), .99),
])
},
'T1': {
'dom': ('C', 'T1'),
'table': odict([
(('yes', '+ve'), 0.80),
(('yes', '-ve'), 0.20),
(('no', '+ve'), .20),
(('no', '-ve'), .80),
])
},
'T2': {
'dom': ('S', 'C', 'T2'),
'table': odict([
(('male', 'yes', '+ve'), 0.80),
(('male', 'yes', '-ve'), 0.20),
(('male', 'no', '+ve'), 0.20),
(('male', 'no', '-ve'), 0.80),
(('female', 'yes', '+ve'), .95),
(('female', 'yes', '-ve'), .05),
(('female', 'no', '+ve'), .05),
(('female', 'no', '-ve'), .95),
])
},
'A': {
'dom': ('T1', 'T2', 'A'),
'table': odict([
(('+ve', '+ve','yes'), 1),
(('+ve', '+ve','no'), 0),
(('+ve', '-ve','yes'), 0),
(('+ve', '-ve','no'), 1),
(('-ve', '+ve','yes'), 0),
(('-ve', '+ve','no'), 1),
(('-ve', '-ve','yes'), 1),
(('-ve', '-ve','no'), 0),
])
},
}
outcomeSpace = dict(
S=('male','female'),
C=('yes','no'),
A=('yes','no'),
T1=('+ve','-ve'),
T2=('+ve','-ve')
)
# -
# ## The Extended Factor
#
# An essential part of answering MAP and MPE queries is recovering the instantiation with maximal probability. Remember that an MPE probability query has the following definition:
#
# $$MPE_P(\textbf{e}) = max_{\textbf{q}}P(\textbf{q},\textbf{e})$$
#
# where, $\textbf{q}$ are all variables in the network not present in $\textbf{e}$. Therefore, we are looking for an instantiation $\textbf{q}$ with maximal probability. The $MPE_P$ query returns such maximal probability, i.e., a numerical value.
#
# The MPE instantiation query returns the instantiation, that is, values associated with each variable in $\textbf{q}$. We define the MPE instantiation query as
#
# $$MPE(\textbf{e}) = argmax_{\textbf{q}}P(\textbf{q},\textbf{e})$$
#
# We can use similar definitions for MAP queries. Remember that the main difference between MAP and MPE is that MAP queries typically involve a subset of the network variables. Therefore, MPE queries can be seen as a particular case of MAP queries.
#
# To keep track of the most likely instantiations, we can use extended factors. The general idea is simple. We record the value of the eliminated variable in a list when computing the maximization operation. The next figure from the slides illustrates the concept.
#
# ![](./img/Ext_factors.png)
#
# In this example, we are eliminating variable $S$ through the maximization operation. In the case of the instantiation $C=yes, T_2=+ve$, the two candidates are $S=male$ with probability $P(S=male,C=yes,T_2=+ve)=.0220$ and $S=female$ with probability $P(S=female,C=yes,T_2=+ve)=.0043$. The largest probability comes from $S=male$, and this value is recorded in the extended field of the factor.
#
# We need an extended factor representation. While there are many possibilities, we will use something similar to our table representation. Our requirements are the following:
#
# 1. We need one separated list of each possible combination of variable values in a factor.
# 2. We need to record the variable name as well as the value assigned to this variable, such as $A=yes$.
# 3. We need to start with an empty structure and add entries as we eliminate variables with maximization.
# 4. We need to merge two lists when we join two factors since each one will have its extended field. Notice, as each variable is eliminated only once, the two lists of variables will be disjoint.
#
# We implement the extended field by adding an `ext` key to the factor dictionary. This key has a similar structure to the `table` key. It is an `odict` object will all possible combinations of variable values in the factor domain. However, instead of storing a number (probability), we will store a dictionary.
#
# The next cell illustrates this one factor with the extended representation.
phi = {
'dom': ('C', 'T2'),
'table': odict([
(('yes', '+ve'), 0.0220),
(('yes', '-ve'), 0.0055),
(('no', '+ve'), 0.1045),
(('no', '-ve'), 0.4237),
]),
'ext': odict([
(('yes', '+ve'), dict(S='male')),
(('yes', '-ve'), dict(S='male')),
(('no', '+ve'), dict(S='male')),
(('no', '-ve'), dict(S='female')),
]),
}
# Let us start our code with a new version of the `printFactor` function. The new function prints the extended field if it is present in the factor. This code is a simple modification of the previous `printFactor` sub-routine, so we will provide it to you.
def printFactor(f):
"""
argument
    `f`, a factor to print on screen. If the `ext` field is present, it will be printed as an additional column
"""
# Create a empty list that we will fill in with the probability table entries
table = list()
# Iterate over all keys and probability values in the table
for key, item in f['table'].items():
# Convert the tuple to a list to be able to manipulate it
k = list(key)
# Append the probability value to the list with key values
k.append(item)
# NEW CODE
        # if the 'ext' key is present in the factor, we append the ext dictionary converted to a list of items
if 'ext' in f:
k.append(list(f['ext'][key].items()))
# Append an entire row to the table
table.append(k)
# dom is used as table header. We need it converted to list
dom = list(f['dom'])
    # Append a 'Pr' to indicate the probability column
    dom.append('Pr')
    # NEW CODE
    # Append an 'Ext' header only if the factor has an extended field
    if 'ext' in f:
        dom.append('Ext')
print(tabulate(table,headers=dom,tablefmt='orgtbl'))
# We can test the new `printFactor` function. It works with the extended and non-extended factors.
printFactor(factors['C'])
print()
printFactor(phi)
# ## Maximize Operation
#
# Let us start with the `maximize` operation. We implemented this function in the week 5 tutorial when coding the Viterbi algorithm. The next cell brings this code. Your task is to adapt the implementation to store the extended factors.
#
# ### Exercise
#
# Adapt the `maximize` function in the next cell to update the `ext` dictionary with the name and the corresponding value of the maximized out variable. In your code, verify if the `ext` dictionary exists in the factor `f`. If it does not exist, start with an empty dictionary.
# +
def maximize(f, var, outcomeSpace):
"""
argument
`f`, factor to be maximized.
`var`, variable to be maximized out.
`outcomeSpace`, dictionary with the domain of each variable.
Returns a new factor f' with dom(f') = dom(f) - {var}. The `ext` field is updated to store the most likely instantiation
"""
# Let's make a copy of f domain and convert it to a list. We need a list to be able to modify its elements
new_dom = list(f['dom'])
new_dom.remove(var) # Remove var from the list new_dom by calling the method remove(). 1 line
table = list() # Create an empty list for table. We will fill in table from scratch. 1 line
for entries in product(*[outcomeSpace[node] for node in new_dom]):
m = -1; # Initialize the maximization variable m. 1 line
# We need to iterate over all possible outcomes of the variable var
for val in outcomeSpace[var]:
# To modify the tuple entries, we will need to convert it to a list
entriesList = list(entries)
# We need to insert the value of var in the right position in entriesList
entriesList.insert(f['dom'].index(var), val)
p = prob(f, *tuple(entriesList)) # Calculate the probability of factor f for entriesList. 1 line
m = max(m, p) # Maximize over all values of var by storing the max value in m. 1 line
# Create a new table entry with the multiplication of p1 and p2
table.append((entries, m))
return {'dom': tuple(new_dom), 'table': odict(table)}
################
# Test code
def prob(factor, *entry):
"""
argument
`factor`, a dictionary of domain and probability values,
`entry`, a list of values, one for each variable in the same order as specified in the factor domain.
Returns p(entry)
"""
return factor['table'][entry] # insert your code here, 1 line
printFactor(factors['T2'])
print()
f = maximize(factors['T2'], 'S', outcomeSpace)
printFactor(f)
print()
printFactor(maximize(f, 'T2', outcomeSpace))
# +
# Answer
def maximize(f, var, outcomeSpace):
"""
argument
`f`, factor to be maximized.
`var`, variable to be maximized out.
`outcomeSpace`, dictionary with the domain of each variable.
Returns a new factor f' with dom(f') = dom(f) - {var}
"""
# Let's make a copy of f domain and convert it to a list. We need a list to be able to modify its elements
new_dom = list(f['dom'])
new_dom.remove(var) # Remove var from the list new_dom by calling the method remove(). 1 line
# NEW CODE: new_ext is a list of variables names and values that will compose the exteded field of the factor
new_ext = list()
table = list() # Create an empty list for table. We will fill in table from scratch. 1 line
for entries in product(*[outcomeSpace[node] for node in new_dom]):
m = -1; # Initialize the maximization variable m. 1 line
# We need to iterate over all possible outcomes of the variable var
for val in outcomeSpace[var]:
# To modify the tuple entries, we will need to convert it to a list
entriesList = list(entries)
# We need to insert the value of var in the right position in entriesList
entriesList.insert(f['dom'].index(var), val)
p = prob(f, *tuple(entriesList)) # Calculate the probability of factor f for entriesList. 1 line
# NEW CODE: test to see if we have a new candidate for instantiation with maximum probability
if p > m:
# NEW CODE: Store the new maximum probability for instantiation
m = p
# NEW CODE: Store the value of the variable that is candidate to be instantiation with maximum probability
max_val = val
# NEW CODE: if the 'ext' field in present in the factor, we make a copy and will add values to it
if ('ext' in f):
max_ext = f['ext'][tuple(entriesList)].copy()
# NEW CODE: Otherwise, we start with an empty dictionary
else:
max_ext = dict()
# Create a new table entry with the multiplication of p1 and p2
table.append((entries, m))
# NEW CODE: Add a new entry to the dictionary: var = max_val
max_ext[var] = max_val
# NEW CODE: Append a line to the new_ext list with the updated extended dictionary
new_ext.append((entries, max_ext))
# NEW CODE: return a new factor with the `ext` field as well
return {'dom': tuple(new_dom), 'table': odict(table), 'ext': odict(new_ext)}
################
# Test code
def prob(factor, *entry):
"""
argument
`factor`, a dictionary of domain and probability values,
`entry`, a list of values, one for each variable in the same order as specified in the factor domain.
Returns p(entry)
"""
return factor['table'][entry]
printFactor(factors['T2'])
print()
f = maximize(factors['T2'], 'S', outcomeSpace)
printFactor(f)
print()
printFactor(maximize(f, 'T2', outcomeSpace))
# -
# If your code is correct, you should see the following output:
#
# ```
# | S | C | T2 | Pr |
# |--------+-----+------+------|
# | male | yes | +ve | 0.8 |
# | male | yes | -ve | 0.2 |
# | male | no | +ve | 0.2 |
# | male | no | -ve | 0.8 |
# | female | yes | +ve | 0.95 |
# | female | yes | -ve | 0.05 |
# | female | no | +ve | 0.05 |
# | female | no | -ve | 0.95 |
#
# | C | T2 | Pr | Ext |
# |-----+------+------+-------------------|
# | yes | +ve | 0.95 | [('S', 'female')] |
# | yes | -ve | 0.2 | [('S', 'male')] |
# | no | +ve | 0.2 | [('S', 'male')] |
# | no | -ve | 0.95 | [('S', 'female')] |
#
# | C | Pr | Ext |
# |-----+------+----------------------------------|
# | yes | 0.95 | [('S', 'female'), ('T2', '+ve')] |
# | no | 0.95 | [('S', 'female'), ('T2', '-ve')] |
# ```
# ## Join Operation with Extended Factors
#
# We also need to modify the `join` function to work with extended factors. If we have two factors with non-empty `ext` dictionaries, we need to merge those dictionaries for each table entry.
#
# ### Exercise
#
# Implement a new version of the `join` operation that works with extended factors. The new 'join' operation must work with the following scenarios:
#
# 1. Join two factors with no extended factor fields. In this case, the resulting factor has an extended field with empty dictionaries.
# 2. Join a factor that has an extended factor field `e` with another one that does not have it. In this case, the resulting factor should have a copy of the extended field `e`.
# 3. Join two factors with existing extended factor fields. In this case, we need to merge the two dictionaries.
#
# We have started the code for you.
# +
def join(f1, f2, outcomeSpace):
"""
argument
`f1`, first factor to be joined.
`f2`, second factor to be joined.
`outcomeSpace`, dictionary with the domain of each variable
Returns a new factor with a join of f1 and f2. This version tests of the existance of `ext` field and merge them in the resulting factor
"""
# First, we need to determine the domain of the new factor. It will be union of the domain in f1 and f2
# But it is important to eliminate the repetitions
common_vars = list(f1['dom']) + list(set(f2['dom']) - set(f1['dom']))
# We will build a table from scratch, starting with an empty list. Later on, we will transform the list into a odict
table = list()
# ext stores a list similar to table but for the extended factor field. We start with an empty list and convert to a odict in the end
ext = None # Initialize ext with an empty list: 1 line
# Here is where the magic happens. The product iterator will generate all combinations of varible values
# as specified in outcomeSpace. Therefore, it will naturally respect observed values
for entries in product(*[outcomeSpace[node] for node in common_vars]):
# We need to map the entries to the domain of the factors f1 and f2
entryDict = dict(zip(common_vars, entries))
f1_entry = tuple((entryDict[var] for var in f1['dom']))
f2_entry = tuple((entryDict[var] for var in f2['dom']))
# Insert your code here
p1 = prob(f1, *f1_entry) # Use the fuction prob to calculate the probability in factor f1 for entry f1_entry
p2 = prob(f2, *f2_entry) # Use the fuction prob to calculate the probability in factor f2 for entry f2_entry
# Test if `ext` key exists in f1. If it does exist, make a copy of the dictionary for f1_entry
if None: # Test if `ext` key exists in the f1's extended factor: 1 line
e = None # Assign to e a copy of the ext dictionary for f1_entry: 1 line
else:
# Otherwise, initialize e with an empty dictionary
e = None # Assign an empty dictionary to e
# Test if `ext` key exists in f2. If it does exist, merge the ext dictionary for f2_entry with e
if None: # Test if `ext` key exists in the f2's extended factor: 1 line
None # Merge e and the ext dictionary for f2_entry. Use update: 1 line
# Create a new table entry with the multiplication of p1 and p2
table.append((entries, p1 * p2))
# Create a new ext table entry with the dictionary e
ext.append(None) # Append to ext a row with the entries values and the dictionary e: 1 line
return {'dom': tuple(common_vars), 'table': odict(table), 'ext': None} # Return ext as an odict object: 1 line
########################
# Test code
phi1 = {
'dom': ('A', 'B'),
'table': odict([
(('+a', '+b'), 0.3),
(('+a', '-b'), 0.7),
(('-a', '+b'), 0.2),
(('-a', '-b'), 0.8),
]),
'ext': odict([
(('+a', '+b'), dict(C='+c')),
(('+a', '-b'), dict(C='+c')),
(('-a', '+b'), dict(C='+c')),
(('-a', '-b'), dict(C='-c')),
]),
}
phi2 = {
'dom': ('B', 'D'),
'table': odict([
(('+b', '+d'), 0.1),
(('+b', '-d'), 0.9),
(('-b', '+d'), 0.4),
(('-b', '-d'), 0.6),
]),
'ext': odict([
(('+b', '+d'), dict(E='-e')),
(('+b', '-d'), dict(E='+e')),
(('-b', '+d'), dict(E='-e')),
(('-b', '-d'), dict(E='+e')),
]),
}
o = dict(
A=('+a','-a'),
B=('+b','-b'),
C=('+c','-c'),
D=('+d','-d'),
E=('+e','-e')
)
printFactor(join(phi1, phi2, o))
# +
# Answer
def join(f1, f2, outcomeSpace):
"""
argument
`f1`, first factor to be joined.
`f2`, second factor to be joined.
`outcomeSpace`, dictionary with the domain of each variable
Returns a new factor with a join of f1 and f2. This version tests of the existance of `ext` field and merge them in the resulting factor
"""
# First, we need to determine the domain of the new factor. It will be union of the domain in f1 and f2
# But it is important to eliminate the repetitions
common_vars = list(f1['dom']) + list(set(f2['dom']) - set(f1['dom']))
# We will build a table from scratch, starting with an empty list. Later on, we will transform the list into a odict
table = list()
# ext stores a list similar to table but for the extended factor field. We start with an empty list and convert to a odict in the end
ext = list()
# Here is where the magic happens. The product iterator will generate all combinations of varible values
# as specified in outcomeSpace. Therefore, it will naturally respect observed values
for entries in product(*[outcomeSpace[node] for node in common_vars]):
# We need to map the entries to the domain of the factors f1 and f2
entryDict = dict(zip(common_vars, entries))
f1_entry = tuple((entryDict[var] for var in f1['dom']))
f2_entry = tuple((entryDict[var] for var in f2['dom']))
# Insert your code here
p1 = prob(f1, *f1_entry) # Use the fuction prob to calculate the probability in factor f1 for entry f1_entry
p2 = prob(f2, *f2_entry) # Use the fuction prob to calculate the probability in factor f2 for entry f2_entry
# Test if `ext` key exists in f1. If it does exist, make a copy of the dictionary for f1_entry
if ('ext' in f1):
e = f1['ext'][tuple(f1_entry)].copy()
else:
# Otherwise, initialize e with an empty dictionary
e = dict()
# Test if `ext` key exists in f2. If it does exist, merge the dictionary for f2_entry with e
if ('ext' in f2):
e.update(f2['ext'][f2_entry])
# Create a new table entry with the multiplication of p1 and p2
table.append((entries, p1 * p2))
# Create a new ext table entry with the dictionary e
ext.append((entries, e))
return {'dom': tuple(common_vars), 'table': odict(table), 'ext': odict(ext)}
########################
# Test code
phi1 = {
'dom': ('A', 'B'),
'table': odict([
(('+a', '+b'), 0.3),
(('+a', '-b'), 0.7),
(('-a', '+b'), 0.2),
(('-a', '-b'), 0.8),
]),
'ext': odict([
(('+a', '+b'), dict(C='+c')),
(('+a', '-b'), dict(C='+c')),
(('-a', '+b'), dict(C='+c')),
(('-a', '-b'), dict(C='-c')),
]),
}
phi2 = {
'dom': ('B', 'D'),
'table': odict([
(('+b', '+d'), 0.1),
(('+b', '-d'), 0.9),
(('-b', '+d'), 0.4),
(('-b', '-d'), 0.6),
]),
'ext': odict([
(('+b', '+d'), dict(E='-e')),
(('+b', '-d'), dict(E='+e')),
(('-b', '+d'), dict(E='-e')),
(('-b', '-d'), dict(E='+e')),
]),
}
o = dict(
A=('+a','-a'),
B=('+b','-b'),
C=('+c','-c'),
D=('+d','-d'),
E=('+e','-e')
)
printFactor(join(phi1, phi2, o))
# -
# If your code is correct, you should see the following output:
#
# ```
# | A | B | D | Pr | Ext |
# |-----+-----+-----+------+----------------------------|
# | +a | +b | +d | 0.03 | [('C', '+c'), ('E', '-e')] |
# | +a | +b | -d | 0.27 | [('C', '+c'), ('E', '+e')] |
# | +a | -b | +d | 0.28 | [('C', '+c'), ('E', '-e')] |
# | +a | -b | -d | 0.42 | [('C', '+c'), ('E', '+e')] |
# | -a | +b | +d | 0.02 | [('C', '+c'), ('E', '-e')] |
# | -a | +b | -d | 0.18 | [('C', '+c'), ('E', '+e')] |
# | -a | -b | +d | 0.32 | [('C', '-c'), ('E', '-e')] |
# | -a | -b | -d | 0.48 | [('C', '-c'), ('E', '+e')] |
# ```
# ## MPE Variable Elimination
#
# We can now implement the MPE VE algorithm. Let us make it in two parts. The first one is the variable elimination part and the second one the query part. Therefore, we will be able to reuse the VE implementation from week 4.
#
# The MPE VE implementation is very similar to the `VE` function we did before.
#
# ### Exercise
#
# Implement the `MPE_VE` function based on the `VE` function from week 6. The main difference is that we now eliminate variables maximizing out instead of summing out.
# +
def MPE_VE(factors, order, outcomeSpace):
"""
argument
`factors`, a dictionary of factors, each factor is a dictionary of domain and probability values.
`order`, a list of variable names specifying an elimination order.
`outcomeSpace`, a dictionary with variable names and respective domains.
Returns a dictionary with non-eliminated factors. Variables are eliminated by maximization.
"""
# Let's make a copy of factors, so we can freely modify it without destroying the original dictionary
f = factors.copy()
# We process the factor in elimination order
for i, var in enumerate(order):
# This is the domain of the new factor. We use sets as it is handy to eliminate duplicate variables
newFactorDom = set()
# This is a list of factors that will be removed from f because they were joined with other factors
listFactorsRemove = list()
# This is a flag to indicate if we are processing the first factor
first = True
# Lets iterate over all factors
for f_id in f.keys():
# and select the ones that have the variable to be eliminated
if var in f[f_id]['dom']:
if first:
# We need this code since join requires two factors, so we save the first one in fx and wait for the next
fx = f[f_id]
first = False
else:
# Join fx and f[f_id] and save the result in fx
fx = join(fx, f[f_id], outcomeSpace)
# f_id was joined, so we will need to eliminate it from f later. Let's save that factor id for future removal
listFactorsRemove.append(f_id)
# Now, we need to remove var from the domain of the new factor doing a maximization
fx = None # Eliminate variable var from fx by doing maximization: 1 line
# Now, we remove all factors that we joined. We do it outside the for loop since it modifies the data structure
for f_id in listFactorsRemove:
del f[f_id]
# We will create a new factor with id equal a sequential number and insert it into f, so it can be used in future joins
f[i] = fx
return f
#########################
# Test code
printFactor(MPE_VE(factors, ('A', 'T1', 'T2','S', 'C'), outcomeSpace)[4])
# +
## Answer
def MPE_VE(factors, order, outcomeSpace):
"""
argument
`factors`, a dictionary of factors, each factor is a dictionary of domain and probability values.
`order`, a list of variable names specifying an elimination order.
`outcomeSpace`, a dictionary with variable names and respective domains.
Returns a dictionary with non-eliminated factors. Variables are eliminated by maximization.
"""
# Let's make a copy of factors, so we can freely modify it without destroying the original dictionary
f = factors.copy()
# We process the factor in elimination order
for i, var in enumerate(order):
# This is the domain of the new factor. We use sets as it is handy to eliminate duplicate variables
newFactorDom = set()
# This is a list of factors that will be removed from f because they were joined with other factors
listFactorsRemove = list()
# This is a flag to indicate if we are processing the first factor
first = True
# Lets iterate over all factors
for f_id in f.keys():
# and select the ones that have the variable to be eliminated
if var in f[f_id]['dom']:
if first:
# We need this code since join requires two factors, so we save the first one in fx and wait for the next
fx = f[f_id]
first = False
else:
# Join fx and f[f_id] and save the result in fx
fx = join(fx, f[f_id], outcomeSpace)
# f_id was joined, so we will need to eliminate it from f later. Let's save that factor id for future removal
listFactorsRemove.append(f_id)
# Now, we need to remove var from the domain of the new factor doing a maximization
fx = maximize(fx, var, outcomeSpace) # Eliminate variable var from fx by doing maximization: 1 line
# Now, we remove all factors that we joined. We do it outside the for loop since it modifies the data structure
for f_id in listFactorsRemove:
del f[f_id]
# We will create a new factor with id equal a sequential number and insert it into f, so it can be used in future joins
f[i] = fx
return f
#########################
# Test code
printFactor(MPE_VE(factors, ('A', 'T1', 'T2','S', 'C'), outcomeSpace)[4])
# -
# If your code is correct, you should see the following output:
#
# ```
# | Pr | Ext |
# |---------+----------------------------------------------------------------------------|
# | 0.33858 | [('A', 'yes'), ('T1', '-ve'), ('T2', '-ve'), ('S', 'female'), ('C', 'no')] |
# ````
#
# This result matches the one we obtained in slide 27 of lecture 14.
# ## MPE Query
#
# Now that we have MPE VE, we can easily implement MPE Query. The function `MPE_query` receives as argument a dictionary `q_evi` with the evidence information. After setting the evidence according to the entries in this dictionary, `MPE_query` calls `MPE_VE` to eliminate all variables according to a provided elimination `order`.
#
# ### Exercise
#
# Implement the function `MPE_query`. We have provided most of the code for you.
# +
def MPE_query(factors, order, outcomeSpace, **q_evi):
"""
argument
`factors`, a dictionary of factors
`order`, a list with variable elimination order
`outcomeSpace`, dictionary will variable domains
`q_evi`, dictionary of evidence in the form of variables names and values
Returns a new factor with MPE(e)
"""
# Let's make a copy of these structures, since we will reuse the variable names
outSpace = outcomeSpace.copy()
# First, we set the evidence
for var_evi, e in q_evi.items():
outSpace = None # Set the evidence according to q_evi. Use the evidence function: 1 line
# Call MPE_VE to eliminate all variables according to the provided elimination order
f = None # Call MPE_VE to eliminate variables using maximization: 1 line
# The remaining code will join all remaining factors in f into a single factor fx
first = True
for f_id in f.keys():
if first:
# We need this code since join requires two factors, so we save the first one in fx and wait for the next
fx = f[f_id]
first = False
else:
# Join fx and f[f_id] and save the result in fx
fx = join(fx, f[f_id], outSpace)
return fx
#########################
# Test code
#########################
def evidence(var, e, outcomeSpace):
"""
argument
`var`, a valid variable identifier.
`e`, the observed value for var.
`outcomeSpace`, dictionary with the domain of each variable
Returns dictionary with a copy of outcomeSpace with var = e
"""
newOutcomeSpace = outcomeSpace.copy() # Make a copy of outcomeSpace with a copy to method copy(). 1 line
newOutcomeSpace[var] = (e,) # Replace the domain of variable var with a tuple with a single element e. 1 line
return newOutcomeSpace
printFactor(MPE_query(factors, ('A', 'T1', 'T2','S','C'), outcomeSpace, A='yes'))
# +
#Answer
def MPE_query(factors, order, outcomeSpace, **q_evi):
"""
argument
`factors`, a dictionary of factors
`order`, a list with variable elimination order
`outcomeSpace`, dictionary will variable domains
`q_evi`, dictionary of evidence in the form of variables names and values
Returns a new factor with MPE(e)
"""
# Let's make a copy of these structures, since we will reuse the variable names
outSpace = outcomeSpace.copy()
# First, we set the evidence
for var_evi, e in q_evi.items():
outSpace = evidence(var_evi, e, outSpace) # Set the evidence according to q_evi. Use the evidence function: 1 line
# Call MPE_VE to eliminate all variables according to the provided elimination order
f = MPE_VE(factors, order, outSpace) # Call MPE_VE to eliminate variables using maximization: 1 line
# The remaining code will join all remaining factors in f into a single factor fx
first = True
for f_id in f.keys():
if first:
# We need this code since join requires two factors, so we save the first one in fx and wait for the next
fx = f[f_id]
first = False
else:
# Join fx and f[f_id] and save the result in fx
fx = join(fx, f[f_id], outSpace)
return fx
#########################
# Test code
#########################
def evidence(var, e, outcomeSpace):
"""
argument
`var`, a valid variable identifier.
`e`, the observed value for var.
`outcomeSpace`, dictionary with the domain of each variable
Returns dictionary with a copy of outcomeSpace with var = e
"""
newOutcomeSpace = outcomeSpace.copy() # Make a copy of outcomeSpace with a copy to method copy(). 1 line
newOutcomeSpace[var] = (e,) # Replace the domain of variable var with a tuple with a single element e. 1 line
return newOutcomeSpace
printFactor(MPE_query(factors, ('A', 'T1', 'T2','S','C'), outcomeSpace, A='yes'))
# -
# If your code is correct, you should see the following output:
#
# ```
# | Pr | Ext |
# |---------+----------------------------------------------------------------------------|
# | 0.33858 | [('A', 'yes'), ('T1', '-ve'), ('T2', '-ve'), ('S', 'female'), ('C', 'no')] |
# ````
#
# This output matches the results presented in slide 43 of lecture 14.
# ## MAP Variable Elimination
#
# Let us implement the MAP VE algorithm in the same steps we did for MPE. We start with a `MAP_VE` function that is very similar to the `MPE_VE` we just implemented.
#
# The main difference between them is that `MAP_VE` receives an additional argument `map_vars` with a list of MAP variables. To provide the correct output, `map_vars` should be the last variables of `order`. However, we will not enforce that constraint. Calling `MAP_VE` when `map_vars` are not the last variables in `order` is still useful, since the output will be an upper bound on the MAP probability.
#
# MAP VE should eliminate all variables in `order`. Unlike MPE VE, it eliminates non-MAP variables by summation and MAP variables by maximization.
#
# ### Exercise
#
# Implement the `MAP_VE` function. You can use the `MPE_VE` function implemented before in the tutorial. Remember to eliminate MAP and non-MAP variables with maximization and summation, respectively.
#
# We create a stub for your function. You will implement most of the code. Use `MPE_VE` as a starting point.
# +
def MAP_VE(factors, order, map_vars, outcomeSpace):
"""
argument
`factors`, a dictionary of factors, each factor is a dictionary of domain and probability values,
`order`, a list of variable names specifying an elimination order,
`map_vars`, a list of MAP variables. Although the code does not enforce that, these variables should be the last ones in `order`
`outcomeSpace`, a dictionary with variable names and respective domains.
Returns a dictionary with non-eliminated factors
"""
None
##########################
# Test code
def marginalize(f, var, outcomeSpace):
"""
argument
`f`, factor to be marginalized.
`var`, variable to be summed out.
`outcomeSpace`, dictionary with the domain of each variable
Returns a new factor f' with dom(f') = dom(f) - {var}
"""
# Let's make a copy of f domain and convert it to a list. We need a list to be able to modify its elements
new_dom = list(f['dom'])
new_dom.remove(var) # Remove var from the list new_dom by calling the method remove(). 1 line
table = list() # Create an empty list for table. We will fill in table from scratch. 1 line
for entries in product(*[outcomeSpace[node] for node in new_dom]):
        s = 0  # Initialize the summation variable s. 1 line
# We need to iterate over all possible outcomes of the variable var
for val in outcomeSpace[var]:
# To modify the tuple entries, we will need to convert it to a list
entriesList = list(entries)
# We need to insert the value of var in the right position in entriesList
entriesList.insert(f['dom'].index(var), val)
p = prob(f, *tuple(entriesList)) # Calculate the probability of factor f for entriesList. 1 line
s = s + p # Sum over all values of var by accumulating the sum in s. 1 line
        # Create a new table entry with the accumulated sum s
table.append((entries, s))
return {'dom': tuple(new_dom), 'table': odict(table)}
printFactor(MAP_VE(factors, ('A', 'T1', 'T2', 'S', 'C'), ('S', 'C'), outcomeSpace)[4])
# +
## Answer
def MAP_VE(factors, order, map_vars, outcomeSpace):
"""
argument
`factors`, a dictionary of factors, each factor is a dictionary of domain and probability values,
`order`, a list of variable names specifying an elimination order,
`map_vars`, a list of MAP variables. Although the code does not enforce that, these variables should be the last ones in `order`
`outcomeSpace`, a dictionary with variable names and respective domains.
Returns a dictionary with non-eliminated factors
"""
# Let's make a copy of factors, so we can freely modify it without destroying the original dictionary
f = factors.copy()
# We process the factor in elimination order
for i, var in enumerate(order):
# This is the domain of the new factor. We use sets as it is handy to eliminate duplicate variables
newFactorDom = set()
# This is a list of factors that will be removed from f because they were joined with other factors
listFactorsRemove = list()
# This is a flag to indicate if we are processing the first factor
first = True
# Lets iterate over all factors
for f_id in f.keys():
# and select the ones that have the variable to be eliminated
if var in f[f_id]['dom']:
if first:
# We need this code since join requires two factors, so we save the first one in fx and wait for the next
fx = f[f_id]
first = False
else:
# Join fx and f[f_id] and save the result in fx
fx = join(fx, f[f_id], outcomeSpace)
# f_id was joined, so we will need to eliminate it from f later. Let's save that factor id for future removal
listFactorsRemove.append(f_id)
        # Now, we eliminate var from the new factor: maximize it out if it is a MAP variable, otherwise sum it out (marginalize)
if (var in map_vars):
fx = maximize(fx, var, outcomeSpace)
else:
fx = marginalize(fx, var, outcomeSpace)
        # Now, we remove all factors that were joined in this iteration. We do it outside the for loop since it modifies the data structure
for f_id in listFactorsRemove:
del f[f_id]
        # We will create a new factor with an id equal to a sequential number and insert it into f, so it can be used in future joins
f[i] = fx
return f
##########################
# Test code
def marginalize(f, var, outcomeSpace):
"""
argument
`f`, factor to be marginalized.
`var`, variable to be summed out.
`outcomeSpace`, dictionary with the domain of each variable
Returns a new factor f' with dom(f') = dom(f) - {var}
"""
# Let's make a copy of f domain and convert it to a list. We need a list to be able to modify its elements
new_dom = list(f['dom'])
new_dom.remove(var) # Remove var from the list new_dom by calling the method remove(). 1 line
table = list() # Create an empty list for table. We will fill in table from scratch. 1 line
for entries in product(*[outcomeSpace[node] for node in new_dom]):
        s = 0  # Initialize the summation variable s. 1 line
# We need to iterate over all possible outcomes of the variable var
for val in outcomeSpace[var]:
# To modify the tuple entries, we will need to convert it to a list
entriesList = list(entries)
# We need to insert the value of var in the right position in entriesList
entriesList.insert(f['dom'].index(var), val)
p = prob(f, *tuple(entriesList)) # Calculate the probability of factor f for entriesList. 1 line
s = s + p # Sum over all values of var by accumulating the sum in s. 1 line
        # Create a new table entry with the accumulated sum s
table.append((entries, s))
return {'dom': tuple(new_dom), 'table': odict(table)}
printFactor(MAP_VE(factors, ('A', 'T1', 'T2', 'S', 'C'), ('S', 'C'), outcomeSpace)[4])
# -
# If your code is correct, you should see the following output:
#
# ```
# | Pr | Ext |
# |--------+------------------------------|
# | 0.5225 | [('S', 'male'), ('C', 'no')] |
# ```
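#
# As a side note on the test code above: `MAP_VE` returns a dictionary of the remaining factors, and the answer code stores the factor created while eliminating the `i`-th variable under the key `i`. The index `[4]` in the test therefore selects the factor created when the fifth and last variable of the order is eliminated. To inspect everything the call leaves behind, a small illustrative snippet (not part of the original tutorial) is:
#
# ```python
# remaining = MAP_VE(factors, ('A', 'T1', 'T2', 'S', 'C'), ('S', 'C'), outcomeSpace)
# for f_id, factor in remaining.items():
#     print('factor id:', f_id)
#     printFactor(factor)
# ```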
# ## MAP Query
#
# Now that we have MAP VE, we can also implement the MAP query. The function `MAP_query` is very similar to `MPE_query`, with the difference that it uses `MAP_VE` instead of `MPE_VE`, and therefore eliminates MAP and non-MAP variables differently.
#
# ### Exercise
#
# Implement the function `MAP_query`. We have provided a stub for you. Use the function `MPE_query` as a starting point.
# +
def MAP_query(factors, order, outcomeSpace, map_vars, **q_evi):
"""
argument
`factors`, a dictionary of factors
`order`, a list with variable elimination order
    `outcomeSpace`, dictionary with variable domains
`map_vars`, a list of MAP variables. Although the code does not enforce that, these variables should be the last ones in `order`
`q_evi`, dictionary of evidence in the form of variables names and values
Returns a new factor with P(Q, e)
"""
None
#########################
# Test code
printFactor(MAP_query(factors, ('A', 'T1', 'T2','S','C'), outcomeSpace, ('S', 'C'), A='yes'))
# +
#Answer
def MAP_query(factors, order, outcomeSpace, map_vars, **q_evi):
"""
argument
`factors`, a dictionary of factors
`order`, a list with variable elimination order
    `outcomeSpace`, dictionary with variable domains
`map_vars`, a list of MAP variables. Although the code does not enforce that, these variables should be the last ones in `order`
`q_evi`, dictionary of evidence in the form of variables names and values
    Returns a new factor with P(Q, e)
"""
# Let's make a copy of these structures, since we will reuse the variable names
outSpace = outcomeSpace.copy()
# First, we set the evidence
for var_evi, e in q_evi.items():
outSpace = evidence(var_evi, e, outSpace)
    # Call MAP_VE to eliminate all variables according to the provided elimination order
    f = MAP_VE(factors, order, map_vars, outSpace)
    # The remaining code will join all remaining factors in f into a single factor fx
    first = True
for f_id in f.keys():
if first:
# We need this code since join requires two factors, so we save the first one in fx and wait for the next
fx = f[f_id]
first = False
else:
# Join fx and f[f_id] and save the result in fx
fx = join(fx, f[f_id], outSpace)
return fx
#########################
# Test code
printFactor(MAP_query(factors, ('A', 'T1', 'T2','S','C'), outcomeSpace, ('S', 'C'), A='yes'))
# -
# If your code is correct, you should see the following output:
#
# ```
# | Pr | Ext |
# |--------+------------------------------|
# | 0.3553 | [('S', 'male'), ('C', 'no')] |
# ```
#
# This matches the results shown in slide 78 of lecture 12.
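#
# As a further, purely illustrative usage example (a hypothetical query, not part of the original tutorial), the same function can answer other MAP queries, for instance the most likely combination of test results `T1` and `T2` given the evidence `A='yes'`, keeping the MAP variables last in the elimination order:
#
# ```python
# printFactor(MAP_query(factors, ('A', 'S', 'C', 'T1', 'T2'), outcomeSpace,
#                       ('T1', 'T2'), A='yes'))
# ```
#
# As with `MPE_query`, the value reported is the joint probability P(Q, e) of the MAP assignment together with the evidence; it is not normalized by the probability of the evidence.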
# That is all for today. See you next week!