Dataset schema (one row per source file):
repo_name: string, 6-77 chars
path: string, 8-215 chars
license: string, 15 classes
content: string, 335-154k chars
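As a quick illustration of how rows with this schema might be consumed, here is a minimal pandas sketch; the Parquet filename and the license values in the filter are assumptions for illustration, not part of the dataset.

```python
import pandas as pd

# Hypothetical local export of the rows shown below.
df = pd.read_parquet("notebooks_corpus.parquet")

# Keep permissively licensed Jupyter notebooks only.
mask = df["path"].str.endswith(".ipynb") & df["license"].isin(
    ["apache-2.0", "mit", "bsd-3-clause"]
)
print(df.loc[mask, ["repo_name", "path", "license"]].head())
```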
keras-team/keras-io
examples/vision/ipynb/mnist_convnet.ipynb
apache-2.0
import numpy as np from tensorflow import keras from tensorflow.keras import layers """ Explanation: Simple MNIST convnet Author: fchollet<br> Date created: 2015/06/19<br> Last modified: 2020/04/21<br> Description: A simple convnet that achieves ~99% test accuracy on MNIST. Setup End of explanation """ # Model / data parameters num_classes = 10 input_shape = (28, 28, 1) # the data, split between train and test sets (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Scale images to the [0, 1] range x_train = x_train.astype("float32") / 255 x_test = x_test.astype("float32") / 255 # Make sure images have shape (28, 28, 1) x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) print("x_train shape:", x_train.shape) print(x_train.shape[0], "train samples") print(x_test.shape[0], "test samples") # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) """ Explanation: Prepare the data End of explanation """ model = keras.Sequential( [ keras.Input(shape=input_shape), layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), layers.MaxPooling2D(pool_size=(2, 2)), layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), layers.MaxPooling2D(pool_size=(2, 2)), layers.Flatten(), layers.Dropout(0.5), layers.Dense(num_classes, activation="softmax"), ] ) model.summary() """ Explanation: Build the model End of explanation """ batch_size = 128 epochs = 15 model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1) """ Explanation: Train the model End of explanation """ score = model.evaluate(x_test, y_test, verbose=0) print("Test loss:", score[0]) print("Test accuracy:", score[1]) """ Explanation: Evaluate the trained model End of explanation """
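The notebook above trains and evaluates the model but never runs single-image inference. A small follow-on sketch, reusing only objects the notebook already defines (model, x_test, np), shows how a prediction for one test digit could be obtained:

```python
# Predict the class of the first test image.
probs = model.predict(x_test[:1])              # shape (1, 10): softmax probabilities
predicted_digit = int(np.argmax(probs, axis=1)[0])
print("Predicted digit:", predicted_digit)
```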
tensorflow/docs-l10n
site/ja/tfx/tutorials/tfx/components_keras.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2021 The TensorFlow Authors. End of explanation """ import sys if 'google.colab' in sys.modules: !pip install --upgrade pip """ Explanation: TFX Keras ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฎใƒใƒฅใƒผใƒˆใƒชใ‚ขใƒซ TensorFlow Extended (TFX) ใฎๅ„ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฎ็ดนไป‹ ๆณจ๏ผšใ“ใฎไพ‹ใฏใ€Jupyter ใ‚นใ‚ฟใ‚คใƒซใฎใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใงไปŠใ™ใๅฎŸ่กŒใงใใพใ™ใ€‚ใ‚ปใƒƒใƒˆใ‚ขใƒƒใƒ—ใฏๅฟ…่ฆใ‚ใ‚Šใพใ›ใ‚“ใ€‚ใ€ŒGoogle Colab ใงๅฎŸ่กŒใ€ใ‚’ใ‚ฏใƒชใƒƒใ‚ฏใ™ใ‚‹ใ ใ‘ใงใ™ <div class="devsite-table-wrapper"><table class="tfo-notebook-buttons" align="left"> <td><a target="_blank" href="https://www.tensorflow.org/tfx/tutorials/tfx/components_keras"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org ใง่กจ็คบ</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/tfx/tutorials/tfx/components_keras.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab ใงๅฎŸ่กŒ</a></td> <td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/tfx/tutorials/tfx/components_keras.ipynb"> <img width="32px" src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub ใงใ‚ฝใƒผใ‚นใ‚’่กจ็คบ</a></td> <td><a target="_blank" href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/tfx/tutorials/tfx/components_keras.ipynb"> <img width="32px" src="https://www.tensorflow.org/images/download_logo_32px.png">ใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใ‚’ใƒ€ใ‚ฆใƒณใƒญใƒผใƒ‰</a></td> </table></div> ใ“ใฎ Colab ใƒ™ใƒผใ‚นใฎใƒใƒฅใƒผใƒˆใƒชใ‚ขใƒซใงใฏใ€TensorFlow Extended (TFX) ใฎใใ‚Œใžใ‚Œใฎ็ต„ใฟ่พผใฟใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใ‚’ใ‚คใƒณใ‚ฟใƒฉใ‚ฏใƒ†ใ‚ฃใƒ–ใซ่ชฌๆ˜Žใ—ใพใ™ใ€‚ ใ“ใ“ใงใฏใƒ‡ใƒผใ‚ฟใฎๅ–ใ‚Š่พผใฟใ‹ใ‚‰ใƒขใƒ‡ใƒซใฎใƒ—ใƒƒใ‚ทใƒฅใ€ใ‚ตใƒผใƒ“ใƒณใ‚ฐใพใงใ€ใ‚จใƒณใƒ‰ ใƒ„ใƒผ ใ‚จใƒณใƒ‰ใฎๆฉŸๆขฐๅญฆ็ฟ’ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใฎใ™ในใฆใฎใ‚นใƒ†ใƒƒใƒ—ใ‚’่ฆ‹ใฆใ„ใใพใ™ใ€‚ ๅฎŒไบ†ใ—ใŸใ‚‰ใ€ใ“ใฎใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใฎใ‚ณใƒณใƒ†ใƒณใƒ„ใ‚’ TFX ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณ ใ‚ฝใƒผใ‚น ใ‚ณใƒผใƒ‰ใจใ—ใฆ่‡ชๅ‹•็š„ใซใ‚จใ‚ฏใ‚นใƒใƒผใƒˆใงใใพใ™ใ€‚ใ“ใ‚Œใฏใ€Apache Airflow ใŠใ‚ˆใณ Apache Beam ใจใ‚ชใƒผใ‚ฑใ‚นใƒˆใƒฌใƒผใ‚ทใƒงใƒณใงใใพใ™ใ€‚ ๆณจๆ„: ใ“ใฎใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใฏใ€TFX ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใงใฎใƒใ‚คใƒ†ใ‚ฃใƒ– Keras ใƒขใƒ‡ใƒซใฎไฝฟ็”จใ‚’็คบใ—ใฆใ„ใพใ™ใ€‚TFX ใฏ TensorFlow 2 ใƒใƒผใ‚ธใƒงใƒณใฎ Keras ใฎใฟใ‚’ใ‚ตใƒใƒผใƒˆใ—ใพใ™ใ€‚ ่ƒŒๆ™ฏๆƒ…ๅ ฑ ใ“ใฎใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใฏใ€Jupyter/Colab ็’ฐๅขƒใง TFX ใ‚’ไฝฟ็”จใ™ใ‚‹ๆ–นๆณ•ใ‚’็คบใ—ใฆใ„ใพใ™ใ€‚ ใ“ใ“ใงใฏใ€ใ‚คใƒณใ‚ฟใƒฉใ‚ฏใƒ†ใ‚ฃใƒ–ใชใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใงใ‚ทใ‚ซใ‚ดใฎใ‚ฟใ‚ฏใ‚ทใƒผใฎไพ‹ใ‚’่ฆ‹ใฆใ„ใใพใ™ใ€‚ TFX ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใฎๆง‹้€ ใซๆ…ฃใ‚Œใ‚‹ใฎใซใฏใ€ใ‚คใƒณใ‚ฟใƒฉใ‚ฏใƒ†ใ‚ฃใƒ–ใชใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใงไฝœๆฅญใ™ใ‚‹ใฎใŒไพฟๅˆฉใงใ™ใ€‚็‹ฌ่‡ชใฎใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใ‚’่ปฝ้‡ใฎ้–‹็™บ็’ฐๅขƒใจใ—ใฆ้–‹็™บใ™ใ‚‹ๅ ดๅˆใซใ‚‚ๅฝน็ซ‹ใกใพใ™ใŒใ€ใ‚คใƒณใ‚ฟใƒฉใ‚ฏใƒ†ใ‚ฃใƒ– ใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใฎใ‚ชใƒผใ‚ฑใ‚นใƒˆใƒฌใƒผใ‚ทใƒงใƒณใจใƒกใ‚ฟใƒ‡ใƒผใ‚ฟ 
ใ‚ขใƒผใƒ†ใ‚ฃใƒ•ใ‚กใ‚ฏใƒˆใธใฎใ‚ขใ‚ฏใ‚ปใ‚นๆ–นๆณ•ใซใฏ้•ใ„ใŒใ‚ใ‚‹ใฎใงๆณจๆ„ใ—ใฆใใ ใ•ใ„ใ€‚ ใ‚ชใƒผใ‚ฑใ‚นใƒˆใƒฌใƒผใ‚ทใƒงใƒณ TFX ใฎๅฎŸ็จผๅƒใƒ‡ใƒ—ใƒญใ‚คใƒกใƒณใƒˆใงใฏใ€Apache Airflowใ€Kubeflow Pipelinesใ€Apache Beam ใชใฉใฎใ‚ชใƒผใ‚ฑใ‚นใƒˆใƒฌใƒผใ‚ฟใƒผใ‚’ไฝฟ็”จใ—ใฆใ€TFX ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฎไบ‹ๅ‰ๅฎš็พฉๆธˆใฟใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณ ใ‚ฐใƒฉใƒ•ใ‚’ใ‚ชใƒผใ‚ฑใ‚นใƒˆใƒฌใƒผใ‚ทใƒงใƒณใ—ใพใ™ใ€‚ใ‚คใƒณใ‚ฟใƒฉใ‚ฏใƒ†ใ‚ฃใƒ–ใชใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใงใฏใ€ใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏ่‡ชไฝ“ใŒใ‚ชใƒผใ‚ฑใ‚นใƒˆใƒฌใƒผใ‚ฟใƒผใงใ‚ใ‚Šใ€ใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏ ใ‚ปใƒซใ‚’ๅฎŸ่กŒใ™ใ‚‹ใจใใซใใ‚Œใžใ‚Œใฎ TFX ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใ‚’ๅฎŸ่กŒใ—ใพใ™ใ€‚ ใƒกใ‚ฟใƒ‡ใƒผใ‚ฟ TFX ใฎๅฎŸ็จผๅƒใƒ‡ใƒ—ใƒญใ‚คใƒกใƒณใƒˆใงใฏใ€ML Metadata๏ผˆMLMD๏ผ‰API ใ‚’ไป‹ใ—ใฆใƒกใ‚ฟใƒ‡ใƒผใ‚ฟใซใ‚ขใ‚ฏใ‚ปใ‚นใ—ใพใ™ใ€‚MLMD ใฏใ€ใƒกใ‚ฟใƒ‡ใƒผใ‚ฟ ใƒ—ใƒญใƒ‘ใƒ†ใ‚ฃใ‚’ MySQL ใ‚„ SQLite ใชใฉใฎใƒ‡ใƒผใ‚ฟใƒ™ใƒผใ‚นใซๆ ผ็ดใ—ใ€ใƒกใ‚ฟใƒ‡ใƒผใ‚ฟ ใƒšใ‚คใƒญใƒผใƒ‰ใ‚’ใƒ•ใ‚กใ‚คใƒซ ใ‚ทใ‚นใƒ†ใƒ ใชใฉใฎๆฐธ็ถšใ‚นใƒˆใ‚ขใซไฟๅญ˜ใ—ใพใ™ใ€‚ใ‚คใƒณใ‚ฟใƒฉใ‚ฏใƒ†ใ‚ฃใƒ–ใชใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใงใฏใ€ใƒ—ใƒญใƒ‘ใƒ†ใ‚ฃใจใƒšใ‚คใƒญใƒผใƒ‰ใฎไธกๆ–นใŒใ€Jupyter ใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใพใŸใฏ Colab ใ‚ตใƒผใƒใƒผใฎ /tmp ใƒ‡ใ‚ฃใƒฌใ‚ฏใƒˆใƒชใซใ‚ใ‚‹ใ‚จใƒ•ใ‚งใƒกใƒฉใƒซ SQLite ใƒ‡ใƒผใ‚ฟใƒ™ใƒผใ‚นใซไฟๅญ˜ใ•ใ‚Œใพใ™ใ€‚ ใ‚ปใƒƒใƒˆใ‚ขใƒƒใƒ— ใพใšใ€ๅฟ…่ฆใชใƒ‘ใƒƒใ‚ฑใƒผใ‚ธใ‚’ใ‚คใƒณใ‚นใƒˆใƒผใƒซใ—ใฆใ‚คใƒณใƒใƒผใƒˆใ—ใ€ใƒ‘ใ‚นใ‚’่จญๅฎšใ—ใฆใ€ใƒ‡ใƒผใ‚ฟใ‚’ใƒ€ใ‚ฆใƒณใƒญใƒผใƒ‰ใ—ใพใ™ใ€‚ Pip ใฎใ‚ขใƒƒใƒ—ใ‚ฐใƒฌใƒผใƒ‰ ใƒญใƒผใ‚ซใƒซใงๅฎŸ่กŒใ™ใ‚‹ๅ ดๅˆใซใ‚ทใ‚นใƒ†ใƒ  Pipใ‚’ใ‚ขใƒƒใƒ—ใ‚ฐใƒฌใƒผใƒ‰ใ—ใชใ„ใ‚ˆใ†ใซใ™ใ‚‹ใซใฏใ€Colab ใงๅฎŸ่กŒใ—ใฆใ„ใ‚‹ใ“ใจใ‚’็ขบ่ชใ—ใฆใใ ใ•ใ„ใ€‚ใ‚‚ใกใ‚ใ‚“ใ€ใƒญใƒผใ‚ซใƒซใ‚ทใ‚นใƒ†ใƒ ใฏๅ€‹ๅˆฅใซใ‚ขใƒƒใƒ—ใ‚ฐใƒฌใƒผใƒ‰ใงใใพใ™ใ€‚ End of explanation """ !pip install -U tfx """ Explanation: TFX ใ‚’ใ‚คใƒณใ‚นใƒˆใƒผใƒซใ™ใ‚‹ ๆณจ๏ผšGoogle Colab ใงใฏใ€ใƒ‘ใƒƒใ‚ฑใƒผใ‚ธใŒๆ›ดๆ–ฐใ•ใ‚Œใ‚‹ใŸใ‚ใ€ใ“ใฎใ‚ปใƒซใ‚’ๅˆใ‚ใฆๅฎŸ่กŒใ™ใ‚‹ใจใใซใ€ใƒฉใƒณใ‚ฟใ‚คใƒ ใ‚’ๅ†่ตทๅ‹•ใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใพใ™๏ผˆ[ใƒฉใƒณใ‚ฟใ‚คใƒ ]&gt; [ใƒฉใƒณใ‚ฟใ‚คใƒ ใฎๅ†่ตทๅ‹•...]๏ผ‰ใ€‚ End of explanation """ import os import pprint import tempfile import urllib import absl import tensorflow as tf import tensorflow_model_analysis as tfma tf.get_logger().propagate = False pp = pprint.PrettyPrinter() from tfx import v1 as tfx from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext %load_ext tfx.orchestration.experimental.interactive.notebook_extensions.skip """ Explanation: ใƒฉใƒณใ‚ฟใ‚คใƒ ใ‚’ๅ†่ตทๅ‹•ใ—ใพใ—ใŸใ‹๏ผŸ Google Colab ใ‚’ไฝฟ็”จใ—ใฆใ„ใ‚‹ๅ ดๅˆใฏใ€ไธŠ่จ˜ใฎใ‚ปใƒซใ‚’ๅˆใ‚ใฆๅฎŸ่กŒใ™ใ‚‹ใจใใซใƒฉใƒณใ‚ฟใ‚คใƒ ใ‚’ๅ†่ตทๅ‹•ใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใพใ™๏ผˆ[ใƒฉใƒณใ‚ฟใ‚คใƒ ]&gt; [ใƒฉใƒณใ‚ฟใ‚คใƒ ใฎๅ†่ตทๅ‹•...]๏ผ‰ใ€‚ ใ“ใ‚Œใฏใ€Colab ใŒใƒ‘ใƒƒใ‚ฑใƒผใ‚ธใ‚’่ชญใฟ่พผใ‚€ใŸใ‚ใซๅฟ…่ฆใงใ™ใงใ™ใ€‚ ใƒ‘ใƒƒใ‚ฑใƒผใ‚ธใ‚’ใ‚คใƒณใƒใƒผใƒˆใ™ใ‚‹ ๆจ™ๆบ–ใฎ TFX ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆ ใ‚ฏใƒฉใ‚นใ‚’ๅซใ‚€ๅฟ…่ฆใชใƒ‘ใƒƒใ‚ฑใƒผใ‚ธใ‚’ใ‚คใƒณใƒใƒผใƒˆใ—ใพใ™ใ€‚ End of explanation """ print('TensorFlow version: {}'.format(tf.__version__)) print('TFX version: {}'.format(tfx.__version__)) """ Explanation: ใƒฉใ‚คใƒ–ใƒฉใƒชใฎใƒใƒผใ‚ธใƒงใƒณใ‚’็ขบ่ชใ—ใพใ™ใ€‚ End of explanation """ # This is the root directory for your TFX pip package installation. _tfx_root = tfx.__path__[0] # This is the directory containing the TFX Chicago Taxi Pipeline example. _taxi_root = os.path.join(_tfx_root, 'examples/chicago_taxi_pipeline') # This is the path where your model will be pushed for serving. 
_serving_model_dir = os.path.join( tempfile.mkdtemp(), 'serving_model/taxi_simple') # Set up logging. absl.logging.set_verbosity(absl.logging.INFO) """ Explanation: ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณ ใƒ‘ใ‚นใ‚’่จญๅฎš End of explanation """ _data_root = tempfile.mkdtemp(prefix='tfx-data') DATA_PATH = 'https://raw.githubusercontent.com/tensorflow/tfx/master/tfx/examples/chicago_taxi_pipeline/data/simple/data.csv' _data_filepath = os.path.join(_data_root, "data.csv") urllib.request.urlretrieve(DATA_PATH, _data_filepath) """ Explanation: ใ‚ตใƒณใƒ—ใƒซใƒ‡ใƒผใ‚ฟใฎใƒ€ใ‚ฆใƒณใƒญใƒผใƒ‰ TFX ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใงไฝฟ็”จใ™ใ‚‹ใ‚ตใƒณใƒ—ใƒซ ใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใ‚’ใƒ€ใ‚ฆใƒณใƒญใƒผใƒ‰ใ—ใพใ™ใ€‚ ไฝฟ็”จใ—ใฆใ„ใ‚‹ใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใฏใ€ใ‚ทใ‚ซใ‚ดๅธ‚ใŒใƒชใƒชใƒผใ‚นใ—ใŸ ใ‚ฟใ‚ฏใ‚ทใƒผใƒˆใƒชใƒƒใƒ—ใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใงใ™ใ€‚ ใ“ใฎใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใฎๅˆ—ใฏๆฌกใฎใจใŠใ‚Šใงใ™ใ€‚ <table> <tr> <td>pickup_community_area</td> <td>fare</td> <td>trip_start_month</td> </tr> <tr> <td>trip_start_hour</td> <td>trip_start_day</td> <td>trip_start_timestamp</td> </tr> <tr> <td>pickup_latitude</td> <td>pickup_longitude</td> <td>dropoff_latitude</td> </tr> <tr> <td>dropoff_longitude</td> <td>trip_miles</td> <td>pickup_census_tract</td> </tr> <tr> <td>dropoff_census_tract</td> <td>payment_type</td> <td>company</td> </tr> <tr> <td>trip_seconds</td> <td>dropoff_community_area</td> <td>tips</td> </tr> </table> ใ“ใฎใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใ‚’ไฝฟ็”จใ—ใฆใ€ใ‚ฟใ‚ฏใ‚ทใƒผไน—่ปŠใฎtipsใ‚’ไบˆๆธฌใ™ใ‚‹ใƒขใƒ‡ใƒซใ‚’ๆง‹็ฏ‰ใ—ใพใ™ใ€‚ End of explanation """ !head {_data_filepath} """ Explanation: CSV ใƒ•ใ‚กใ‚คใƒซใ‚’่ฆ‹ใฆใฟใพใ—ใ‚‡ใ†ใ€‚ End of explanation """ # Here, we create an InteractiveContext using default parameters. This will # use a temporary directory with an ephemeral ML Metadata database instance. # To use your own pipeline root or database, the optional properties # `pipeline_root` and `metadata_connection_config` may be passed to # InteractiveContext. Calls to InteractiveContext are no-ops outside of the # notebook. 
context = InteractiveContext() """ Explanation: ๆณจ๏ผšใ“ใฎWeb ใ‚ตใ‚คใƒˆใฏใ€ใ‚ทใ‚ซใ‚ดๅธ‚ใฎๅ…ฌๅผ Web ใ‚ตใ‚คใƒˆ www.cityofchicago.org ใงๅ…ฌ้–‹ใ•ใ‚ŒใŸใƒ‡ใƒผใ‚ฟใ‚’ๅค‰ๆ›ดใ—ใฆไฝฟ็”จใ™ใ‚‹ใ‚ขใƒ—ใƒชใ‚ฑใƒผใ‚ทใƒงใƒณใ‚’ๆไพ›ใ—ใพใ™ใ€‚ใ‚ทใ‚ซใ‚ดๅธ‚ใฏใ€ใ“ใฎ Web ใ‚ตใ‚คใƒˆใงๆไพ›ใ•ใ‚Œใ‚‹ใƒ‡ใƒผใ‚ฟใฎๅ†…ๅฎนใ€ๆญฃ็ขบๆ€งใ€้ฉๆ™‚ๆ€งใ€ใพใŸใฏๅฎŒๅ…จๆ€งใซใคใ„ใฆไธ€ๅˆ‡ใฎ่กจๆ˜Žใ‚’่กŒใ„ใพใ›ใ‚“ใ€‚ใ“ใฎ Web ใ‚ตใ‚คใƒˆใงๆไพ›ใ•ใ‚Œใ‚‹ใƒ‡ใƒผใ‚ฟใฏใ€ใ„ใคใงใ‚‚ๅค‰ๆ›ดใ•ใ‚Œใ‚‹ๅฏ่ƒฝๆ€งใŒใ‚ใ‚Šใพใ™ใ€‚ใ‹ใ‹ใ‚‹ Web ใ‚ตใ‚คใƒˆใงๆไพ›ใ•ใ‚Œใ‚‹ใƒ‡ใƒผใ‚ฟใฏใƒฆใƒผใ‚ถใƒผใฎ่‡ชๅทฑ่ฒฌไปปใงๅˆฉ็”จใ•ใ‚Œใ‚‹ใ‚‚ใฎใจใ—ใพใ™ใ€‚ InteractiveContext ใ‚’ไฝœๆˆใ™ใ‚‹ ๆœ€ๅพŒใซใ€ใ“ใฎใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใง TFX ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใ‚’ใ‚คใƒณใ‚ฟใƒฉใ‚ฏใƒ†ใ‚ฃใƒ–ใซๅฎŸ่กŒใงใใ‚‹ใ‚ˆใ†ใซใ™ใ‚‹ InteractiveContext ใ‚’ไฝœๆˆใ—ใพใ™ใ€‚ End of explanation """ example_gen = tfx.components.CsvExampleGen(input_base=_data_root) context.run(example_gen, enable_cache=True) """ Explanation: TFX ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใ‚’ใ‚คใƒณใ‚ฟใƒฉใ‚ฏใƒ†ใ‚ฃใƒ–ใซๅฎŸ่กŒใ™ใ‚‹ ๆฌกใฎใ‚ปใƒซใงใฏใ€TFX ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใ‚’ 1 ใคใšใคไฝœๆˆใ—ใ€ใใ‚Œใžใ‚Œใ‚’ๅฎŸ่กŒใ—ใฆใ€ๅ‡บๅŠ›ใ‚ขใƒผใƒ†ใ‚ฃใƒ•ใ‚กใ‚ฏใƒˆใ‚’่ฆ–่ฆšๅŒ–ใ—ใพใ™ใ€‚ ExampleGen ExampleGen ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฏ้€šๅธธใ€TFX ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใฎๅ…ˆ้ ญใซใ‚ใ‚Šใ€ไปฅไธ‹ใ‚’ๅฎŸ่กŒใ—ใพใ™ใ€‚ ใƒ‡ใƒผใ‚ฟใ‚’ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐ ใ‚ปใƒƒใƒˆใจ่ฉ•ไพกใ‚ปใƒƒใƒˆใซๅˆ†ๅ‰ฒใ—ใพใ™ (ใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใงใฏใ€2/3 ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐ + 1/3 ่ฉ•ไพก)ใ€‚ ใƒ‡ใƒผใ‚ฟใ‚’ tf.Example ๅฝขๅผใซๅค‰ๆ›ใ—ใพใ™ใ€‚ (่ฉณ็ดฐใฏใ“ใกใ‚‰) ไป–ใฎใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใŒใ‚ขใ‚ฏใ‚ปใ‚นใงใใ‚‹ใ‚ˆใ†ใซใ€ใƒ‡ใƒผใ‚ฟใ‚’ _tfx_root ใƒ‡ใ‚ฃใƒฌใ‚ฏใƒˆใƒชใซใ‚ณใƒ”ใƒผใ—ใพใ™ใ€‚ ExampleGen ใฏใ€ใƒ‡ใƒผใ‚ฟใ‚ฝใƒผใ‚นใธใฎใƒ‘ใ‚นใ‚’ๅ…ฅๅŠ›ใจใ—ใฆๅ—ใ‘ๅ–ใ‚Šใพใ™ใ€‚ ใ“ใ“ใงใฏใ€ใ“ใ‚Œใฏใƒ€ใ‚ฆใƒณใƒญใƒผใƒ‰ใ—ใŸ CSV ใ‚’ๅซใ‚€ _data_root ใƒ‘ใ‚นใงใ™ใ€‚ ๆณจๆ„: ใ“ใฎใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใงใฏใ€ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใ‚’ 1 ใคใšใคใ‚คใƒณใ‚นใ‚ฟใƒณใ‚นๅŒ–ใ—ใ€InteractiveContext.run() ใงๅฎŸ่กŒใ—ใพใ™ใŒใ€ๅฎŸ็จผๅƒ็’ฐๅขƒใงใฏใ€ใ™ในใฆใฎใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใ‚’ไบ‹ๅ‰ใซ PipelineใงๆŒ‡ๅฎšใ—ใฆใ€ใ‚ชใƒผใ‚ฑใ‚นใƒˆใƒฌใƒผใ‚ฟใƒผใซๆธกใ—ใพใ™๏ผˆTFX ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณ ใ‚ฌใ‚คใƒ‰ใฎๆง‹็ฏ‰ใ‚’ๅ‚็…งใ—ใฆใใ ใ•ใ„๏ผ‰ใ€‚ ใ‚ญใƒฃใƒƒใ‚ทใƒฅใ‚’ๆœ‰ๅŠนใซใ™ใ‚‹ ใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใง InteractiveContext ใ‚’ไฝฟ็”จใ—ใฆใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใ‚’ไฝœๆˆใ—ใฆใ„ใ‚‹ๅ ดๅˆใ€ๅ€‹ๅˆฅใฎใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใŒๅ‡บๅŠ›ใ‚’ใ‚ญใƒฃใƒƒใ‚ทใƒฅใ™ใ‚‹ใ‚ฟใ‚คใƒŸใƒณใ‚ฐใ‚’ๅˆถๅพกใ™ใ‚‹ใ“ใจใŒใงใใพใ™ใ€‚ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใŒๅ‰ใซ็”Ÿๆˆใ—ใŸๅ‡บๅŠ›ใ‚ขใƒผใƒ†ใ‚ฃใƒ•ใ‚กใ‚ฏใƒˆใ‚’ๅ†ๅˆฉ็”จใ™ใ‚‹ๅ ดๅˆใฏใ€enable_cache ใ‚’ True ใซ่จญๅฎšใ—ใพใ™ใ€‚ใ‚ณใƒผใƒ‰ใ‚’ๅค‰ๆ›ดใ™ใ‚‹ใชใฉใซใ‚ˆใ‚Šใ€ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฎๅ‡บๅŠ›ใ‚ขใƒผใƒ†ใ‚ฃใƒ•ใ‚กใ‚ฏใƒˆใ‚’ๅ†่จˆ็ฎ—ใ™ใ‚‹ๅ ดๅˆใฏใ€enable_cache ใ‚’ False ใซ่จญๅฎšใ—ใพใ™ใ€‚ End of explanation """ artifact = example_gen.outputs['examples'].get()[0] print(artifact.split_names, artifact.uri) """ Explanation: ExampleGenใฎๅ‡บๅŠ›ใ‚ขใƒผใƒ†ใ‚ฃใƒ•ใ‚กใ‚ฏใƒˆใ‚’่ชฟในใฆใฟใพใ—ใ‚‡ใ†ใ€‚ใ“ใฎใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฏใ€ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ‚ตใƒณใƒ—ใƒซใจ่ฉ•ไพกใ‚ตใƒณใƒ—ใƒซใฎ 2 ใคใฎใ‚ขใƒผใƒ†ใ‚ฃใƒ•ใ‚กใ‚ฏใƒˆใ‚’็”Ÿๆˆใ—ใพใ™ใ€‚ End of explanation """ # Get the URI of the output artifact representing the training examples, which is a directory train_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'Split-train') # Get the list of files in this directory (all compressed TFRecord files) tfrecord_filenames = [os.path.join(train_uri, name) for name in os.listdir(train_uri)] # Create a `TFRecordDataset` to read these 
files dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP") # Iterate over the first 3 records and decode them. for tfrecord in dataset.take(3): serialized_example = tfrecord.numpy() example = tf.train.Example() example.ParseFromString(serialized_example) pp.pprint(example) """ Explanation: ใพใŸใ€ๆœ€ๅˆใฎ 3 ใคใฎใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ‚ตใƒณใƒ—ใƒซใ‚‚่ฆ‹ใฆใฟใพใ™ใ€‚ End of explanation """ statistics_gen = tfx.components.StatisticsGen( examples=example_gen.outputs['examples']) context.run(statistics_gen, enable_cache=True) """ Explanation: ExampleGenใŒใƒ‡ใƒผใ‚ฟใฎๅ–ใ‚Š่พผใฟใ‚’ๅฎŒไบ†ใ—ใŸใฎใงใ€ๆฌกใฎใ‚นใƒ†ใƒƒใƒ—ใ€ใƒ‡ใƒผใ‚ฟๅˆ†ๆžใซ้€ฒใฟใพใ™ใ€‚ StatisticsGen StatisticsGenใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฏใ€ใƒ‡ใƒผใ‚ฟๅˆ†ๆž็”จใฎใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใฎ็ตฑ่จˆใ‚’่จˆ็ฎ—ใ—ใ€ใƒ€ใ‚ฆใƒณใ‚นใƒˆใƒชใƒผใƒ ใฎใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใงไฝฟ็”จใ—ใพใ™ใ€‚ใ“ใ‚Œใฏใ€TensorFlow Data Validation ใƒฉใ‚คใƒ–ใƒฉใƒชใ‚’ไฝฟ็”จใ—ใพใ™ใ€‚ StatisticsGenใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฏใ€ใƒ‡ใƒผใ‚ฟๅˆ†ๆž็”จใฎใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใฎ็ตฑ่จˆใ‚’่จˆ็ฎ—ใ—ใ€ใƒ€ใ‚ฆใƒณใ‚นใƒˆใƒชใƒผใƒ  ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใงไฝฟ็”จใ—ใพใ™ใ€‚ End of explanation """ context.show(statistics_gen.outputs['statistics']) """ Explanation: StatisticsGen ใฎๅฎŸ่กŒใŒๅฎŒไบ†ใ™ใ‚‹ใจใ€ๅ‡บๅŠ›ใ•ใ‚ŒใŸ็ตฑ่จˆใ‚’่ฆ–่ฆšๅŒ–ใงใใพใ™ใ€‚ ่‰ฒใ€…ใชใƒ—ใƒญใƒƒใƒˆใ‚’่ฉฆใ—ใฆใฟใฆใใ ใ•ใ„๏ผ End of explanation """ schema_gen = tfx.components.SchemaGen( statistics=statistics_gen.outputs['statistics'], infer_feature_shape=False) context.run(schema_gen, enable_cache=True) """ Explanation: SchemaGen SchemaGen ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฏใ€ใƒ‡ใƒผใ‚ฟ็ตฑ่จˆใซๅŸบใฅใ„ใฆใ‚นใ‚ญใƒผใƒžใ‚’็”Ÿๆˆใ—ใพใ™ใ€‚๏ผˆใ‚นใ‚ญใƒผใƒžใฏใ€ใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆๅ†…ใฎ็‰นๅพดใฎไบˆๆƒณใ•ใ‚Œใ‚‹ๅขƒ็•Œใ€ใ‚ฟใ‚คใƒ—ใ€ใƒ—ใƒญใƒ‘ใƒ†ใ‚ฃใ‚’ๅฎš็พฉใ—ใพใ™ใ€‚๏ผ‰ใพใŸใ€TensorFlow ใƒ‡ใƒผใ‚ฟๆคœ่จผใƒฉใ‚คใƒ–ใƒฉใƒชใ‚‚ไฝฟ็”จใ—ใพใ™ใ€‚ ๆณจๆ„: ็”Ÿๆˆใ•ใ‚ŒใŸใ‚นใ‚ญใƒผใƒžใฏใƒ™ใ‚นใƒˆใ‚จใƒ•ใ‚ฉใƒผใƒˆใฎใ‚‚ใฎใงใ€ใƒ‡ใƒผใ‚ฟใฎๅŸบๆœฌ็š„ใชใƒ—ใƒญใƒ‘ใƒ†ใ‚ฃใ ใ‘ใ‚’ๆŽจ่ซ–ใ—ใ‚ˆใ†ใจใ—ใพใ™ใ€‚็ขบ่ชใ—ใ€ๅฟ…่ฆใซๅฟœใ˜ใฆไฟฎๆญฃใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใพใ™ใ€‚ SchemaGen ใฏใ€StatisticsGen ใง็”Ÿๆˆใ—ใŸ็ตฑ่จˆใ‚’ๅ…ฅๅŠ›ใจใ—ใฆๅ—ใ‘ๅ–ใ‚Šใ€ใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใงใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐๅˆ†ๅ‰ฒใ‚’ๅ‚็…งใ—ใพใ™ใ€‚ End of explanation """ context.show(schema_gen.outputs['schema']) """ Explanation: SchemaGen ใฎๅฎŸ่กŒใŒๅฎŒไบ†ใ™ใ‚‹ใจใ€็”Ÿๆˆใ•ใ‚ŒใŸใ‚นใ‚ญใƒผใƒžใ‚’ใƒ†ใƒผใƒ–ใƒซใจใ—ใฆ่ฆ–่ฆšๅŒ–ใงใใพใ™ใ€‚ End of explanation """ example_validator = tfx.components.ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) context.run(example_validator, enable_cache=True) """ Explanation: ใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใฎใใ‚Œใžใ‚Œใฎ็‰นๅพดใฏใ€ใ‚นใ‚ญใƒผใƒž ใƒ†ใƒผใƒ–ใƒซใฎใƒ—ใƒญใƒ‘ใƒ†ใ‚ฃใฎๆจชใซ่กŒใจใ—ใฆ่กจ็คบใ•ใ‚Œใพใ™ใ€‚ใ‚นใ‚ญใƒผใƒžใฏใ€ใƒ‰ใƒกใ‚คใƒณใจใ—ใฆ็คบใ•ใ‚Œใ‚‹ใ€ใ‚ซใƒ†ใ‚ดใƒช็‰นๅพดใŒๅ–ใ‚‹ใ™ในใฆใฎๅ€คใ‚‚ใ‚ญใƒฃใƒ—ใƒใƒฃใ—ใพใ™ใ€‚ ใ‚นใ‚ญใƒผใƒžใฎ่ฉณ็ดฐใซใคใ„ใฆใฏใ€SchemaGen ใฎใƒ‰ใ‚ญใƒฅใƒกใƒณใƒˆใ‚’ใ”่ฆงใใ ใ•ใ„ใ€‚ ExampleValidator ExampleValidator ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฏใ€ใ‚นใ‚ญใƒผใƒžใงๅฎš็พฉใ•ใ‚ŒใŸๆœŸๅพ…ใซๅŸบใฅใ„ใฆใ€ใƒ‡ใƒผใ‚ฟใฎ็•ฐๅธธใ‚’ๆคœๅ‡บใ—ใพใ™ใ€‚ใพใŸใ€TensorFlow Data Validation ใƒฉใ‚คใƒ–ใƒฉใƒชใ‚‚ไฝฟ็”จใ—ใพใ™ใ€‚ ExampleValidator ใฏใ€Statistics Gen{/code 1} ใ‹ใ‚‰ใฎ็ตฑ่จˆใจ &lt;code data-md-type="codespan"&gt;SchemaGen ใ‹ใ‚‰ใฎใ‚นใ‚ญใƒผใƒžใ‚’ๅ…ฅๅŠ›ใจใ—ใฆๅ—ใ‘ๅ–ใ‚Šใพใ™ใ€‚ End of explanation """ context.show(example_validator.outputs['anomalies']) """ Explanation: ExampleValidator 
ใฎๅฎŸ่กŒใŒๅฎŒไบ†ใ™ใ‚‹ใจใ€็•ฐๅธธใ‚’ใƒ†ใƒผใƒ–ใƒซใจใ—ใฆ่ฆ–่ฆšๅŒ–ใงใใพใ™ใ€‚ End of explanation """ _taxi_constants_module_file = 'taxi_constants.py' %%writefile {_taxi_constants_module_file} NUMERICAL_FEATURES = ['trip_miles', 'fare', 'trip_seconds'] BUCKET_FEATURES = [ 'pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude' ] # Number of buckets used by tf.transform for encoding each feature. FEATURE_BUCKET_COUNT = 10 CATEGORICAL_NUMERICAL_FEATURES = [ 'trip_start_hour', 'trip_start_day', 'trip_start_month', 'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area', 'dropoff_community_area' ] CATEGORICAL_STRING_FEATURES = [ 'payment_type', 'company', ] # Number of vocabulary terms used for encoding categorical features. VOCAB_SIZE = 1000 # Count of out-of-vocab buckets in which unrecognized categorical are hashed. OOV_SIZE = 10 # Keys LABEL_KEY = 'tips' FARE_KEY = 'fare' def t_name(key): """ Rename the feature keys so that they don't clash with the raw keys when running the Evaluator component. Args: key: The original feature key Returns: key with '_xf' appended """ return key + '_xf' """ Explanation: ็•ฐๅธธใƒ†ใƒผใƒ–ใƒซใงใฏใ€็•ฐๅธธใŒใชใ„ใ“ใจใŒใ‚ใ‹ใ‚Šใพใ™ใ€‚ใ“ใ‚Œใฏใ€ๅˆ†ๆžใ—ใŸๆœ€ๅˆใฎใƒ‡ใƒผใ‚ฟใ‚ปใƒƒใƒˆใงใ€ใ‚นใ‚ญใƒผใƒžใฏใ“ใ‚Œใซๅˆใ‚ใ›ใฆ่ชฟๆ•ดใ•ใ‚Œใฆใ„ใ‚‹ใŸใ‚ใ€็•ฐๅธธใŒใชใ„ใ“ใจใŒไบˆๆƒณใ•ใ‚Œใพใ™ใ€‚ใ“ใฎใ‚นใ‚ญใƒผใƒžใ‚’็ขบ่ชใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใพใ™ใ€‚ไบˆๆœŸใ•ใ‚Œใชใ„ใ‚‚ใฎใฏใ€ใƒ‡ใƒผใ‚ฟใซ็•ฐๅธธใŒใ‚ใ‚‹ใ“ใจใ‚’ๆ„ๅ‘ณใ—ใพใ™ใ€‚็ขบ่ชใ•ใ‚ŒใŸใ‚นใ‚ญใƒผใƒžใ‚’ไฝฟ็”จใ—ใฆๅฐ†ๆฅใฎใƒ‡ใƒผใ‚ฟใ‚’ไฟ่ญทใงใใพใ™ใ€‚ใ“ใ“ใง็”Ÿๆˆใ•ใ‚ŒใŸ็•ฐๅธธใฏใ€ใƒขใƒ‡ใƒซใฎใƒ‘ใƒ•ใ‚ฉใƒผใƒžใƒณใ‚นใ‚’ใƒ‡ใƒใƒƒใ‚ฐใ—ใ€ใƒ‡ใƒผใ‚ฟใŒๆ™‚้–“ใฎ็ตŒ้Žใจใจใ‚‚ใซใฉใฎใ‚ˆใ†ใซๅค‰ๅŒ–ใ™ใ‚‹ใ‹ใ‚’็†่งฃใ—ใ€ใƒ‡ใƒผใ‚ฟ ใ‚จใƒฉใƒผใ‚’็‰นๅฎšใ™ใ‚‹ใŸใ‚ใซไฝฟ็”จใงใใพใ™ใ€‚ ๅค‰ๆ› Transformใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฏใ€ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใจใ‚ตใƒผใƒ“ใƒณใ‚ฐใฎไธกๆ–นใง็‰นๅพด้‡ใ‚จใƒณใ‚ธใƒ‹ใ‚ขใƒชใƒณใ‚ฐใ‚’ๅฎŸ่กŒใ—ใพใ™ใ€‚ใ“ใ‚Œใฏใ€ TensorFlow Transform ใƒฉใ‚คใƒ–ใƒฉใƒชใ‚’ไฝฟ็”จใ—ใพใ™ใ€‚ Transformใฏใ€ExampleGenใ‹ใ‚‰ใฎใƒ‡ใƒผใ‚ฟใ€SchemaGenใ‹ใ‚‰ใฎใ‚นใ‚ญใƒผใƒžใ€ใƒฆใƒผใ‚ถใƒผๅฎš็พฉใฎ Transform ใ‚ณใƒผใƒ‰ใ‚’ๅซใ‚€ใƒขใ‚ธใƒฅใƒผใƒซใ‚’ๅ…ฅๅŠ›ใจใ—ใฆๅ—ใ‘ๅ–ใ‚Šใพใ™ใ€‚ ไปฅไธ‹ใฎใƒฆใƒผใ‚ถใƒผๅฎš็พฉใฎ Transform ใ‚ณใƒผใƒ‰ใฎไพ‹ใ‚’่ฆ‹ใฆใฟใพใ—ใ‚‡ใ†๏ผˆTensorFlow Transform API ใฎๆฆ‚่ฆใซใคใ„ใฆใฏใ€ใƒใƒฅใƒผใƒˆใƒชใ‚ขใƒซใ‚’ๅ‚็…งใ—ใฆใใ ใ•ใ„๏ผ‰ใ€‚ใพใšใ€็‰นๅพด้‡ใ‚จใƒณใ‚ธใƒ‹ใ‚ขใƒชใƒณใ‚ฐใฎใ„ใใคใ‹ใฎๅฎšๆ•ฐใ‚’ๅฎš็พฉใ—ใพใ™ใ€‚ ๆณจๆ„: %%writefile ใ‚ปใƒซ ใƒžใ‚ธใƒƒใ‚ฏใฏใ€ใ‚ปใƒซใฎๅ†…ๅฎนใ‚’ใƒ‡ใ‚ฃใ‚นใ‚ฏไธŠใฎ.pyใƒ•ใ‚กใ‚คใƒซใจใ—ใฆไฟๅญ˜ใ—ใพใ™ใ€‚ใ“ใ‚Œใซใ‚ˆใ‚Šใ€Transform ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฏใ‚ณใƒผใƒ‰ใ‚’ใƒขใ‚ธใƒฅใƒผใƒซใจใ—ใฆ่ชญใฟ่พผใ‚€ใ“ใจใŒใงใใพใ™ใ€‚ End of explanation """ _taxi_transform_module_file = 'taxi_transform.py' %%writefile {_taxi_transform_module_file} import tensorflow as tf import tensorflow_transform as tft # Imported files such as taxi_constants are normally cached, so changes are # not honored after the first import. Normally this is good for efficiency, but # during development when we may be iterating code it can be a problem. To # avoid this problem during development, reload the file. 
import taxi_constants import sys if 'google.colab' in sys.modules: # Testing to see if we're doing development import importlib importlib.reload(taxi_constants) _NUMERICAL_FEATURES = taxi_constants.NUMERICAL_FEATURES _BUCKET_FEATURES = taxi_constants.BUCKET_FEATURES _FEATURE_BUCKET_COUNT = taxi_constants.FEATURE_BUCKET_COUNT _CATEGORICAL_NUMERICAL_FEATURES = taxi_constants.CATEGORICAL_NUMERICAL_FEATURES _CATEGORICAL_STRING_FEATURES = taxi_constants.CATEGORICAL_STRING_FEATURES _VOCAB_SIZE = taxi_constants.VOCAB_SIZE _OOV_SIZE = taxi_constants.OOV_SIZE _FARE_KEY = taxi_constants.FARE_KEY _LABEL_KEY = taxi_constants.LABEL_KEY def _make_one_hot(x, key): """Make a one-hot tensor to encode categorical features. Args: X: A dense tensor key: A string key for the feature in the input Returns: A dense one-hot tensor as a float list """ integerized = tft.compute_and_apply_vocabulary(x, top_k=_VOCAB_SIZE, num_oov_buckets=_OOV_SIZE, vocab_filename=key, name=key) depth = ( tft.experimental.get_vocabulary_size_by_name(key) + _OOV_SIZE) one_hot_encoded = tf.one_hot( integerized, depth=tf.cast(depth, tf.int32), on_value=1.0, off_value=0.0) return tf.reshape(one_hot_encoded, [-1, depth]) def _fill_in_missing(x): """Replace missing values in a SparseTensor. Fills in missing values of `x` with '' or 0, and converts to a dense tensor. Args: x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1 in the second dimension. Returns: A rank 1 tensor where missing values of `x` have been filled in. """ if not isinstance(x, tf.sparse.SparseTensor): return x default_value = '' if x.dtype == tf.string else 0 return tf.squeeze( tf.sparse.to_dense( tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]), default_value), axis=1) def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. """ outputs = {} for key in _NUMERICAL_FEATURES: # If sparse make it dense, setting nan's to 0 or '', and apply zscore. outputs[taxi_constants.t_name(key)] = tft.scale_to_z_score( _fill_in_missing(inputs[key]), name=key) for key in _BUCKET_FEATURES: outputs[taxi_constants.t_name(key)] = tf.cast(tft.bucketize( _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT, name=key), dtype=tf.float32) for key in _CATEGORICAL_STRING_FEATURES: outputs[taxi_constants.t_name(key)] = _make_one_hot(_fill_in_missing(inputs[key]), key) for key in _CATEGORICAL_NUMERICAL_FEATURES: outputs[taxi_constants.t_name(key)] = _make_one_hot(tf.strings.strip( tf.strings.as_string(_fill_in_missing(inputs[key]))), key) # Was this passenger a big tipper? taxi_fare = _fill_in_missing(inputs[_FARE_KEY]) tips = _fill_in_missing(inputs[_LABEL_KEY]) outputs[_LABEL_KEY] = tf.where( tf.math.is_nan(taxi_fare), tf.cast(tf.zeros_like(taxi_fare), tf.int64), # Test if the tip was > 20% of the fare. 
tf.cast( tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64)) return outputs """ Explanation: ๆฌกใซใ€็”Ÿใƒ‡ใƒผใ‚ฟใ‚’ๅ…ฅๅŠ›ใจใ—ใฆๅ—ใ‘ๅ–ใ‚Šใ€ใƒขใƒ‡ใƒซใŒใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใงใใ‚‹ๅค‰ๆ›ใ•ใ‚ŒใŸ็‰นๅพด้‡ใ‚’่ฟ”ใ™ {code 0}preprocessing _fn ใ‚’่จ˜่ฟฐใ—ใพใ™ใ€‚ End of explanation """ transform = tfx.components.Transform( examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], module_file=os.path.abspath(_taxi_transform_module_file)) context.run(transform, enable_cache=True) """ Explanation: ๆฌกใซใ€ใ“ใฎ็‰นๅพด้‡ใ‚จใƒณใ‚ธใƒ‹ใ‚ขใƒชใƒณใ‚ฐ ใ‚ณใƒผใƒ‰ใ‚’ Transformใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใซๆธกใ—ใ€ๅฎŸ่กŒใ—ใฆใƒ‡ใƒผใ‚ฟใ‚’ๅค‰ๆ›ใ—ใพใ™ใ€‚ End of explanation """ transform.outputs """ Explanation: Transformใฎๅ‡บๅŠ›ใ‚ขใƒผใƒ†ใ‚ฃใƒ•ใ‚กใ‚ฏใƒˆใ‚’่ชฟในใฆใฟใพใ—ใ‚‡ใ†ใ€‚ใ“ใฎใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฏใ€2 ็จฎ้กžใฎๅ‡บๅŠ›ใ‚’็”Ÿๆˆใ—ใพใ™ใ€‚ transform_graph ใฏใ€ๅ‰ๅ‡ฆ็†ๆผ”็ฎ—ใ‚’ๅฎŸ่กŒใงใใ‚‹ใ‚ฐใƒฉใƒ•ใงใ™ (ใ“ใฎใ‚ฐใƒฉใƒ•ใฏใ€ใ‚ตใƒผใƒ“ใƒณใ‚ฐใƒขใƒ‡ใƒซใจ่ฉ•ไพกใƒขใƒ‡ใƒซใซๅซใพใ‚Œใพใ™)ใ€‚ transformed_examplesใฏๅ‰ๅ‡ฆ็†ใ•ใ‚ŒใŸใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใŠใ‚ˆใณ่ฉ•ไพกใƒ‡ใƒผใ‚ฟใ‚’่กจใ—ใพใ™ใ€‚ End of explanation """ train_uri = transform.outputs['transform_graph'].get()[0].uri os.listdir(train_uri) """ Explanation: transform_graph ใ‚ขใƒผใƒ†ใ‚ฃใƒ•ใ‚กใ‚ฏใƒˆใ‚’่ฆ‹ใฆใฟใพใ—ใ‚‡ใ†ใ€‚ใ“ใ‚Œใฏใ€3 ใคใฎใ‚ตใƒ–ใƒ‡ใ‚ฃใƒฌใ‚ฏใƒˆใƒชใ‚’ๅซใ‚€ใƒ‡ใ‚ฃใƒฌใ‚ฏใƒˆใƒชใ‚’ๆŒ‡ใ—ใฆใ„ใพใ™ใ€‚ End of explanation """ # Get the URI of the output artifact representing the transformed examples, which is a directory train_uri = os.path.join(transform.outputs['transformed_examples'].get()[0].uri, 'Split-train') # Get the list of files in this directory (all compressed TFRecord files) tfrecord_filenames = [os.path.join(train_uri, name) for name in os.listdir(train_uri)] # Create a `TFRecordDataset` to read these files dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP") # Iterate over the first 3 records and decode them. for tfrecord in dataset.take(3): serialized_example = tfrecord.numpy() example = tf.train.Example() example.ParseFromString(serialized_example) pp.pprint(example) """ Explanation: transformed_metadata ใ‚ตใƒ–ใƒ‡ใ‚ฃใƒฌใ‚ฏใƒˆใƒชใซใฏใ€ๅ‰ๅ‡ฆ็†ใ•ใ‚ŒใŸใƒ‡ใƒผใ‚ฟใฎใ‚นใ‚ญใƒผใƒžใŒๅซใพใ‚Œใฆใ„ใพใ™ใ€‚transform_fnใ‚ตใƒ–ใƒ‡ใ‚ฃใƒฌใ‚ฏใƒˆใƒชใซใฏใ€ๅฎŸ้š›ใฎๅ‰ๅ‡ฆ็†ใ‚ฐใƒฉใƒ•ใŒๅซใพใ‚Œใฆใ„ใพใ™ใ€‚metadataใ‚ตใƒ–ใƒ‡ใ‚ฃใƒฌใ‚ฏใƒˆใƒชใซใฏใ€ๅ…ƒใฎใƒ‡ใƒผใ‚ฟใฎใ‚นใ‚ญใƒผใƒžใŒๅซใพใ‚Œใฆใ„ใพใ™ใ€‚ ใพใŸใ€ๆœ€ๅˆใฎ 3 ใคใฎๅค‰ๆ›ใ•ใ‚ŒใŸไพ‹ใ‚‚่ฆ‹ใฆใฟใพใ™ใ€‚ End of explanation """ _taxi_trainer_module_file = 'taxi_trainer.py' %%writefile {_taxi_trainer_module_file} from typing import Dict, List, Text import os import glob from absl import logging import datetime import tensorflow as tf import tensorflow_transform as tft from tfx import v1 as tfx from tfx_bsl.public import tfxio from tensorflow_transform import TFTransformOutput # Imported files such as taxi_constants are normally cached, so changes are # not honored after the first import. Normally this is good for efficiency, but # during development when we may be iterating code it can be a problem. To # avoid this problem during development, reload the file. 
import taxi_constants import sys if 'google.colab' in sys.modules: # Testing to see if we're doing development import importlib importlib.reload(taxi_constants) _LABEL_KEY = taxi_constants.LABEL_KEY _BATCH_SIZE = 40 def _input_fn(file_pattern: List[Text], data_accessor: tfx.components.DataAccessor, tf_transform_output: tft.TFTransformOutput, batch_size: int = 200) -> tf.data.Dataset: """Generates features and label for tuning/training. Args: file_pattern: List of paths or patterns of input tfrecord files. data_accessor: DataAccessor for converting input to RecordBatch. tf_transform_output: A TFTransformOutput. batch_size: representing the number of consecutive elements of returned dataset to combine in a single batch Returns: A dataset that contains (features, indices) tuple where features is a dictionary of Tensors, and indices is a single Tensor of label indices. """ return data_accessor.tf_dataset_factory( file_pattern, tfxio.TensorFlowDatasetOptions( batch_size=batch_size, label_key=_LABEL_KEY), tf_transform_output.transformed_metadata.schema) def _get_tf_examples_serving_signature(model, tf_transform_output): """Returns a serving signature that accepts `tensorflow.Example`.""" # We need to track the layers in the model in order to save it. # TODO(b/162357359): Revise once the bug is resolved. model.tft_layer_inference = tf_transform_output.transform_features_layer() @tf.function(input_signature=[ tf.TensorSpec(shape=[None], dtype=tf.string, name='examples') ]) def serve_tf_examples_fn(serialized_tf_example): """Returns the output to be used in the serving signature.""" raw_feature_spec = tf_transform_output.raw_feature_spec() # Remove label feature since these will not be present at serving time. raw_feature_spec.pop(_LABEL_KEY) raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec) transformed_features = model.tft_layer_inference(raw_features) logging.info('serve_transformed_features = %s', transformed_features) outputs = model(transformed_features) # TODO(b/154085620): Convert the predicted labels from the model using a # reverse-lookup (opposite of transform.py). return {'outputs': outputs} return serve_tf_examples_fn def _get_transform_features_signature(model, tf_transform_output): """Returns a serving signature that applies tf.Transform to features.""" # We need to track the layers in the model in order to save it. # TODO(b/162357359): Revise once the bug is resolved. model.tft_layer_eval = tf_transform_output.transform_features_layer() @tf.function(input_signature=[ tf.TensorSpec(shape=[None], dtype=tf.string, name='examples') ]) def transform_features_fn(serialized_tf_example): """Returns the transformed_features to be fed as input to evaluator.""" raw_feature_spec = tf_transform_output.raw_feature_spec() raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec) transformed_features = model.tft_layer_eval(raw_features) logging.info('eval_transformed_features = %s', transformed_features) return transformed_features return transform_features_fn def export_serving_model(tf_transform_output, model, output_dir): """Exports a keras model for serving. Args: tf_transform_output: Wrapper around output of tf.Transform. model: A keras model to export for serving. output_dir: A directory where the model will be exported to. """ # The layer has to be saved to the model for keras tracking purpases. 
model.tft_layer = tf_transform_output.transform_features_layer() signatures = { 'serving_default': _get_tf_examples_serving_signature(model, tf_transform_output), 'transform_features': _get_transform_features_signature(model, tf_transform_output), } model.save(output_dir, save_format='tf', signatures=signatures) def _build_keras_model(tf_transform_output: TFTransformOutput ) -> tf.keras.Model: """Creates a DNN Keras model for classifying taxi data. Args: tf_transform_output: [TFTransformOutput], the outputs from Transform Returns: A keras Model. """ feature_spec = tf_transform_output.transformed_feature_spec().copy() feature_spec.pop(_LABEL_KEY) inputs = {} for key, spec in feature_spec.items(): if isinstance(spec, tf.io.VarLenFeature): inputs[key] = tf.keras.layers.Input( shape=[None], name=key, dtype=spec.dtype, sparse=True) elif isinstance(spec, tf.io.FixedLenFeature): # TODO(b/208879020): Move into schema such that spec.shape is [1] and not # [] for scalars. inputs[key] = tf.keras.layers.Input( shape=spec.shape or [1], name=key, dtype=spec.dtype) else: raise ValueError('Spec type is not supported: ', key, spec) output = tf.keras.layers.Concatenate()(tf.nest.flatten(inputs)) output = tf.keras.layers.Dense(100, activation='relu')(output) output = tf.keras.layers.Dense(70, activation='relu')(output) output = tf.keras.layers.Dense(50, activation='relu')(output) output = tf.keras.layers.Dense(20, activation='relu')(output) output = tf.keras.layers.Dense(1)(output) return tf.keras.Model(inputs=inputs, outputs=output) # TFX Trainer will call this function. def run_fn(fn_args: tfx.components.FnArgs): """Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. """ tf_transform_output = tft.TFTransformOutput(fn_args.transform_output) train_dataset = _input_fn(fn_args.train_files, fn_args.data_accessor, tf_transform_output, _BATCH_SIZE) eval_dataset = _input_fn(fn_args.eval_files, fn_args.data_accessor, tf_transform_output, _BATCH_SIZE) model = _build_keras_model(tf_transform_output) model.compile( loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics=[tf.keras.metrics.BinaryAccuracy()]) tensorboard_callback = tf.keras.callbacks.TensorBoard( log_dir=fn_args.model_run_dir, update_freq='batch') model.fit( train_dataset, steps_per_epoch=fn_args.train_steps, validation_data=eval_dataset, validation_steps=fn_args.eval_steps, callbacks=[tensorboard_callback]) # Export the model. 
export_serving_model(tf_transform_output, model, fn_args.serving_model_dir) """ Explanation: Transformใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใŒใƒ‡ใƒผใ‚ฟใ‚’็‰นๅพด้‡ใซๅค‰ๆ›ใ—ใŸใ‚‰ใ€ๆฌกใซใƒขใƒ‡ใƒซใ‚’ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ—ใพใ™ใ€‚ ใƒˆใƒฌใƒผใƒŠใƒผ Trainerใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฏใ€TensorFlow ใงๅฎš็พฉใ—ใŸใƒขใƒ‡ใƒซใ‚’ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ—ใพใ™ใ€‚ใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใงใฏใ€Trainer ใฏ Estimator API ใ‚’ใ‚ตใƒใƒผใƒˆใ—ใพใ™ใ€‚Keras API ใ‚’ไฝฟ็”จใ™ใ‚‹ใซใฏใ€ใƒˆใƒฌใƒผใƒŠใƒผใฎใ‚ณใƒณใ‚นใƒˆใƒฉใ‚ฏใ‚ฟใƒผใงcustom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor)ใ‚’ใ‚ปใƒƒใƒˆใ‚ขใƒƒใƒ—ใ—ใฆ Generic Trainer ใ‚’ๆŒ‡ๅฎšใ™ใ‚‹ๅฟ…่ฆใŒใ‚ใ‚Šใพใ™ใ€‚ Trainer ใฏใ€SchemaGenใ‹ใ‚‰ใฎใ‚นใ‚ญใƒผใƒžใ€Transformใ‹ใ‚‰ใฎๅค‰ๆ›ใ•ใ‚ŒใŸใƒ‡ใƒผใ‚ฟใจใ‚ฐใƒฉใƒ•ใ€ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐ ใƒ‘ใƒฉใƒกใƒผใ‚ฟใ€ใŠใ‚ˆใณใƒฆใƒผใ‚ถใƒผๅฎš็พฉใ•ใ‚ŒใŸใƒขใƒ‡ใƒซ ใ‚ณใƒผใƒ‰ใ‚’ๅซใ‚€ใƒขใ‚ธใƒฅใƒผใƒซใ‚’ๅ…ฅๅŠ›ใจใ—ใฆๅ—ใ‘ๅ–ใ‚Šใพใ™ใ€‚ ไปฅไธ‹ใฎใƒฆใƒผใ‚ถใƒผๅฎš็พฉใƒขใƒ‡ใƒซ ใ‚ณใƒผใƒ‰ใฎไพ‹ใ‚’่ฆ‹ใฆใฟใพใ—ใ‚‡ใ†๏ผˆTensorFlow Keras API ใฎๆฆ‚่ฆใซใคใ„ใฆใฏใ€ใƒใƒฅใƒผใƒˆใƒชใ‚ขใƒซใ‚’ๅ‚็…งใ—ใฆใใ ใ•ใ„๏ผ‰ใ€‚ End of explanation """ trainer = tfx.components.Trainer( module_file=os.path.abspath(_taxi_trainer_module_file), examples=transform.outputs['transformed_examples'], transform_graph=transform.outputs['transform_graph'], schema=schema_gen.outputs['schema'], train_args=tfx.proto.TrainArgs(num_steps=10000), eval_args=tfx.proto.EvalArgs(num_steps=5000)) context.run(trainer, enable_cache=True) """ Explanation: ๆฌกใซใ€ใ“ใฎใƒขใƒ‡ใƒซ ใ‚ณใƒผใƒ‰ใ‚’Trainerใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใซๆธกใ—ใ€ใใ‚Œใ‚’ๅฎŸ่กŒใ—ใฆใƒขใƒ‡ใƒซใ‚’ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ—ใพใ™ใ€‚ End of explanation """ model_artifact_dir = trainer.outputs['model'].get()[0].uri pp.pprint(os.listdir(model_artifact_dir)) model_dir = os.path.join(model_artifact_dir, 'Format-Serving') pp.pprint(os.listdir(model_dir)) """ Explanation: TensorBoard ใงใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ‚’ๅˆ†ๆžใ™ใ‚‹ ใƒˆใƒฌใƒผใƒŠใƒผใฎใ‚ขใƒผใƒ†ใ‚ฃใƒ•ใ‚กใ‚ฏใƒˆใ‚’่ฆ‹ใฆใฟใพใ—ใ‚‡ใ†ใ€‚ใ“ใ‚Œใฏใƒขใƒ‡ใƒซใฎใ‚ตใƒ–ใƒ‡ใ‚ฃใƒฌใ‚ฏใƒˆใƒชใ‚’ๅซใ‚€ใƒ‡ใ‚ฃใƒฌใ‚ฏใƒˆใƒชใ‚’ๆŒ‡ใ—ใฆใ„ใพใ™ใ€‚ End of explanation """ model_run_artifact_dir = trainer.outputs['model_run'].get()[0].uri %load_ext tensorboard %tensorboard --logdir {model_run_artifact_dir} """ Explanation: ใ‚ชใƒ—ใ‚ทใƒงใƒณใงใ€TensorBoard ใ‚’ Trainer ใซๆŽฅ็ถšใ—ใฆใ€ใƒขใƒ‡ใƒซใฎๅญฆ็ฟ’ๆ›ฒ็ทšใ‚’ๅˆ†ๆžใงใใพใ™ใ€‚ End of explanation """ # Imported files such as taxi_constants are normally cached, so changes are # not honored after the first import. Normally this is good for efficiency, but # during development when we may be iterating code it can be a problem. To # avoid this problem during development, reload the file. import taxi_constants import sys if 'google.colab' in sys.modules: # Testing to see if we're doing development import importlib importlib.reload(taxi_constants) eval_config = tfma.EvalConfig( model_specs=[ # This assumes a serving model with signature 'serving_default'. If # using estimator based EvalSavedModel, add signature_name: 'eval' and # remove the label_key. tfma.ModelSpec( signature_name='serving_default', label_key=taxi_constants.LABEL_KEY, preprocessing_function_names=['transform_features'], ) ], metrics_specs=[ tfma.MetricsSpec( # The metrics added here are in addition to those saved with the # model (assuming either a keras model or EvalSavedModel is used). # Any metrics added into the saved model (for example using # model.compile(..., metrics=[...]), etc) will be computed # automatically. 
# To add validation thresholds for metrics saved with the model, # add them keyed by metric name to the thresholds map. metrics=[ tfma.MetricConfig(class_name='ExampleCount'), tfma.MetricConfig(class_name='BinaryAccuracy', threshold=tfma.MetricThreshold( value_threshold=tfma.GenericValueThreshold( lower_bound={'value': 0.5}), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). change_threshold=tfma.GenericChangeThreshold( direction=tfma.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10}))) ] ) ], slicing_specs=[ # An empty slice spec means the overall slice, i.e. the whole dataset. tfma.SlicingSpec(), # Data can be sliced along a feature column. In this case, data is # sliced along feature column trip_start_hour. tfma.SlicingSpec( feature_keys=['trip_start_hour']) ]) """ Explanation: Evaluator Evaluator ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฏใ€่ฉ•ไพกใ‚ปใƒƒใƒˆใซๅฏพใ—ใฆใƒขใƒ‡ใƒซ ใƒ‘ใƒ•ใ‚ฉใƒผใƒžใƒณใ‚นๆŒ‡ๆจ™ใ‚’่จˆ็ฎ—ใ—ใพใ™ใ€‚TensorFlow Model Analysisใƒฉใ‚คใƒ–ใƒฉใƒชใ‚’ไฝฟ็”จใ—ใพใ™ใ€‚Evaluatorใฏใ€ใ‚ชใƒ—ใ‚ทใƒงใƒณใงใ€ๆ–ฐใ—ใใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ•ใ‚ŒใŸใƒขใƒ‡ใƒซใŒไปฅๅ‰ใฎใƒขใƒ‡ใƒซใ‚ˆใ‚Šใ‚‚ๅ„ชใ‚Œใฆใ„ใ‚‹ใ“ใจใ‚’ๆคœ่จผใงใใพใ™ใ€‚ใ“ใ‚Œใฏใ€ใƒขใƒ‡ใƒซใ‚’ๆฏŽๆ—ฅ่‡ชๅ‹•็š„ใซใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใŠใ‚ˆใณๆคœ่จผใ™ใ‚‹ๅฎŸ็จผๅƒ็’ฐๅขƒใฎใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณ่จญๅฎšใงๅฝน็ซ‹ใกใพใ™ใ€‚ใ“ใฎใƒŽใƒผใƒˆใƒ–ใƒƒใ‚ฏใงใฏ 1 ใคใฎใƒขใƒ‡ใƒซใฎใฟใ‚’ใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐใ™ใ‚‹ใŸใ‚ใ€Evaluatorใฏใƒขใƒ‡ใƒซใซ่‡ชๅ‹•็š„ใซใ€Œgoodใ€ใจใ„ใ†ใƒฉใƒ™ใƒซใ‚’ไป˜ใ‘ใพใ™ใ€‚ Evaluatorใฏใ€ExampleGenใ‹ใ‚‰ใฎใƒ‡ใƒผใ‚ฟใ€Trainerใ‹ใ‚‰ใฎใƒˆใƒฌใƒผใƒ‹ใƒณใ‚ฐๆธˆใฟใƒขใƒ‡ใƒซใ€ใŠใ‚ˆใณใ‚นใƒฉใ‚คใ‚นๆง‹ๆˆใ‚’ๅ…ฅๅŠ›ใจใ—ใฆๅ—ใ‘ๅ–ใ‚Šใพใ™ใ€‚ใ‚นใƒฉใ‚คใ‚นๆง‹ๆˆใซใ‚ˆใ‚Šใ€็‰นๅพดๅ€คใซ้–ขใ™ใ‚‹ๆŒ‡ๆจ™ใ‚’ใ‚นใƒฉใ‚คใ‚นใ™ใ‚‹ใ“ใจใŒใงใใพใ™ (ใŸใจใˆใฐใ€ๅˆๅ‰ 8 ๆ™‚ใ‹ใ‚‰ๅˆๅพŒ 8 ๆ™‚ใพใงใฎใ‚ฟใ‚ฏใ‚ทใƒผไน—่ปŠใงใƒขใƒ‡ใƒซใŒใฉใฎใ‚ˆใ†ใซๅ‹•ไฝœใ™ใ‚‹ใ‹ใชใฉ)ใ€‚ ใ“ใฎๆง‹ๆˆใฎไพ‹ใฏใ€ไปฅไธ‹ใ‚’ๅ‚็…งใ—ใฆใใ ใ•ใ„ใ€‚ End of explanation """ # Use TFMA to compute a evaluation statistics over features of a model and # validate them against a baseline. # The model resolver is only required if performing model validation in addition # to evaluation. In this case we validate against the latest blessed model. If # no model has been blessed before (as in this case) the evaluator will make our # candidate the first blessed model. model_resolver = tfx.dsl.Resolver( strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy, model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model), model_blessing=tfx.dsl.Channel( type=tfx.types.standard_artifacts.ModelBlessing)).with_id( 'latest_blessed_model_resolver') context.run(model_resolver, enable_cache=True) evaluator = tfx.components.Evaluator( examples=example_gen.outputs['examples'], model=trainer.outputs['model'], baseline_model=model_resolver.outputs['model'], eval_config=eval_config) context.run(evaluator, enable_cache=True) """ Explanation: ๆฌกใซใ€ใ“ใฎๆง‹ๆˆใ‚’ Evaluatorใซๆธกใ—ใฆๅฎŸ่กŒใ—ใพใ™ใ€‚ End of explanation """ evaluator.outputs """ Explanation: Evaluator ใฎๅ‡บๅŠ›ใ‚ขใƒผใƒ†ใ‚ฃใƒ•ใ‚กใ‚ฏใƒˆใ‚’่ชฟในใฆใฟใพใ—ใ‚‡ใ†ใ€‚ End of explanation """ context.show(evaluator.outputs['evaluation']) """ Explanation: evaluationๅ‡บๅŠ›ใ‚’ไฝฟ็”จใ™ใ‚‹ใจใ€่ฉ•ไพกใ‚ปใƒƒใƒˆๅ…จไฝ“ใฎใ‚ฐใƒญใƒผใƒใƒซๆŒ‡ๆจ™ใฎใƒ‡ใƒ•ใ‚ฉใƒซใƒˆใฎ่ฆ–่ฆšๅŒ–ใ‚’่กจ็คบใงใใพใ™ใ€‚ End of explanation """ import tensorflow_model_analysis as tfma # Get the TFMA output result path and load the result. 
PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri tfma_result = tfma.load_eval_result(PATH_TO_RESULT) # Show data sliced along feature column trip_start_hour. tfma.view.render_slicing_metrics( tfma_result, slicing_column='trip_start_hour') """ Explanation: ใ‚นใƒฉใ‚คใ‚นใ•ใ‚ŒใŸ่ฉ•ไพกใƒกใƒˆใƒชใ‚ฏใ‚นใฎ่ฆ–่ฆšๅŒ–ใ‚’่กจ็คบใ™ใ‚‹ใซใฏใ€TensorFlow Model Analysis ใƒฉใ‚คใƒ–ใƒฉใƒชใ‚’็›ดๆŽฅๅ‘ผใณๅ‡บใ—ใพใ™ใ€‚ End of explanation """ blessing_uri = evaluator.outputs['blessing'].get()[0].uri !ls -l {blessing_uri} """ Explanation: ใ“ใฎ่ฆ–่ฆšๅŒ–ใฏๅŒใ˜ๆŒ‡ๆจ™ใ‚’็คบใ—ใฆใ„ใพใ™ใŒใ€่ฉ•ไพกใ‚ปใƒƒใƒˆๅ…จไฝ“ใงใฏใชใใ€trip_start_hourใฎใ™ในใฆใฎ็‰นๅพดๅ€คใง่จˆ็ฎ—ใ•ใ‚Œใฆใ„ใพใ™ใ€‚ TensorFlow ใƒขใƒ‡ใƒซๅˆ†ๆžใฏใ€ๅ…ฌๅนณๆ€งใ‚คใƒณใ‚ธใ‚ฑใƒผใ‚ฟใƒผใ‚„ใƒขใƒ‡ใƒซ ใƒ‘ใƒ•ใ‚ฉใƒผใƒžใƒณใ‚นใฎๆ™‚็ณปๅˆ—ใฎใƒ—ใƒญใƒƒใƒˆใชใฉใ€ไป–ใฎๅคšใใฎ่ฆ–่ฆšๅŒ–ใ‚’ใ‚ตใƒใƒผใƒˆใ—ใฆใ„ใพใ™ใ€‚ ่ฉณ็ดฐใซใคใ„ใฆใฏใ€ใƒใƒฅใƒผใƒˆใƒชใ‚ขใƒซใ‚’ๅ‚็…งใ—ใฆใใ ใ•ใ„ใ€‚ ๆง‹ๆˆใซใ—ใใ„ๅ€คใ‚’่ฟฝๅŠ ใ—ใŸใŸใ‚ใ€ๆคœ่จผๅ‡บๅŠ›ใ‚‚ๅˆฉ็”จใงใใพใ™ใ€‚{code 0}blessing{/code 0} ใ‚ขใƒผใƒ†ใ‚ฃใƒ•ใ‚กใ‚ฏใƒˆใฎๅญ˜ๅœจใฏใ€ใƒขใƒ‡ใƒซใŒๆคœ่จผใซๅˆๆ ผใ—ใŸใ“ใจใ‚’็คบใ—ใฆใ„ใพใ™ใ€‚ใ“ใ‚ŒใฏๅฎŸ่กŒใ•ใ‚Œใ‚‹ๆœ€ๅˆใฎๆคœ่จผใงใ‚ใ‚‹ใŸใ‚ใ€ๅ€™่ฃœใฏ่‡ชๅ‹•็š„ใซ bless ใ•ใ‚Œใพใ™ใ€‚ End of explanation """ PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri print(tfma.load_validation_result(PATH_TO_RESULT)) """ Explanation: ๆคœ่จผ็ตๆžœใƒฌใ‚ณใƒผใƒ‰ใ‚’่ชญใฟ่พผใฟใ€ๆˆๅŠŸใ‚’็ขบ่ชใ™ใ‚‹ใ“ใจใ‚‚ใงใใพใ™ใ€‚ End of explanation """ pusher = tfx.components.Pusher( model=trainer.outputs['model'], model_blessing=evaluator.outputs['blessing'], push_destination=tfx.proto.PushDestination( filesystem=tfx.proto.PushDestination.Filesystem( base_directory=_serving_model_dir))) context.run(pusher, enable_cache=True) """ Explanation: Pusher Pusher ใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฏ้€šๅธธใ€TFX ใƒ‘ใ‚คใƒ—ใƒฉใ‚คใƒณใฎๆœ€ๅพŒใซใ‚ใ‚Šใพใ™ใ€‚ใ“ใฎใ‚ณใƒณใƒใƒผใƒใƒณใƒˆใฏใƒขใƒ‡ใƒซใŒๆคœ่จผใซๅˆๆ ผใ—ใŸใ‹ใฉใ†ใ‹ใ‚’ใƒใ‚งใƒƒใ‚ฏใ—ใ€ๅˆๆ ผใ—ใŸๅ ดๅˆใฏใƒขใƒ‡ใƒซใ‚’ _serving_model_dirใซใ‚จใ‚ฏใ‚นใƒใƒผใƒˆใ—ใพใ™ใ€‚ End of explanation """ pusher.outputs """ Explanation: ๆฌกใซPusherใฎๅ‡บๅŠ›ใ‚ขใƒผใƒ†ใ‚ฃใƒ•ใ‚กใ‚ฏใƒˆใ‚’่ชฟในใฆใฟใพใ—ใ‚‡ใ†ใ€‚ End of explanation """ push_uri = pusher.outputs['pushed_model'].get()[0].uri model = tf.saved_model.load(push_uri) for item in model.signatures.items(): pp.pprint(item) """ Explanation: ็‰นใซใ€Pusher ใฏใƒขใƒ‡ใƒซใ‚’ๆฌกใฎใ‚ˆใ†ใช SavedModel ๅฝขๅผใงใ‚จใ‚ฏใ‚นใƒใƒผใƒˆใ—ใพใ™ใ€‚ End of explanation """
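The tutorial ends by loading the pushed SavedModel and printing its signatures. As a hedged follow-on sketch (not part of the original notebook), the 'serving_default' signature could be exercised on one raw serialized tf.Example read back from the ExampleGen train split; the keyword 'examples' comes from the serving signature defined in the trainer module above, and the exact call should be verified against the installed TFX and TensorFlow versions.

```python
# Read one raw serialized example from the ExampleGen train split.
raw_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'Split-train')
raw_files = [os.path.join(raw_uri, name) for name in os.listdir(raw_uri)]
raw_dataset = tf.data.TFRecordDataset(raw_files, compression_type="GZIP")
serialized = next(iter(raw_dataset.take(1))).numpy()

# Call the pushed model's serving signature on that example.
infer = model.signatures['serving_default']
print(infer(examples=tf.constant([serialized])))
```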
ganguli-lab/twpca
notebooks/warp_unit_tests.ipynb
mit
_, _, data = twpca.datasets.jittered_neuron() model = TWPCA(data, n_components=1, warpinit='identity') np.all(np.isclose(model.params['warp'], np.arange(model.shared_length), atol=1e-5, rtol=2)) np.nanmax(np.abs(model.transform() - data)) < 1e-5 """ Explanation: check identity warp does not change data appreciably End of explanation """ model = TWPCA(data, n_components=1, warpinit='shift') plt.imshow(np.squeeze(model.transform())) """ Explanation: check that shift initialization for warp solves the simple toy problem End of explanation """
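The two cells above use twpca, TWPCA, np, and plt without showing a setup cell. A plausible preamble is sketched below; since the original notebook's imports are not included in this dump, treat it as an assumption:

```python
import numpy as np
import matplotlib.pyplot as plt

import twpca
from twpca import TWPCA
```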
oddt/notebooks
DUD-E.ipynb
bsd-3-clause
from __future__ import print_function, division, unicode_literals import oddt from oddt.datasets import dude print(oddt.__version__) """ Explanation: <h1>DUD-E: A Database of Useful Decoys: Enhanced</h1> End of explanation """ %%bash mkdir -p ./DUD-E_targets/ wget -qO- http://dude.docking.org/targets/ampc/ampc.tar.gz | tar xz -C ./DUD-E_targets/ wget -qO- http://dude.docking.org/targets/cxcr4/cxcr4.tar.gz | tar xz -C ./DUD-E_targets/ wget -qO- http://dude.docking.org/targets/pur2/pur2.tar.gz | tar xz -C ./DUD-E_targets/ wget -qO- http://dude.docking.org/targets/pygm/pygm.tar.gz | tar xz -C ./DUD-E_targets/ wget -qO- http://dude.docking.org/targets/sahh/sahh.tar.gz | tar xz -C ./DUD-E_targets/ directory = './DUD-E_targets' """ Explanation: We'd like to read files from DUD-E.<br/> You can download different targets and different numbers of targets, but I used only these five: ampc, cxcr4, pur2, pygm, sahh.<br/> End of explanation """ dude_database = dude(home=directory) """ Explanation: We will use the dude class. End of explanation """ target = dude_database['cxcr4'] """ Explanation: Now we can get one target or iterate over all targets in our directory. Let's choose one target. End of explanation """ target.ligand """ Explanation: target has four properties: protein, ligand, actives and decoys:<br/> protein - protein molecule<br/> ligand - ligand molecule<br/> actives - generator containing actives<br/> decoys - generator containing decoys End of explanation """ for target in dude_database: actives = list(target.actives) decoys = list(target.decoys) print('Target: ' + target.dude_id, 'Number of actives: ' + str(len(actives)), 'Number of decoys: ' + str(len(decoys)), sep='\t\t') """ Explanation: Let's see which target has the most actives and decoys. End of explanation """
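As a small extension of the loop above (not in the original notebook), the same actives and decoys generators can also report how imbalanced each benchmark set is via its decoy-to-active ratio:

```python
# Decoy-to-active ratio per downloaded DUD-E target.
ratios = {}
for target in dude_database:
    n_actives = sum(1 for _ in target.actives)
    n_decoys = sum(1 for _ in target.decoys)
    ratios[target.dude_id] = n_decoys / n_actives

for dude_id, ratio in ratios.items():
    print('%s: %.1f decoys per active' % (dude_id, ratio))
```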
iAInNet/tensorflow_in_action
_pratice_cifar10.ipynb
gpl-3.0
max_steps = 3000 batch_size = 128 data_dir = 'data/cifar10/cifar-10-batches-bin/' model_dir = 'model/_cifar10_v2/' """ Explanation: Global parameters End of explanation """ X_train, y_train = cifar10_input.distorted_inputs(data_dir, batch_size) X_test, y_test = cifar10_input.inputs(eval_data=True, data_dir=data_dir, batch_size=batch_size) image_holder = tf.placeholder(tf.float32, [batch_size, 24, 24, 3]) label_holder = tf.placeholder(tf.int32, [batch_size]) """ Explanation: Initializing the weights If needed, an L2 loss is added to the weights. So that it can be picked up later when computing the network's total loss, every weight loss is stored in a single collection. Loading the data We use cifar10_input to fetch the data; this file comes from the TensorFlow GitHub repository and can be downloaded and used directly. If the distorted_inputs method is used, the returned data has already been augmented: images are randomly cropped, flipped, and have their brightness and contrast adjusted, which diversifies our training data. We get a tensor holding a batch of batch_size examples, and the next batch can be read iteratively. End of explanation """ weight1 = variable_with_weight_loss([5, 5, 3, 64], stddev=0.05, lambda_value=0) kernel1 = tf.nn.conv2d(image_holder, weight1, [1, 1, 1, 1], padding='SAME') bias1 = tf.Variable(tf.constant(0.0, shape=[64])) conv1 = tf.nn.relu(tf.nn.bias_add(kernel1, bias1)) pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME') norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75) """ Explanation: First convolutional layer As before, we use a 5x5 kernel with 3 input channels and 64 output channels. The first layer's parameters are not regularized, so lambda_value is set to 0. One small trick is used in the pooling layer: a 3x3 ksize combined with a 2x2 stride, which enriches the data. Finally LRN is applied. LRN first appeared in Alex's CNN paper for the ImageNet competition, where he explained that the LRN layer mimics the "lateral inhibition" mechanism of biological neural systems: it creates a competitive environment among the activities of local neurons so that relatively large responses become relatively larger while neurons with smaller feedback are suppressed, which improves the model's generalization ability. The later VGGNet paper, however, compared models with and without LRN and showed that LRN does not improve performance. It is still included here, following the AlexNet design. End of explanation """ weight2 = variable_with_weight_loss(shape=[5, 5, 64, 64], stddev=5e-2, lambda_value=0.0) kernel2 = tf.nn.conv2d(norm1, weight2, strides=[1, 1, 1, 1], padding='SAME') bias2 = tf.Variable(tf.constant(0.1, shape=[64])) conv2 = tf.nn.relu(tf.nn.bias_add(kernel2, bias2)) norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001/9.0, beta=0.75) pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME') """ Explanation: Second convolutional layer 64 input channels, and still 64 output channels. The bias is initialized to 0.1. The order of the max-pooling layer and LRN is swapped: LRN first, then max pooling. But why do it this way? I honestly don't know; I need to read more papers. End of explanation """ flattern = tf.reshape(pool2, [batch_size, -1]) dim = flattern.get_shape()[1].value weight3 = variable_with_weight_loss(shape=[dim, 384], stddev=0.04, lambda_value=0.04) bias3 = tf.Variable(tf.constant(0.1, shape=[384])) local3 = tf.nn.relu(tf.matmul(flattern, weight3) + bias3) """ Explanation: First fully connected layer The convolutional output is flattened and fully connected to a new hidden layer with 384 nodes. The normal-distribution stddev is set to 0.04 and the bias to 0.1. 
Importantly, here we also set the weight-loss lambda value to 0.04. End of explanation """ weight4 = variable_with_weight_loss(shape=[384, 192], stddev=0.04, lambda_value=0.04) bias4 = tf.Variable(tf.constant(0.1, shape=[192])) local4 = tf.nn.relu(tf.matmul(local3, weight4) + bias4) """ Explanation: Second fully connected layer Reduced to 192 nodes, half as many as before. End of explanation """ weight5 = variable_with_weight_loss(shape=[192, 10], stddev=1/192.0, lambda_value=0.0) bias5 = tf.Variable(tf.constant(0.0, shape=[10])) logits = tf.add(tf.matmul(local4, weight5), bias5) def loss(logits, labels): labels = tf.cast(labels, tf.int64) cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=logits, labels=labels, name = 'cross_entropy_per_example' ) cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy') tf.add_to_collection('losses', cross_entropy_mean) return tf.add_n(tf.get_collection('losses'), name='total_loss') loss = loss(logits, label_holder) train_op = tf.train.AdamOptimizer(1e-3).minimize(loss) """ Explanation: Output layer There are 10 classes at the end. End of explanation """ top_k_op = tf.nn.in_top_k(logits, label_holder, 1) sess = tf.InteractiveSession() saver = tf.train.Saver() tf.global_variables_initializer().run() """ Explanation: Use in_top_k to report the top-k accuracy; top 1 is used by default. Top 5 is also commonly used. End of explanation """ tf.train.start_queue_runners() """ Explanation: Start the thread queues needed by cifar10_input; their main purpose is image data augmentation. A total of 16 threads are used here to process the images. End of explanation """ for step in range(max_steps): start_time = time.time() image_batch, label_batch = sess.run([X_train, y_train]) _, loss_value = sess.run([train_op, loss], feed_dict={image_holder: image_batch, label_holder: label_batch}) duration = time.time() - start_time if step % 10 == 0: examples_per_sec = batch_size / duration sec_this_batch = float(duration) format_str = ('step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)') print(format_str % (step, loss_value, examples_per_sec, sec_this_batch)) saver.save(sess, save_path=os.path.join(model_dir, 'model.chpt'), global_step=max_steps) num_examples = 10000 num_iter = int(math.ceil(num_examples / batch_size)) true_count = 0 total_sample_count = num_iter * batch_size step = 0 while step < num_iter: image_batch, label_batch = sess.run([X_test, y_test]) predictions = sess.run([top_k_op], feed_dict={image_holder: image_batch, label_holder: label_batch}) true_count += np.sum(predictions) step += 1 precision = true_count / total_sample_count print("Precision @ 1 = %.3f" % precision) sess.close() """ Explanation: Before each training step, X_train and y_train are run first to fetch a batch_size-sized batch of training data, which is then fed into train_op and loss to train on those samples. Every 10 iterations some essential information is printed. End of explanation """
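The notebook calls variable_with_weight_loss and cifar10_input without showing its setup cell. Below is a sketch of that helper reconstructed from the description above (an optional L2 penalty on the weights is added to a 'losses' collection so it can be folded into the total loss); the exact original definition is not in this dump, so treat it as an assumption:

```python
import math
import os
import time

import numpy as np
import tensorflow as tf

import cifar10_input  # from the TensorFlow GitHub repository, as noted above


def variable_with_weight_loss(shape, stddev, lambda_value):
    """Create a weight variable and, when lambda_value is set, add its L2 loss
    to the 'losses' collection used later when computing the total loss."""
    var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))
    if lambda_value is not None and lambda_value != 0:
        weight_loss = tf.multiply(tf.nn.l2_loss(var), lambda_value, name='weight_loss')
        tf.add_to_collection('losses', weight_loss)
    return var
```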
mitdbg/modeldb
demos/webinar-2020-5-6/02-mdb_versioned/01-train/01 Basic NLP.ipynb
mit
!python -m spacy download en_core_web_sm """ Explanation: Versioning Example (Part 1/3) In this example, we'll train an NLP model for sentiment analysis of tweets using spaCy. Through this series, we'll take advantage of ModelDB's versioning system to keep track of changes. This workflow requires verta&gt;=0.14.4 and spaCy&gt;=2.0.0. Setup Download a spaCy model to train. End of explanation """ from __future__ import unicode_literals, print_function import boto3 import json import numpy as np import pandas as pd import spacy """ Explanation: Import libraries we'll need. End of explanation """ from verta import Client client = Client('http://localhost:3000/') proj = client.set_project('Tweet Classification') expt = client.set_experiment('SpaCy') """ Explanation: Bring in Verta's ModelDB client to organize our work, and log and version metadata. End of explanation """ S3_BUCKET = "verta-starter" S3_KEY = "english-tweets.csv" FILENAME = S3_KEY boto3.client('s3').download_file(S3_BUCKET, S3_KEY, FILENAME) """ Explanation: Prepare Data Download a dataset of English tweets from S3 for us to train with. End of explanation """ import utils data = pd.read_csv(FILENAME).sample(frac=1).reset_index(drop=True) utils.clean_data(data) data.head() """ Explanation: Then we'll load and clean the data. End of explanation """ from verta.code import Notebook from verta.configuration import Hyperparameters from verta.dataset import S3 from verta.environment import Python code_ver = Notebook() # Notebook & git environment config_ver = Hyperparameters({'n_iter': 20}) dataset_ver = S3("s3://{}/{}".format(S3_BUCKET, S3_KEY)) env_ver = Python(Python.read_pip_environment()) # pip environment and Python version """ Explanation: Capture and Version Model Ingredients We'll first capture metadata about our code, configuration, dataset, and environment using utilities from the verta library. End of explanation """ repo = client.set_repository('Tweet Classification') commit = repo.get_commit(branch='master') """ Explanation: Then, to log them, we'll use a ModelDB repository to prepare a commit. End of explanation """ commit.update("notebooks/tweet-analysis", code_ver) commit.update("config/hyperparams", config_ver) commit.update("data/tweets", dataset_ver) commit.update("env/python", env_ver) commit.save("Initial model") commit """ Explanation: Now we'll add these versioned components to the commit and save it to ModelDB. End of explanation """ nlp = spacy.load('en_core_web_sm') """ Explanation: Train and Log Model We'll use the pre-trained spaCy model we downloaded earlier... End of explanation """ import training training.train(nlp, data, n_iter=20) """ Explanation: ...and fine-tune it with our dataset. End of explanation """ run = client.set_experiment_run() run.log_model(nlp) """ Explanation: Now that our model is good to go, we'll log it to ModelDB so our progress is never lost. Using Verta's ModelDB Client, we'll create an Experiment Run to encapsulate our work, and log our model as an artifact. End of explanation """ run.log_commit( commit, { 'notebook': "notebooks/tweet-analysis", 'hyperparameters': "config/hyperparams", 'training_data': "data/tweets", 'python_env': "env/python", }, ) """ Explanation: And finally, we'll link the commit we created earlier to the Experiment Run to complete our logged model version. End of explanation """
cipri-tom/Swiss-on-Amazon
filter_swiss_helpful_reviews.ipynb
gpl-3.0
%matplotlib inline import matplotlib.pyplot as plt import pandas as pd import numpy as np import yaml """ Explanation: The following script extracts the (more) helpful reviews from the swiss reviews and saves them locally. From the extracted reviews it also saves a list with their asin identifiers. The list of asin identifiers will be later used to to find the average review rating for the respective products. End of explanation """ with open("data/swiss-reviews.txt", 'r') as fp: swiss_rev = fp.readlines() len(swiss_rev) swiss_rev[2] """ Explanation: Load the swiss reviews End of explanation """ def filter_helpful(line): l = line.rstrip('\n') l = yaml.load(l) if('helpful' in l.keys()): if(l['helpful'][1] >= 5): return True else: return False else: print("Review does not have helpful score key: "+line) return False """ Explanation: The filter_helpful function keeps only the reviews which had at least 5 flags/votes in the helpfulness field. This amounts to a subset of around 23000 reviews. A smaller subset of around 10000 reviews was obtained as well by only keeping reviews with 10 flags/votes. The main advantage of the smaller subset is that it contains better quality reviews while its drawback is, of course, the reduced size. 1) Extract the helpful reviews End of explanation """ def get_helpful(data): res = [] counter = 1 i = 0 for line in data: i += 1 if(filter_helpful(line)): if(counter % 1000 == 0): print("Count "+str(counter)+" / "+str(i)) counter += 1 res.append(line) return res swiss_reviews_helpful = get_helpful(swiss_rev) len(swiss_reviews_helpful) """ Explanation: Apply the filter_helpful to each swiss product review End of explanation """ write_file = open('data/swiss-reviews-helpful-correct-bigger.txt', 'w') for item in swiss_reviews_helpful: write_file.write(item) write_file.close() """ Explanation: Save the subset with helpful swiss product reviews End of explanation """ with open('data/swiss-reviews-helpful-correct-bigger.txt', 'r') as fp: swiss_reviews_helpful = fp.readlines() """ Explanation: 2) Extract the asins of the products which the helpful reviews correspond to End of explanation """ def filter_asin(line): l = line.rstrip('\n') l = yaml.load(l) if('asin' in l.keys()): return l['asin'] else: return '' helpful_asins = [] counter = 1 for item in swiss_reviews_helpful: if(counter%500 == 0): print(counter) counter += 1 x = filter_asin(item) if(len(x) > 0): helpful_asins.append(x) """ Explanation: The following function simply extracts the 'asin' from the helpful reviews. Repetitions of the asins are of no consequence, as the list is just meant to be a check up. End of explanation """ import pickle with open('data/helpful_asins_bigger.pickle', 'wb') as fp: pickle.dump(helpful_asins, fp) """ Explanation: Save the list of asins. End of explanation """
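As a short follow-up (not shown in the original notebook), the saved list can be read back and de-duplicated before the average-rating lookup described at the top:

```python
import pickle

# Reload the pickled asin list written above and drop duplicates.
with open('data/helpful_asins_bigger.pickle', 'rb') as fp:
    helpful_asins = pickle.load(fp)

unique_asins = set(helpful_asins)
print(len(helpful_asins), 'asins,', len(unique_asins), 'unique')
```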
simonsfoundation/CaImAn
demos/notebooks/demo_Ring_CNN.ipynb
gpl-2.0
get_ipython().magic('load_ext autoreload') get_ipython().magic('autoreload 2') import glob import logging import numpy as np import os logging.basicConfig(format= "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s", # filename="/tmp/caiman.log", level=logging.WARNING) import caiman as cm from caiman.source_extraction import cnmf as cnmf from caiman.utils.utils import download_demo import matplotlib.pyplot as plt import bokeh.plotting as bpl bpl.output_notebook() """ Explanation: Example of 1p online analysis using a Ring CNN + OnACID The demo shows how to perform online analysis on one photon data using a Ring CNN for extracting the background followed by processing using the OnACID algorithm. The algorithm relies on the usage a GPU to efficiently estimate and apply the background model so it is recommended to have access to a GPU when running this notebook. End of explanation """ fnames=download_demo('blood_vessel_10Hz.mat') """ Explanation: First specify the data file(s) to be analyzed The download_demo method will download the file (if not already present) and store it inside your caiman_data/example_movies folder. You can specify any path to files you want to analyze. End of explanation """ reuse_model = False # set to True to re-use an existing ring model path_to_model = None # specify a pre-trained model here if needed gSig = (7, 7) # expected half size of neurons gnb = 2 # number of background components for OnACID init_batch = 500 # number of frames for initialization and training params_dict = {'fnames': fnames, 'var_name_hdf5': 'Y', # name of variable inside mat file where the data is stored 'fr': 10, # frame rate (Hz) 'decay_time': 0.5, # approximate length of transient event in seconds 'gSig': gSig, 'p': 0, # order of AR indicator dynamics 'ring_CNN': True, # SET TO TRUE TO USE RING CNN 'min_SNR': 2.65, # minimum SNR for accepting new components 'SNR_lowest': 0.75, # reject components with SNR below this value 'use_cnn': False, # do not use CNN based test for components 'use_ecc': True, # test eccentricity 'max_ecc': 2.625, # reject components with eccentricity above this value 'rval_thr': 0.70, # correlation threshold for new component inclusion 'rval_lowest': 0.25, # reject components with corr below that value 'ds_factor': 1, # spatial downsampling factor (increases speed but may lose some fine structure) 'nb': gnb, 'motion_correct': False, # Flag for motion correction 'init_batch': init_batch, # number of frames for initialization (presumably from the first file) 'init_method': 'bare', 'normalize': False, 'expected_comps': 1100, # maximum number of expected components used for memory pre-allocation (exaggerate here) 'sniper_mode': False, # flag using a CNN to detect new neurons (o/w space correlation is used) 'dist_shape_update' : True, # flag for updating shapes in a distributed way 'min_num_trial': 5, # number of candidate components per frame 'epochs': 3, # number of total passes over the data 'stop_detection': True, # Run a last epoch without detecting new neurons 'K': 50, # initial number of components 'lr': 6e-4, 'lr_scheduler': [0.9, 6000, 10000], 'pct': 0.01, 'path_to_model': path_to_model, # where the ring CNN model is saved/loaded 'reuse_model': reuse_model # flag for re-using a ring CNN model } opts = cnmf.params.CNMFParams(params_dict=params_dict) """ Explanation: Set up some parameters Here we set up some parameters for specifying the ring model and running OnACID. 
We use the same params object as in batch processing with CNMF. End of explanation """ run_onacid = True if run_onacid: cnm = cnmf.online_cnmf.OnACID(params=opts) cnm.fit_online() fld_name = os.path.dirname(cnm.params.ring_CNN['path_to_model']) res_name_nm = os.path.join(fld_name, 'onacid_results_nm.hdf5') cnm.save(res_name_nm) # save initial results (without any postprocessing) else: fld_name = os.path.dirname(path_to_model) res_name = os.path.join(fld_name, 'onacid_results.hdf5') cnm = cnmf.online_cnmf.load_OnlineCNMF(res_name) cnm.params.data['fnames'] = fnames """ Explanation: Now run the Ring-CNN + CaImAn online algorithm (OnACID). The first initbatch frames are used for training the ring-CNN model. Once the model is trained the background is subtracted and the different is used for initialization purposes. The initialization method chosen here bare will only search for a small number of neurons and is mostly used to initialize the background components. Initialization with the full CNMF can also be used by choosing cnmf. We first create an OnACID object located in the module online_cnmf and we pass the parameters similarly to the case of batch processing. We then run the algorithm using the fit_online method. We then save the results inside the folder where the Ring_CNN model is saved. End of explanation """ ds = 10 # plot every ds frames to make more manageable figures init_batch = 500 dims, T = cnmf.utilities.get_file_size(fnames, var_name_hdf5='Y') T = np.array(T).sum() n_epochs = cnm.params.online['epochs'] T_detect = 1e3*np.hstack((np.zeros(init_batch), cnm.t_detect)) T_shapes = 1e3*np.hstack((np.zeros(init_batch), cnm.t_shapes)) T_online = 1e3*np.hstack((np.zeros(init_batch), cnm.t_online)) - T_detect - T_shapes plt.figure() plt.stackplot(np.arange(len(T_detect))[::ds], T_online[::ds], T_detect[::ds], T_shapes[::ds], colors=['tab:red', 'tab:purple', 'tab:brown']) plt.legend(labels=['process', 'detect', 'shapes'], loc=2) plt.title('Processing time allocation') plt.xlabel('Frame #') plt.ylabel('Processing time [ms]') max_val = 80 plt.ylim([0, max_val]); plt.plot([init_batch, init_batch], [0, max_val], '--k') for i in range(n_epochs - 1): plt.plot([(i+1)*T, (i+1)*T], [0, max_val], '--k') plt.xlim([0, n_epochs*T]); plt.savefig(os.path.join(fld_name, 'time_per_frame_ds.pdf'), bbox_inches='tight', pad_inches=0) init_batch = 500 plt.figure() tc_init = cnm.t_init*np.ones(T*n_epochs) ds = 10 #tc_mot = np.hstack((np.zeros(init_batch), np.cumsum(T_motion)/1000)) tc_prc = np.cumsum(T_online)/1000#np.hstack((np.zeros(init_batch), )) tc_det = np.cumsum(T_detect)/1000#np.hstack((np.zeros(init_batch), )) tc_shp = np.cumsum(T_shapes)/1000#np.hstack((np.zeros(init_batch), )) plt.stackplot(np.arange(len(tc_init))[::ds], tc_init[::ds], tc_prc[::ds], tc_det[::ds], tc_shp[::ds], colors=['g', 'tab:red', 'tab:purple', 'tab:brown']) plt.legend(labels=['initialize', 'process', 'detect', 'shapes'], loc=2) plt.title('Processing time allocation') plt.xlabel('Frame #') plt.ylabel('Processing time [s]') max_val = (tc_prc[-1] + tc_det[-1] + tc_shp[-1] + cnm.t_init)*1.05 for i in range(n_epochs - 1): plt.plot([(i+1)*T, (i+1)*T], [0, max_val], '--k') plt.xlim([0, n_epochs*T]); plt.ylim([0, max_val]) plt.savefig(os.path.join(fld_name, 'time_cumulative_ds.pdf'), bbox_inches='tight', pad_inches=0) print('Cost of estimating model and running first epoch: {:.2f}s'.format(tc_prc[T] + tc_det[T] + tc_shp[T] + tc_init[T])) """ Explanation: Check speed Create some plots that show the speed per frame and cumulatively End 
of explanation """ # first compute background summary images images = cm.load(fnames, var_name_hdf5='Y', subindices=slice(None, None, 2)) cn_filter, pnr = cm.summary_images.correlation_pnr(images, gSig=3, swap_dim=False) # change swap dim if output looks weird, it is a problem with tiffile plt.figure(figsize=(15, 7)) plt.subplot(1,2,1); plt.imshow(cn_filter); plt.colorbar() plt.subplot(1,2,2); plt.imshow(pnr); plt.colorbar() cnm.estimates.plot_contours_nb(img=cn_filter, idx=cnm.estimates.idx_components, line_color='white', thr=0.3) """ Explanation: Do some initial plotting End of explanation """ cnm.estimates.nb_view_components(img=cn_filter, denoised_color='red') """ Explanation: View components Now inspect the components extracted by OnACID. Note that if single pass was used then several components would be non-zero only for the part of the time interval indicating that they were detected online by OnACID. Note that if you get data rate error you can start Jupyter notebooks using: 'jupyter notebook --NotebookApp.iopub_data_rate_limit=1.0e10' End of explanation """ save_file = True if save_file: from caiman.utils.nn_models import create_LN_model model_LN = create_LN_model(images, shape=opts.data['dims'] + (1,), n_channels=opts.ring_CNN['n_channels'], width=opts.ring_CNN['width'], use_bias=opts.ring_CNN['use_bias'], gSig=gSig[0], use_add=opts.ring_CNN['use_add']) model_LN.load_weights(cnm.params.ring_CNN['path_to_model']) # Load the data in batches and save them m = [] saved_files = [] batch_length = 256 for i in range(0, T, batch_length): images = cm.load(fnames, var_name_hdf5='Y', subindices=slice(i, i + batch_length)) images_filt = np.squeeze(model_LN.predict(np.expand_dims(images, axis=-1))) temp_file = os.path.join(fld_name, 'pfc_back_removed_' + format(i, '05d') + '.h5') saved_files.append(temp_file) m = cm.movie(np.maximum(images - images_filt, 0)) m.save(temp_file) else: saved_files = glob.glob(os.path.join(fld_name, 'pfc_back_removed_*')) saved_files.sort() fname_mmap = cm.save_memmap([saved_files], order='C', border_to_0=0) Yr, dims, T = cm.load_memmap(fname_mmap) images_mmap = Yr.T.reshape((T,) + dims, order='F') """ Explanation: Load ring model to filter the data Filter the data with the learned Ring CNN model and a create memory mapped file with the background subtracted data. We will use this to run the quality tests and screen for false positive components. End of explanation """ cnm.params.merging['merge_thr'] = 0.7 cnm.estimates.c1 = np.zeros(cnm.estimates.A.shape[-1]) cnm.estimates.bl = np.zeros(cnm.estimates.A.shape[-1]) cnm.estimates.neurons_sn = np.zeros(cnm.estimates.A.shape[-1]) cnm.estimates.g = None #np.ones((cnm.estimates.A.shape[-1], 1))*.9 cnm.estimates.merge_components(Yr, cnm.params) """ Explanation: Merge components End of explanation """ cnm.params.quality cnm.estimates.evaluate_components(imgs=images_mmap, params=cnm.params) cnm.estimates.plot_contours_nb(img=cn_filter, idx=cnm.estimates.idx_components, line_color='white') cnm.estimates.nb_view_components(idx=cnm.estimates.idx_components, img=cn_filter) """ Explanation: Evaluate components and compare again We run the component evaluation tests to screen for false positive components. 
End of explanation """ cnmfe_results = download_demo('online_vs_offline.npz') locals().update(np.load(cnmfe_results, allow_pickle=True)) A_patch_good = A_patch_good.item() estimates_gt = cnmf.estimates.Estimates(A=A_patch_good, C=C_patch_good, dims=dims) maxthr=0.01 cnm.estimates.A_thr=None cnm.estimates.threshold_spatial_components(maxthr=maxthr) estimates_gt.A_thr=None estimates_gt.threshold_spatial_components(maxthr=maxthr*10) min_size = np.pi*(gSig[0]/1.5)**2 max_size = np.pi*(gSig[0]*1.5)**2 ntk = cnm.estimates.remove_small_large_neurons(min_size_neuro=min_size, max_size_neuro=2*max_size) gtk = estimates_gt.remove_small_large_neurons(min_size_neuro=min_size, max_size_neuro=2*max_size) m1, m2, nm1, nm2, perf = cm.base.rois.register_ROIs(estimates_gt.A_thr[:, estimates_gt.idx_components], cnm.estimates.A_thr[:, cnm.estimates.idx_components], dims, align_flag=False, thresh_cost=.7, plot_results=True, Cn=cn_filter, enclosed_thr=None)[:-1] """ Explanation: Compare against CNMF-E results We download the results of CNMF-E on the same dataset and compare. End of explanation """ for k, v in perf.items(): print(k + ':', '%.4f' % v, end=' ') """ Explanation: Print performance results End of explanation """ res_name = os.path.join(fld_name, 'onacid_results.hdf5') cnm.save(res_name) """ Explanation: Save the results End of explanation """ import matplotlib.lines as mlines lp, hp = np.nanpercentile(cn_filter, [5, 98]) A_onacid = cnm.estimates.A_thr.toarray().copy() A_onacid /= A_onacid.max(0) A_TP = estimates_gt.A[:, m1].toarray() #cnm.estimates.A[:, cnm.estimates.idx_components[m2]].toarray() A_TP = A_TP.reshape(dims + (-1,), order='F').transpose(2,0,1) A_FN = estimates_gt.A[:, nm1].toarray() A_FN = A_FN.reshape(dims + (-1,), order='F').transpose(2,0,1) A_FP = A_onacid[:,cnm.estimates.idx_components[nm2]] A_FP = A_FP.reshape(dims + (-1,), order='F').transpose(2,0,1) plt.figure(figsize=(15, 12)) plt.imshow(cn_filter, vmin=lp, vmax=hp, cmap='viridis') plt.colorbar(); for aa in A_TP: plt.contour(aa, [0.05], colors='k'); for aa in A_FN: plt.contour(aa, [0.05], colors='r'); for aa in A_FP: plt.contour(aa, [0.25], colors='w'); cl = ['k', 'r', 'w'] lb = ['both', 'CNMF-E only', 'ring CNN only'] day = [mlines.Line2D([], [], color=cl[i], label=lb[i]) for i in range(3)] plt.legend(handles=day, loc=3) plt.axis('off'); plt.margins(0, 0); plt.savefig(os.path.join(fld_name, 'ring_CNN_contours_gSig_3.pdf'), bbox_inches='tight', pad_inches=0) A_rej = cnm.estimates.A[:, cnm.estimates.idx_components_bad].toarray() A_rej = A_rej.reshape(dims + (-1,), order='F').transpose(2,0,1) plt.figure(figsize=(15, 15)) plt.imshow(cn_filter, vmin=lp, vmax=hp, cmap='viridis') plt.title('Rejected Components') for aa in A_rej: plt.contour(aa, [0.05], colors='w'); """ Explanation: Make some plots End of explanation """ from caiman.utils.nn_models import create_LN_model model_LN = create_LN_model(images, shape=opts.data['dims'] + (1,), n_channels=opts.ring_CNN['n_channels'], width=opts.ring_CNN['width'], use_bias=opts.ring_CNN['use_bias'], gSig=gSig[0], use_add=opts.ring_CNN['use_add']) model_LN.load_weights(cnm.params.ring_CNN['path_to_model']) W = model_LN.get_weights() plt.figure(figsize=(10, 10)) plt.subplot(2,2,1); plt.imshow(np.squeeze(W[0][:,:,:,0])); plt.colorbar(); plt.title('Ring Kernel 1') plt.subplot(2,2,2); plt.imshow(np.squeeze(W[0][:,:,:,1])); plt.colorbar(); plt.title('Ring Kernel 2') plt.subplot(2,2,3); plt.imshow(np.squeeze(W[-1][:,:,0])); plt.colorbar(); plt.title('Multiplicative Layer 1') plt.subplot(2,2,4); 
plt.imshow(np.squeeze(W[-1][:,:,1])); plt.colorbar(); plt.title('Multiplicative Layer 2'); """ Explanation: Show the learned filters End of explanation """ m1 = cm.load(fnames, var_name_hdf5='Y') # original data m2 = cm.load(fname_mmap) # background subtracted data m3 = m1 - m2 # estimated background m4 = cm.movie(cnm.estimates.A[:,cnm.estimates.idx_components].dot(cnm.estimates.C[cnm.estimates.idx_components])).reshape(dims + (T,)).transpose(2,0,1) # estimated components nn = 0.01 mm = 1 - nn/4 # normalize movies by quantiles m1 = (m1 - np.quantile(m1[:1000], nn))/(np.quantile(m1[:1000], mm) - np.quantile(m1[:1000], nn)) m2 = (m2 - np.quantile(m2[:1000], nn))/(np.quantile(m2[:1000], mm) - np.quantile(m2[:1000], nn)) m3 = (m3 - np.quantile(m3[:1000], nn))/(np.quantile(m3[:1000], mm) - np.quantile(m3[:1000], nn)) m4 = (m4 - np.quantile(m4[:1000], nn))/(np.quantile(m4[:1000], mm) - np.quantile(m4[:1000], nn)) m = cm.concatenate((cm.concatenate((m1.transpose(0,2,1), m3.transpose(0,2,1)), axis=2), cm.concatenate((m2.transpose(0,2,1), m4), axis=2)), axis=1) m[:3000].play(magnification=2, q_min=1, plot_text=True, save_movie=True, movie_name=os.path.join(fld_name, 'movie.avi')) """ Explanation: Make a movie End of explanation """
Kaggle/learntools
notebooks/deep_learning_intro/raw/tut3.ipynb
apache-2.0
#$HIDE_INPUT$ import pandas as pd from IPython.display import display red_wine = pd.read_csv('../input/dl-course-data/red-wine.csv') # Create training and validation splits df_train = red_wine.sample(frac=0.7, random_state=0) df_valid = red_wine.drop(df_train.index) display(df_train.head(4)) # Scale to [0, 1] max_ = df_train.max(axis=0) min_ = df_train.min(axis=0) df_train = (df_train - min_) / (max_ - min_) df_valid = (df_valid - min_) / (max_ - min_) # Split features and target X_train = df_train.drop('quality', axis=1) X_valid = df_valid.drop('quality', axis=1) y_train = df_train['quality'] y_valid = df_valid['quality'] """ Explanation: Introduction In the first two lessons, we learned how to build fully-connected networks out of stacks of dense layers. When first created, all of the network's weights are set randomly -- the network doesn't "know" anything yet. In this lesson we're going to see how to train a neural network; we're going to see how neural networks learn. As with all machine learning tasks, we begin with a set of training data. Each example in the training data consists of some features (the inputs) together with an expected target (the output). Training the network means adjusting its weights in such a way that it can transform the features into the target. In the 80 Cereals dataset, for instance, we want a network that can take each cereal's 'sugar', 'fiber', and 'protein' content and produce a prediction for that cereal's 'calories'. If we can successfully train a network to do that, its weights must represent in some way the relationship between those features and that target as expressed in the training data. In addition to the training data, we need two more things: - A "loss function" that measures how good the network's predictions are. - An "optimizer" that can tell the network how to change its weights. The Loss Function We've seen how to design an architecture for a network, but we haven't seen how to tell a network what problem to solve. This is the job of the loss function. The loss function measures the disparity between the the target's true value and the value the model predicts. Different problems call for different loss functions. We have been looking at regression problems, where the task is to predict some numerical value -- calories in 80 Cereals, rating in Red Wine Quality. Other regression tasks might be predicting the price of a house or the fuel efficiency of a car. A common loss function for regression problems is the mean absolute error or MAE. For each prediction y_pred, MAE measures the disparity from the true target y_true by an absolute difference abs(y_true - y_pred). The total MAE loss on a dataset is the mean of all these absolute differences. <figure style="padding: 1em;"> <img src="https://i.imgur.com/VDcvkZN.png" width="500" alt="A graph depicting error bars from data points to the fitted line.."> <figcaption style="textalign: center; font-style: italic"><center>The mean absolute error is the average length between the fitted curve and the data points. </center></figcaption> </figure> Besides MAE, other loss functions you might see for regression problems are the mean-squared error (MSE) or the Huber loss (both available in Keras). During training, the model will use the loss function as a guide for finding the correct values of its weights (lower loss is better). In other words, the loss function tells the network its objective. 
The Optimizer - Stochastic Gradient Descent We've described the problem we want the network to solve, but now we need to say how to solve it. This is the job of the optimizer. The optimizer is an algorithm that adjusts the weights to minimize the loss. Virtually all of the optimization algorithms used in deep learning belong to a family called stochastic gradient descent. They are iterative algorithms that train a network in steps. One step of training goes like this: 1. Sample some training data and run it through the network to make predictions. 2. Measure the loss between the predictions and the true values. 3. Finally, adjust the weights in a direction that makes the loss smaller. Then just do this over and over until the loss is as small as you like (or until it won't decrease any further.) <figure style="padding: 1em;"> <img src="https://i.imgur.com/rFI1tIk.gif" width="1600" alt="Fitting a line batch by batch. The loss decreases and the weights approach their true values."> <figcaption style="textalign: center; font-style: italic"><center>Training a neural network with Stochastic Gradient Descent. </center></figcaption> </figure> Each iteration's sample of training data is called a minibatch (or often just "batch"), while a complete round of the training data is called an epoch. The number of epochs you train for is how many times the network will see each training example. The animation shows the linear model from Lesson 1 being trained with SGD. The pale red dots depict the entire training set, while the solid red dots are the minibatches. Every time SGD sees a new minibatch, it will shift the weights (w the slope and b the y-intercept) toward their correct values on that batch. Batch after batch, the line eventually converges to its best fit. You can see that the loss gets smaller as the weights get closer to their true values. Learning Rate and Batch Size Notice that the line only makes a small shift in the direction of each batch (instead of moving all the way). The size of these shifts is determined by the learning rate. A smaller learning rate means the network needs to see more minibatches before its weights converge to their best values. The learning rate and the size of the minibatches are the two parameters that have the largest effect on how the SGD training proceeds. Their interaction is often subtle and the right choice for these parameters isn't always obvious. (We'll explore these effects in the exercise.) Fortunately, for most work it won't be necessary to do an extensive hyperparameter search to get satisfactory results. Adam is an SGD algorithm that has an adaptive learning rate that makes it suitable for most problems without any parameter tuning (it is "self tuning", in a sense). Adam is a great general-purpose optimizer. Adding the Loss and Optimizer After defining a model, you can add a loss function and optimizer with the model's compile method: model.compile( optimizer="adam", loss="mae", ) Notice that we are able to specify the loss and optimizer with just a string. You can also access these directly through the Keras API -- if you wanted to tune parameters, for instance -- but for us, the defaults will work fine. <blockquote style="margin-right:auto; margin-left:auto; background-color: #ebf9ff; padding: 1em; margin:24px;"> <strong>What's In a Name?</strong><br> The <strong>gradient</strong> is a vector that tells us in what direction the weights need to go. More precisely, it tells us how to change the weights to make the loss change <em>fastest</em>. 
We call our process gradient <strong>descent</strong> because it uses the gradient to <em>descend</em> the loss curve towards a minimum. <strong>Stochastic</strong> means "determined by chance." Our training is <em>stochastic</em> because the minibatches are <em>random samples</em> from the dataset. And that's why it's called SGD! </blockquote> Example - Red Wine Quality Now we know everything we need to start training deep learning models. So let's see it in action! We'll use the Red Wine Quality dataset. This dataset consists of physiochemical measurements from about 1600 Portuguese red wines. Also included is a quality rating for each wine from blind taste-tests. How well can we predict a wine's perceived quality from these measurements? We've put all of the data preparation into this next hidden cell. It's not essential to what follows so feel free to skip it. One thing you might note for now though is that we've rescaled each feature to lie in the interval $[0, 1]$. As we'll discuss more in Lesson 5, neural networks tend to perform best when their inputs are on a common scale. End of explanation """ print(X_train.shape) """ Explanation: How many inputs should this network have? We can discover this by looking at the number of columns in the data matrix. Be sure not to include the target ('quality') here -- only the input features. End of explanation """ from tensorflow import keras from tensorflow.keras import layers model = keras.Sequential([ layers.Dense(512, activation='relu', input_shape=[11]), layers.Dense(512, activation='relu'), layers.Dense(512, activation='relu'), layers.Dense(1), ]) """ Explanation: Eleven columns means eleven inputs. We've chosen a three-layer network with over 1500 neurons. This network should be capable of learning fairly complex relationships in the data. End of explanation """ model.compile( optimizer='adam', loss='mae', ) """ Explanation: Deciding the architecture of your model should be part of a process. Start simple and use the validation loss as your guide. You'll learn more about model development in the exercises. After defining the model, we compile in the optimizer and loss function. End of explanation """ history = model.fit( X_train, y_train, validation_data=(X_valid, y_valid), batch_size=256, epochs=10, ) """ Explanation: Now we're ready to start the training! We've told Keras to feed the optimizer 256 rows of the training data at a time (the batch_size) and to do that 10 times all the way through the dataset (the epochs). End of explanation """ import pandas as pd # convert the training history to a dataframe history_df = pd.DataFrame(history.history) # use Pandas native plot method history_df['loss'].plot(); """ Explanation: You can see that Keras will keep you updated on the loss as the model trains. Often, a better way to view the loss though is to plot it. The fit method in fact keeps a record of the loss produced during training in a History object. We'll convert the data to a Pandas dataframe, which makes the plotting easy. End of explanation """
GoogleCloudPlatform/mlops-on-gcp
model_serving/caip-load-testing/03-analyze-results.ipynb
apache-2.0
import time from datetime import datetime from typing import List import numpy as np import pandas as pd import google.auth from google.cloud import logging_v2 from google.cloud.monitoring_dashboard.v1 import DashboardsServiceClient from google.cloud.logging_v2 import MetricsServiceV2Client from google.cloud.monitoring_v3.query import Query from google.cloud.monitoring_v3 import MetricServiceClient import matplotlib.pyplot as plt """ Explanation: Analyzing Locust Load Testing Results This Notebook demonstrates how to analyze AI Platform Prediction load testing runs using metrics captured in Cloud Monitoring. This Notebook build on the 02-perf-testing.ipynb notebook that shows how to configure and run load tests against AI Platform Prediction using Locust.io. The outlined testing process results in a Pandas dataframe that aggregates the standard AI Platform Prediction metrics with a set of custom, log-based metrics generated from log entries captured by the Locust testing script. The Notebook covers the following steps: 1. Retrieve and consolidate test results from Cloud Monitoring 2. Analyze and visualize utilization and latency results Setup This notebook was tested on AI Platform Notebooks using the standard TF 2.2 image. Import libraries End of explanation """ PROJECT_ID = '[your-project-id]' # Set your project Id MODEL_NAME = 'resnet_classifier' MODEL_VERSION = 'v1' LOG_NAME = 'locust' # Set your log name TEST_ID = 'test-20200829-190943' # Set your test Id TEST_START_TIME = datetime.fromisoformat('2020-08-28T21:30:00-00:00') # Set your test start time TEST_END_TIME = datetime.fromisoformat('2020-08-29T22:00:00-00:00') # Set your test end time """ Explanation: Configure GCP environment settings End of explanation """ creds , _ = google.auth.default() client = MetricServiceClient(credentials=creds) project_path = client.project_path(PROJECT_ID) filter = 'metric.type=starts_with("ml.googleapis.com/prediction")' for descriptor in client.list_metric_descriptors(project_path, filter_=filter): print(descriptor.type) """ Explanation: 1. Retrieve and consolidate test results Locust's web interface along with a Cloud Monitoring dashboard provide a cursory view into performance of a tested AI Platform Prediction model version. A more thorough analysis can be performed by consolidating metrics collected during a test and using data analytics and visualization tools. In this section, you will retrieve the metrics captured in Cloud Monitoring and consolidate them into a single Pandas dataframe. 1.1 List available AI Platform Prediction metrics End of explanation """ filter = 'metric.type=starts_with("logging.googleapis.com/user")' for descriptor in client.list_metric_descriptors(project_path, filter_=filter): print(descriptor.type) """ Explanation: 1.2. List custom log based metrics End of explanation """ def retrieve_metrics(client, project_id, start_time, end_time, model, model_version, test_id, log_name): """ Retrieves test metrics from Cloud Monitoring. """ def _get_aipp_metric(metric_type: str, labels: List[str]=[], metric_name=None)-> pd.DataFrame: """ Retrieves a specified AIPP metric. 
""" query = Query(client, project_id, metric_type=metric_type) query = query.select_interval(end_time, start_time) query = query.select_resources(model_id=model) query = query.select_resources(version_id=model_version) if metric_name: labels = ['metric'] + labels df = query.as_dataframe(labels=labels) if not df.empty: if metric_name: df.columns.set_levels([metric_name], level=0, inplace=True) df = df.set_index(df.index.round('T')) return df def _get_locust_metric(metric_type: str, labels: List[str]=[], metric_name=None)-> pd.DataFrame: """ Retrieves a specified custom log-based metric. """ query = Query(client, project_id, metric_type=metric_type) query = query.select_interval(end_time, start_time) query = query.select_metrics(log=log_name) query = query.select_metrics(test_id=test_id) if metric_name: labels = ['metric'] + labels df = query.as_dataframe(labels=labels) if not df.empty: if metric_name: df.columns.set_levels([metric_name], level=0, inplace=True) df = df.apply(lambda row: [metric.mean for metric in row]) df = df.set_index(df.index.round('T')) return df # Retrieve GPU duty cycle metric_type = 'ml.googleapis.com/prediction/online/accelerator/duty_cycle' metric = _get_aipp_metric(metric_type, ['replica_id', 'signature'], 'duty_cycle') df = metric # Retrieve CPU utilization metric_type = 'ml.googleapis.com/prediction/online/cpu/utilization' metric = _get_aipp_metric(metric_type, ['replica_id', 'signature'], 'cpu_utilization') if not metric.empty: df = df.merge(metric, how='outer', right_index=True, left_index=True) # Retrieve prediction count metric_type = 'ml.googleapis.com/prediction/prediction_count' metric = _get_aipp_metric(metric_type, ['replica_id', 'signature'], 'prediction_count') if not metric.empty: df = df.merge(metric, how='outer', right_index=True, left_index=True) # Retrieve responses per second metric_type = 'ml.googleapis.com/prediction/response_count' metric = _get_aipp_metric(metric_type, ['replica_id', 'signature'], 'response_rate') if not metric.empty: metric = (metric/60).round(2) df = df.merge(metric, how='outer', right_index=True, left_index=True) # Retrieve backend latencies metric_type = 'ml.googleapis.com/prediction/latencies' metric = _get_aipp_metric(metric_type, ['latency_type', 'replica_id', 'signature']) if not metric.empty: metric = metric.apply(lambda row: [round(latency.mean/1000,1) for latency in row]) metric.columns.set_names(['metric', 'replica_id', 'signature'], inplace=True) level_values = ['Latency: ' + value for value in metric.columns.get_level_values(level=0)] metric.columns.set_levels(level_values, level=0, inplace=True) df = df.merge(metric, how='outer', right_index=True, left_index=True) # Retrieve Locust latency metric_type = 'logging.googleapis.com/user/locust_latency' metric = _get_locust_metric(metric_type, ['replica_id', 'signature'], 'Latency: client') if not metric.empty: metric = metric.round(2).replace([0], np.nan) df = df.merge(metric, how='outer', right_index=True, left_index=True) # Retrieve Locust user count metric_type = 'logging.googleapis.com/user/locust_users' metric = _get_locust_metric(metric_type, ['replica_id', 'signature'], 'User count') if not metric.empty: metric = metric.round() df = df.merge(metric, how='outer', right_index=True, left_index=True) # Retrieve Locust num_failures metric_type = 'logging.googleapis.com/user/num_failures' metric = _get_locust_metric(metric_type, ['replica_id', 'signature'], 'Num of failures') if not metric.empty: metric = metric.round() df = df.merge(metric, how='outer', 
right_index=True, left_index=True) # Retrieve Locust num_failures metric_type = 'logging.googleapis.com/user/num_requests' metric = _get_locust_metric(metric_type, ['replica_id', 'signature'], 'Num of requests') if not metric.empty: metric = metric.round() df = df.merge(metric, how='outer', right_index=True, left_index=True) return df test_result = retrieve_metrics( client, PROJECT_ID, TEST_START_TIME, TEST_END_TIME, MODEL_NAME, MODEL_VERSION, TEST_ID, LOG_NAME ) test_result.head().T """ Explanation: 1.3. Retrieve test metrics Define a helper function that retrieves test metrics from Cloud Monitoring End of explanation """ gpu_utilization_results = test_result['duty_cycle'] gpu_utilization_results.columns = gpu_utilization_results.columns.get_level_values(0) ax = gpu_utilization_results.plot(figsize=(14, 9), legend=True) ax.set_xlabel('Time', fontsize=16) ax.set_ylabel('Utilization ratio', fontsize=16) _ = ax.set_title("GPU Utilization", fontsize=20) """ Explanation: The retrieved dataframe uses hierarchical indexing for column names. The reason is that some metrics contain multiple time series. For example, the GPU duty_cycle metric includes a time series of measures per each GPU used in the deployment (denoted as replica_id). The top level of the column index is a metric name. The second level is a replica_id. The third level is a signature of a model. All metrics are aligned on the same timeline. 2. Analyzing and Visualizing test results In the context of our scenario the key concern is GPU utilization at various levels of throughput and latency. The primary metric exposed by AI Platform Prediction to monitor GPU utilization is duty cycle. This metric captures an average fraction of time over the 60 second period during which the accelerator(s) were actively processing. 2.1. GPU utilization End of explanation """ cpu_utilization_results = test_result['cpu_utilization'] cpu_utilization_results.columns = cpu_utilization_results.columns.get_level_values(0) ax = cpu_utilization_results.plot(figsize=(14, 9), legend=True) ax.set_xlabel('Time', fontsize=16) ax.set_ylabel('Utilization ratio', fontsize=16) _ = ax.set_title("CPU Utilization", fontsize=20) """ Explanation: 2.2. CPU utilization End of explanation """ latency_results = test_result[['Latency: model', 'Latency: client']] latency_results.columns = latency_results.columns.get_level_values(0) ax = latency_results.plot(figsize=(14, 9), legend=True) ax.set_xlabel('Time', fontsize=16) ax.set_ylabel('milisecond', fontsize=16) _ = ax.set_title("Latency", fontsize=20) """ Explanation: 2.3. Latency End of explanation """ throughput_results = test_result[['response_rate', 'User count']] throughput_results.columns = throughput_results.columns.get_level_values(0) ax = throughput_results.plot(figsize=(14, 9), legend=True) ax.set_xlabel('Time', fontsize=16) ax.set_ylabel('Count', fontsize=16) _ = ax.set_title("Response Rate vs User Count", fontsize=20) """ Explanation: 2.4. Request throughput We are going to use the response_rate metric, which tracks a number of responses returned by AI Platform Prediction over a 1 minute interval. 
End of explanation """ logging_client = MetricsServiceV2Client(credentials=creds) parent = logging_client.project_path(PROJECT_ID) for element in logging_client.list_log_metrics(parent): metric_path = logging_client.metric_path(PROJECT_ID, element.name) logging_client.delete_log_metric(metric_path) print("Deleted metric: ", metric_path) display_name = 'AI Platform Prediction and Locust' dashboard_service_client = DashboardsServiceClient(credentials=creds) parent = 'projects/{}'.format(PROJECT_ID) for dashboard in dashboard_service_client.list_dashboards(parent): if dashboard.display_name == display_name: dashboard_service_client.delete_dashboard(dashboard.name) print("Deleted dashboard:", dashboard.name) """ Explanation: Cleaning up: delete the log-based metrics and dasboard End of explanation """
Neuroglycerin/neukrill-net-work
notebooks/augmentation/Preliminary Online Augmentation Results.ipynb
mit
import pylearn2.utils import pylearn2.config import theano import neukrill_net.dense_dataset import neukrill_net.utils import numpy as np %matplotlib inline import matplotlib.pyplot as plt import holoviews as hl %load_ext holoviews.ipython import sklearn.metrics cd .. settings = neukrill_net.utils.Settings("settings.json") run_settings = neukrill_net.utils.load_run_settings( "run_settings/replicate_8aug.json", settings, force=True) model = pylearn2.utils.serial.load(run_settings['alt_picklepath']) c = 'train_objective' channel = model.monitor.channels[c] """ Explanation: The following are the results we've got from online augmentation so far. Some bugs have been fixed by Scott since then so these might be redundant. If they're not redundant then they are very bad. Loading the pickle End of explanation """ plt.title(c) plt.plot(channel.example_record,channel.val_record) c = 'train_y_nll' channel = model.monitor.channels[c] plt.title(c) plt.plot(channel.example_record,channel.val_record) def plot_monitor(c = 'valid_y_nll'): channel = model.monitor.channels[c] plt.title(c) plt.plot(channel.example_record,channel.val_record) return None plot_monitor() plot_monitor(c="valid_objective") """ Explanation: Replicating 8aug The DensePNGDataset run with 8 augmentations got us most of the way to our best score in one go. If we can replicate that results with online augmentation then we can be pretty confident that online augmentation is a good idea. Unfortunately, it looks like we can't: End of explanation """ %run check_test_score.py run_settings/replicate_8aug.json """ Explanation: Would actually like to know what kind of score this model gets on the check_test_score script. End of explanation """ run_settings = neukrill_net.utils.load_run_settings( "run_settings/online_manyaug.json", settings, force=True) model = pylearn2.utils.serial.load(run_settings['alt_picklepath']) plot_monitor(c="valid_objective") """ Explanation: So we can guess that the log loss score we're seeing is in fact correct. There are definitely some bugs in the ListDataset code. Many Augmentations We want to be able to use online augmentations to run large combinations of different augmentations on the images. This model had almost everything turned on, a little: End of explanation """ settings = neukrill_net.utils.Settings("settings.json") run_settings = neukrill_net.utils.load_run_settings( "run_settings/alexnet_based_onlineaug.json", settings, force=True) model = pylearn2.utils.serial.load(run_settings['pickle abspath']) plot_monitor(c="train_y_nll") plot_monitor(c="valid_y_nll") plot_monitor(c="train_objective") plot_monitor(c="valid_objective") """ Explanation: Looks like it's completely incapable of learning. These problems suggest that the augmentation might be garbling the images; making them useless for learning from. Or worse, garbling the order so each image doesn't correspond to its label. Transformer Results We also have results from a network trained using a Transformer dataset, which is how online augmentation is supposed to be supported in Pylearn2. End of explanation """
AEW2015/PYNQ_PR_Overlay
Pynq-Z1/notebooks/examples/tracebuffer_i2c.ipynb
bsd-3-clause
from pprint import pprint from time import sleep from pynq import PL from pynq import Overlay from pynq.drivers import Trace_Buffer from pynq.iop import Pmod_TMP2 from pynq.iop import PMODA from pynq.iop import PMODB from pynq.iop import ARDUINO ol = Overlay("base.bit") ol.download() pprint(PL.ip_dict) """ Explanation: Trace Buffer - Tracing IIC Transactions The Trace_Buffer class can monitor the waveform and transations on PMODA, PMODB, and ARDUINO connectors. This demo shows how to use this class to track IIC transactions. For this demo, users have to connect the Pmod TMP2 sensor to PMODA. Step 1: Overlay Management Users have to import all the necessary classes. Make sure to use the right bitstream. End of explanation """ tmp2 = Pmod_TMP2(PMODA) tmp2.set_log_interval_ms(1) """ Explanation: Step 2: Instantiating Temperature Sensor Although this demo can also be done on PMODB, we use PMODA in this demo. Set the log interval to be 1ms. This means the IO Processor (IOP) will read temperature values every 1ms. End of explanation """ tr_buf = Trace_Buffer(PMODA,"i2c",samplerate=1000000) # Start the trace buffer tr_buf.start() # Issue reads for 1 second tmp2.start_log() sleep(1) tmp2_log = tmp2.get_log() # Stop the trace buffer tr_buf.stop() """ Explanation: Step 3: Tracking Transactions Instantiating the trace buffer with IIC protocol. The sample rate is set to 1MHz. Although the IIC clock is only 100kHz, we still have to use higher sample rate to keep track of IIC control signals from IOP. After starting the trace buffer DMA, also start to issue IIC reads for 1 second. Then stop the trace buffer DMA. End of explanation """ # Configuration for PMODA start = 600 stop = 10000 tri_sel=[0x40000,0x80000] tri_0=[0x4,0x8] tri_1=[0x400,0x800] mask = 0x0 # Parsing and decoding tr_buf.parse("i2c_trace.csv", start,stop,mask,tri_sel,tri_0,tri_1) tr_buf.set_metadata(['SDA','SCL']) tr_buf.decode("i2c_trace.pd") """ Explanation: Step 4: Parsing and Decoding Transactions The trace buffer object is able to parse the transactions into a *.csv file (saved into the same folder as this script). The input arguments for the parsing method is: * start : the starting sample number of the trace. * stop : the stopping sample number of the trace. * tri_sel: masks for tri-state selection bits. * tri_0: masks for pins selected when the corresponding tri_sel = 0. * tri_0: masks for pins selected when the corresponding tri_sel = 1. * mask: mask for pins selected always. For PMODB, the configuration of the masks can be: * tri_sel=[0x40000<<32,0x80000<<32] * tri_0=[0x4<<32,0x8<<32] * tri_1=[0x400<<32,0x800<<32] * mask = 0x0 Then the trace buffer object can also decode the transactions using the open-source sigrok decoders. The decoded file (*.pd) is saved into the same folder as this script. Reference: https://sigrok.org/wiki/Main_Page End of explanation """ s0 = 1 s1 = 5000 tr_buf.display(s0,s1) """ Explanation: Step 5: Displaying the Result The final waveform and decoded transactions are shown using the open-source wavedrom library. The two input arguments (s0 and s1 ) indicate the starting and stopping location where the waveform is shown. The valid range for s0 and s1 is: 0 &lt; s0 &lt; s1 &lt; (stop-start), where start and stop are defined in the last step. Reference: https://www.npmjs.com/package/wavedrom End of explanation """
rnder/data-science-from-scratch
notebook/ch21_network_analysis.ipynb
unlicense
from __future__ import division import math, random, re from collections import defaultdict, Counter, deque from linear_algebra import dot, get_row, get_column, make_matrix, magnitude, scalar_multiply, shape, distance from functools import partial users = [ { "id": 0, "name": "Hero" }, { "id": 1, "name": "Dunn" }, { "id": 2, "name": "Sue" }, { "id": 3, "name": "Chi" }, { "id": 4, "name": "Thor" }, { "id": 5, "name": "Clive" }, { "id": 6, "name": "Hicks" }, { "id": 7, "name": "Devin" }, { "id": 8, "name": "Kate" }, { "id": 9, "name": "Klein" } ] """ Explanation: 21์žฅ ๋„คํŠธ์›Œํฌ ๋ถ„์„ ๋งŽ์€ ๋ฐ์ดํ„ฐ ๋ฌธ์ œ๋Š” ๋…ธ๋“œ(node)์™€ ๊ทธ ์‚ฌ์ด๋ฅผ ์—ฐ๊ฒฐํ•˜๋Š” ์—ฃ์ง€(edge)๋กœ ๊ตฌ์„ฑ๋œ ๋„คํŠธ์›Œํฌ(network)์˜ ๊ด€์ ์—์„œ ๋ณผ ์ˆ˜ ์žˆ๋‹ค. ์˜ˆ๋ฅผ๋“ค์–ด, ํŽ˜์ด์Šค๋ถ์—์„œ๋Š” ์‚ฌ์šฉ์ž๊ฐ€ ๋…ธ๋“œ๋ผ๋ฉด ๊ทธ๋“ค์˜ ์นœ๊ตฌ ๊ด€๊ณ„๋Š” ์—ฃ์ง€๊ฐ€ ๋œ๋‹ค. ์›น์—์„œ๋Š” ๊ฐ ์›นํŽ˜์ด์ง€๊ฐ€ ๋…ธ๋“œ์ด๊ณ  ํŽ˜์ด์ง€ ์‚ฌ์ด๋ฅผ ์—ฐ๊ฒฐํ•˜๋Š” ํ•˜์ดํผ๋งํฌ๊ฐ€ ์—ฃ์ง€๊ฐ€ ๋œ๋‹ค. ํŽ˜์ด์Šค๋ถ์˜ ์นœ๊ตฌ ๊ด€๊ณ„๋Š” ์ƒํ˜ธ์ ์ด๋‹ค. ๋‚ด๊ฐ€ ๋‹น์‹ ๊ณผ ์นœ๊ตฌ๋ผ๋ฉด ๋‹น์‹ ์€ ๋ฐ˜๋“œ์‹œ ๋‚˜์™€ ์นœ๊ตฌ์ด๋‹ค. ์ฆ‰, ์ด๋Ÿฐ ๊ฒฝ์šฐ๋ฅผ ์—ฃ์ง€์— ๋ฐฉํ–ฅ์ด ์—†๋‹ค(undirected)๊ณ  ํ•œ๋‹ค. ๋ฐ˜๋ฉด ํ•˜์ดํผ๋งํฌ๋Š” ๊ทธ๋ ‡์ง€ ์•Š๋‹ค. ๋‚ด ํ™ˆํŽ˜์ด์ง€์—๋Š” ๋Œ€ํ•œ๋ฏผ๊ตญ ๊ตญํšŒ ํ™ˆํŽ˜์ด์ง€์— ๋Œ€ํ•œ ๋งํฌ๊ฐ€ ์žˆ์–ด๋„, ๋ฐ˜๋Œ€๋กœ ๋Œ€ํ•œ๋ฏผ๊ตญ ๊ตญํšŒ ํ™ˆํŽ˜์ด์ง€์—๋Š” ๋‚ด ํ™ˆํŽ˜์ด์ง€์— ๋Œ€ํ•œ ๋งํฌ๊ฐ€ ์—†์„ ์ˆ˜ ์žˆ๋‹ค. ์ด๋Ÿฐ ๋„คํŠธ์›Œํฌ์—๋Š” ๋ฐฉํ–ฅ์ด ์žˆ๊ธฐ ๋•Œ๋ฌธ์— ๋ฐฉํ–ฅ์„ฑ ๋„คํŠธ์›Œํฌ(directed network)๋ผ๊ณ  ํ•œ๋‹ค. 21.1 ๋งค๊ฐœ ์ค‘์‹ฌ์„ฑ 1์žฅ์—์„œ ์šฐ๋ฆฌ๋Š” ๋ฐ์ดํ…€ ๋„คํŠธ์›Œํฌ์—์„œ ์นœ๊ตฌ์˜ ์ˆ˜๋ฅผ ์…ˆ์œผ๋กœ์จ ์ค‘์‹ฌ์ด ๋˜๋Š” ์ฃผ์š” ํ•ต์‹ฌ ์ธ๋ฌผ์„ ์ฐพ์•˜๋‹ค. ์—ฌ๊ธฐ์„œ๋Š” ๋ช‡ ๊ฐ€์ง€ ์ถ”๊ฐ€์ ์ธ ์ ‘๊ทผ๋ฒ•์„ ์‚ดํŽด๋ณด์ž. End of explanation """ friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4), (4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)] """ Explanation: ๋„คํŠธ์›Œํฌ๋Š” ์‚ฌ์šฉ์ž์™€ ์นœ๊ตฌ ๊ด€๊ณ„๋ฅผ ๋‚˜ํƒ€๋‚ธ๋‹ค. End of explanation """ # give each user a friends list for user in users: user["friends"] = [] # and populate it for i, j in friendships: # this works because users[i] is the user whose id is i users[i]["friends"].append(users[j]) # add i as a friend of j users[j]["friends"].append(users[i]) # add j as a friend of i """ Explanation: ์นœ๊ตฌ ๋ชฉ๋ก์„ ๊ฐ ์‚ฌ์šฉ์ž์˜ dict์— ์ถ”๊ฐ€ํ•˜๊ธฐ๋„ ํ–ˆ๋‹ค. End of explanation """ # # Betweenness Centrality # def shortest_paths_from(from_user): # ํŠน์ • ์‚ฌ์šฉ์ž๋กœ๋ถ€ํ„ฐ ๋‹ค๋ฅธ ์‚ฌ์šฉ์ž๊นŒ์ง€์˜ ๋ชจ๋“  ์ตœ๋‹จ ๊ฒฝ๋กœ๋ฅผ ํฌํ•จํ•˜๋Š” dict shortest_paths_to = { from_user["id"] : [[]] } # ํ™•์ธํ•ด์•ผ ํ•˜๋Š” (์ด์ „ ์‚ฌ์šฉ์ž, ๋‹ค์Œ ์‚ฌ์šฉ์ž) ํ # ๋ชจ๋“  (from_user, from_user์˜ ์นœ๊ตฌ) ์Œ์œผ๋กœ ์‹œ์ž‘ frontier = deque((from_user, friend) for friend in from_user["friends"]) # ํ๊ฐ€ ๋นŒ ๋•Œ๊นŒ์ง€ ๋ฐ˜๋ณต while frontier: prev_user, user = frontier.popleft() # ํ์˜ ์ฒซ ๋ฒˆ์งธ ์‚ฌ์šฉ์ž๋ฅผ user_id = user["id"] # ์ œ๊ฑฐ # ํ์— ์‚ฌ์šฉ์ž๋ฅผ ์ถ”๊ฐ€ํ•˜๋Š” ๋ฐฉ๋ฒ•์„ ๊ณ ๋ คํ•ด ๋ณด๋ฉด # prev_user๊นŒ์ง€์˜ ์ตœ๋‹จ ๊ฒฝ๋กœ๋ฅผ ์ด๋ฏธ ์•Œ๊ณ  ์žˆ์„ ์ˆ˜๋„ ์žˆ๋‹ค. paths_to_prev = shortest_paths_to[prev_user["id"]] paths_via_prev = [path + [user_id] for path in paths_to_prev] # ๋งŒ์•ฝ ์ตœ๋‹จ ๊ฒฝ๋กœ๋ฅผ ์ด๋ฏธ ์•Œ๊ณ  ์žˆ๋‹ค๋ฉด old_paths_to_here = shortest_paths_to.get(user_id, []) # ์ง€๊ธˆ๊นŒ์ง€์˜ ์ตœ๋‹จ ๊ฒฝ๋กœ๋Š” ๋ฌด์—‡์ผ๊นŒ? 
if old_paths_to_here: min_path_length = len(old_paths_to_here[0]) else: min_path_length = float('inf') # ๊ธธ์ง€ ์•Š์€ ์ƒˆ๋กœ์šด ๊ฒฝ๋กœ๋งŒ ์ €์žฅ new_paths_to_here = [path_via_prev for path_via_prev in paths_via_prev if len(path_via_prev) <= min_path_length and path_via_prev not in old_paths_to_here] shortest_paths_to[user_id] = old_paths_to_here + new_paths_to_here # ์•„์ง ํ•œ๋ฒˆ๋„ ๋ณด์ง€ ๋ชปํ•œ ์ด์›ƒ์„ frontier์— ์ถ”๊ฐ€ frontier.extend((user, friend) for friend in user["friends"] if friend["id"] not in shortest_paths_to) return shortest_paths_to """ Explanation: 1์žฅ์—์„œ ์—ฐ๊ฒฐ ์ค‘์‹ฌ์„ฑ(degree centrality)์„ ์‚ดํŽด๋ณผ ๋•Œ๋Š”, ์šฐ๋ฆฌ๊ฐ€ ์ง๊ด€์ ์œผ๋กœ ์ƒ๊ฐํ–ˆ๋˜ ์ฃผ์š” ์—ฐ๊ฒฐ๊ณ ๋ฆฌ๋“ค์ด ์„ ์ •๋˜์ง€ ์•Š์•„ ์•ฝ๊ฐ„ ์•„์‰ฌ์› ๋‹ค. ๋Œ€์•ˆ์œผ๋กœ ์‚ฌ์šฉํ•  ์ˆ˜ ์žˆ๋Š” ์ง€์ˆ˜ ์ค‘ ํ•˜๋‚˜๋Š” ๋งค๊ฐœ ์ค‘์‹ฌ์„ฑ(betweenness centrality)์ธ๋ฐ, ์ด๋Š” ๋‘ ์‚ฌ๋žŒ ์‚ฌ์ด์˜ ์ตœ๋‹จ ๊ฒฝ๋กœ์ƒ์— ๋นˆ๋ฒˆํ•˜๊ฒŒ ๋“ฑ์žฅํ•˜๋Š” ์‚ฌ๋žŒ๋“ค์ด ํฐ ๊ฐ’์„ ๊ฐ€์ง€๋Š” ์ง€์ˆ˜์ด๋‹ค. ๊ตฌ์ฒด์ ์œผ๋กœ๋Š”, ๋…ธ๋“œ $i$์˜ ๋งค๊ฐœ ์ค‘์‹ฌ์„ฑ์€ ๋‹ค๋ฅธ ๋ชจ๋“  ๋…ธ๋“œ $j,k$ ์Œ์˜ ์ตœ๋‹จ ๊ฒฝ๋กœ ์ค‘์—, $i$๋ฅผ ๊ฑฐ์น˜๋Š” ๊ฒฝ๋กœ์˜ ๋น„์œจ๋กœ ๊ณ„์‚ฐํ•œ๋‹ค. ์ž„์˜์˜ ๋‘ ์‚ฌ๋žŒ์ด ์ฃผ์–ด์กŒ์„ ๋•Œ ๊ทธ๋“ค ๊ฐ„์˜ ์ตœ๋‹จ ๊ฒฝ๋กœ๋ฅผ ๊ตฌํ•ด์•ผ ํ•œ๋‹ค. ์ด ์ฑ…์—์„œ๋Š” ๋œ ํšจ์œจ์ ์ด๋”๋ผ๋„ ํ›จ์”ฌ ์ดํ•ดํ•˜๊ธฐ ์‰ฌ์šด 'Breadth-first search'๋ผ๊ณ ๋„ ์•Œ๋ ค์ง„ ์•Œ๊ณ ๋ฆฌ์ฆ˜์„ ์‚ฌ์šฉํ•œ๋‹ค. End of explanation """ for user in users: user["shortest_paths"] = shortest_paths_from(user) """ Explanation: ๊ทธ๋ฆฌ๊ณ  ๊ฐ ๋…ธ๋“œ์— ๋Œ€ํ•ด ์ƒ์„ฑ๋œ dict๋“ค์„ ์ €์žฅํ•˜์ž. End of explanation """ for user in users: user["betweenness_centrality"] = 0.0 for source in users: source_id = source["id"] for target_id, paths in source["shortest_paths"].items(): # python2์—์„œ๋Š” items ๋Œ€์‹  iteritems ์‚ฌ์šฉ if source_id < target_id: # ์ž˜๋ชปํ•ด์„œ ๋‘ ๋ฒˆ ์„ธ์ง€ ์•Š๋„๋ก ์ฃผ์˜ํ•˜์ž num_paths = len(paths) # ์ตœ๋‹จ ๊ฒฝ๋กœ๊ฐ€ ๋ช‡ ๊ฐœ ์กด์žฌํ•˜๋Š”๊ฐ€? contrib = 1 / num_paths # ์ค‘์‹ฌ์„ฑ์— ๊ธฐ์—ฌํ•˜๋Š” ๊ฐ’ for path in paths: for id in path: if id not in [source_id, target_id]: users[id]["betweenness_centrality"] += contrib for user in users: print(user["id"], user["betweenness_centrality"]) """ Explanation: ๊ทธ๋Ÿฌ๋ฉด ์ด์ œ ๋งค๊ฐœ ์ค‘์‹ฌ์„ฑ์„ ๊ตฌํ•  ์ค€๋น„๊ฐ€ ๋‹ค ๋˜์—ˆ๋‹ค. ์ด์ œ ๊ฐ๊ฐ์˜ ์ตœ๋‹จ ๊ฒฝ๋กœ์— ํฌํ•จ๋˜๋Š” ๊ฐ ๋…ธ๋“œ์˜ ๋งค๊ฐœ ์ค‘์‹ฌ์„ฑ์— $1/n$์„ ๋”ํ•ด ์ฃผ์ž. End of explanation """ # # closeness centrality # def farness(user): """๋ชจ๋“  ์‚ฌ์šฉ์ž์™€์˜ ์ตœ๋‹จ ๊ฑฐ๋ฆฌ ํ•ฉ""" return sum(len(paths[0]) for paths in user["shortest_paths"].values()) """ Explanation: ์‚ฌ์šฉ์ž 0๊ณผ 9์˜ ์ตœ๋‹จ ๊ฒฝ๋กœ ์‚ฌ์ด์—๋Š” ๋‹ค๋ฅธ ์‚ฌ์šฉ์ž๊ฐ€ ์—†์œผ๋ฏ€๋กœ ๋งค๊ฐœ ์ค‘์‹ฌ์„ฑ์ด 0์ด๋‹ค. ๋ฐ˜๋ฉด ์‚ฌ์šฉ์ž 3, 4, 5๋Š” ์ตœ๋‹จ ๊ฒฝ๋กœ์ƒ์— ๋ฌด์ฒ™ ๋นˆ๋ฒˆํ•˜๊ฒŒ ์œ„์น˜ํ•˜๊ธฐ ๋•Œ๋ฌธ์— ๋†’์€ ๋งค๊ฐœ ์ค‘์‹ฌ์„ฑ์„ ๊ฐ€์ง„๋‹ค. ๋Œ€๊ฒŒ ์ค‘์‹ฌ์„ฑ์˜ ์ ˆ๋Œ“๊ฐ’ ์ž์ฒด๋Š” ํฐ ์˜๋ฏธ๋ฅผ ๊ฐ€์ง€์ง€ ์•Š๊ณ , ์ƒ๋Œ€๊ฐ’๋งŒ์ด ์˜๋ฏธ๋ฅผ ๊ฐ€์ง„๋‹ค. ๊ทธ ์™ธ์— ์‚ดํŽด๋ณผ ์ˆ˜ ์žˆ๋Š” ์ค‘์‹ฌ์„ฑ ์ง€ํ‘œ ์ค‘ ํ•˜๋‚˜๋Š” ๊ทผ์ ‘ ์ค‘์‹ฌ์„ฑ(closeness centrality)์ด๋‹ค. ๋จผ์ € ๊ฐ ์‚ฌ์šฉ์ž์˜ ์›์ ‘์„ฑ(farness)์„ ๊ณ„์‚ฐํ•œ๋‹ค. ์›์ ‘์„ฑ์ด๋ž€ from_user์™€ ๋‹ค๋ฅธ ๋ชจ๋“  ์‚ฌ์šฉ์ž์˜ ์ตœ๋‹จ ๊ฒฝ๋กœ๋ฅผ ํ•ฉํ•œ ๊ฐ’์ด๋‹ค. End of explanation """ for user in users: user["closeness_centrality"] = 1 / farness(user) for user in users: print(user["id"], user["closeness_centrality"]) """ Explanation: ์ด์ œ ๊ทผ์ ‘ ์ค‘์‹ฌ์„ฑ์€ ๊ฐ„๋‹จํžˆ ๊ณ„์‚ฐํ•  ์ˆ˜ ์žˆ๋‹ค. 
End of explanation """ def matrix_product_entry(A, B, i, j): return dot(get_row(A, i), get_column(B, j)) def matrix_multiply(A, B): n1, k1 = shape(A) n2, k2 = shape(B) if k1 != n2: raise ArithmeticError("incompatible shapes!") return make_matrix(n1, k2, partial(matrix_product_entry, A, B)) def vector_as_matrix(v): """(list ํ˜•ํƒœ์˜) ๋ฒกํ„ฐ v๋ฅผ n x 1 ํ–‰๋ ฌ๋กœ ๋ณ€ํ™˜""" return [[v_i] for v_i in v] def vector_from_matrix(v_as_matrix): """n x 1 ํ–‰๋ ฌ์„ ๋ฆฌ์ŠคํŠธ๋กœ ๋ณ€ํ™˜""" return [row[0] for row in v_as_matrix] def matrix_operate(A, v): v_as_matrix = vector_as_matrix(v) product = matrix_multiply(A, v_as_matrix) return vector_from_matrix(product) """ Explanation: ๊ณ„์‚ฐ๋œ ๊ทผ์ ‘ ์ค‘์‹ฌ์„ฑ์˜ ํŽธ์ฐจ๋Š” ๋”์šฑ ์ž‘๋‹ค. ๋„คํŠธ์›Œํฌ ์ค‘์‹ฌ์— ์žˆ๋Š” ๋…ธ๋“œ์กฐ์ฐจ ์™ธ๊ณฝ์— ์œ„์น˜ํ•œ ๋…ธ๋“œ๋“ค๋กœ๋ถ€ํ„ฐ ๋ฉ€๋ฆฌ ๋–จ์–ด์ ธ ์žˆ๊ธฐ ๋•Œ๋ฌธ์ด๋‹ค. ์—ฌ๊ธฐ์„œ ๋ดค๋“ฏ์ด ์ตœ๋‹จ ๊ฒฝ๋กœ๋ฅผ ๊ณ„์‚ฐํ•˜๋Š” ๊ฒƒ์€ ๊ฝค๋‚˜ ๋ณต์žกํ•˜๋‹ค. ๊ทธ๋ ‡๊ธฐ ๋•Œ๋ฌธ์— ํฐ ๋„คํŠธ์›Œํฌ์—์„œ๋Š” ๊ทผ์ ‘ ์ค‘์‹ฌ์„ฑ์„ ์ž์ฃผ ์‚ฌ์šฉํ•˜์ง€ ์•Š๋Š”๋‹ค. ๋œ ์ง๊ด€์ ์ด์ง€๋งŒ ๋ณดํ†ต ๋” ์‰ฝ๊ฒŒ ๊ณ„์‚ฐํ•  ์ˆ˜ ์žˆ๋Š” ๊ณ ์œ ๋ฒกํ„ฐ ์ค‘์‹ฌ์„ฑ(eigenvector centrality)์„ ๋” ์ž์ฃผ ์‚ฌ์šฉํ•œ๋‹ค. 21.2 ๊ณ ์œ ๋ฒกํ„ฐ ์ค‘์‹ฌ์„ฑ ๊ณ ์œ ๋ฒกํ„ฐ ์ค‘์‹ฌ์„ฑ์— ๋Œ€ํ•ด ์•Œ์•„๋ณด๊ธฐ ์ „์— ๋จผ์ € ๊ณ ์œ ๋ฒกํ„ฐ๊ฐ€ ๋ฌด์—‡์ธ์ง€ ์‚ดํŽด๋ด์•ผ ํ•˜๊ณ , ๊ณ ์œ ๋ฒกํ„ฐ๊ฐ€ ๋ฌด์—‡์ธ์ง€ ์•Œ๊ธฐ ์œ„ํ•ด์„œ๋Š” ๋จผ์ € ํ–‰๋ ฌ ์—ฐ์‚ฐ์— ๋Œ€ํ•ด ์•Œ์•„๋ด์•ผ ํ•œ๋‹ค. 21.2.1 ํ–‰๋ ฌ ์—ฐ์‚ฐ End of explanation """ def find_eigenvector(A, tolerance=0.00001): guess = [1 for __ in A] while True: result = matrix_operate(A, guess) length = magnitude(result) next_guess = scalar_multiply(1/length, result) if distance(guess, next_guess) < tolerance: return next_guess, length # eigenvector, eigenvalue guess = next_guess """ Explanation: ํ–‰๋ ฌ A์˜ ๊ณ ์œ  ๋ฒกํ„ฐ๋ฅผ ์ฐพ๊ธฐ ์œ„ํ•ด, ์ž„์˜์˜ ๋ฒกํ„ฐ $v$๋ฅผ ๊ณจ๋ผ matrix_operate๋ฅผ ์ˆ˜ํ–‰ํ•˜๊ณ , ๊ฒฐ๊ณผ๊ฐ’์˜ ํฌ๊ธฐ๊ฐ€ 1์ด ๋˜๊ฒŒ ์žฌ์กฐ์ •ํ•˜๋Š” ๊ณผ์ •์„ ๋ฐ˜๋ณต ์ˆ˜ํ–‰ํ•œ๋‹ค. End of explanation """ rotate = [[0, 1], [-1, 0]] """ Explanation: ๊ฒฐ๊ณผ๊ฐ’์œผ๋กœ ๋ฐ˜ํ™˜๋˜๋Š” guess๋ฅผ matrix_operate๋ฅผ ํ†ตํ•ด ๊ฒฐ๊ณผ๊ฐ’์˜ ํฌ๊ธฐ๊ฐ€ 1์ธ ๋ฒกํ„ฐ๋กœ ์žฌ์กฐ์ •ํ•˜๋ฉด, ์ž๊ธฐ ์ž์‹ ์ด ๋ฐ˜ํ™˜๋œ๋‹ค. ์ฆ‰, ์—ฌ๊ธฐ์„œ guess๋Š” ๊ณ ์œ ๋ฒกํ„ฐ๋ผ๋Š” ๊ฒƒ์„ ์˜๋ฏธํ•œ๋‹ค. ๋ชจ๋“  ์‹ค์ˆ˜ ํ–‰๋ ฌ์— ๊ณ ์œ ๋ฒกํ„ฐ์™€ ๊ณ ์œ ๊ฐ’์ด ์žˆ๋Š” ๊ฒƒ์€ ์•„๋‹ˆ๋‹ค. ์˜ˆ๋ฅผ ๋“ค์–ด ์‹œ๊ณ„ ๋ฐฉํ–ฅ์œผ๋กœ 90๋„ ํšŒ์ „ํ•˜๋Š” ์—ฐ์‚ฐ์„ ํ•˜๋Š” ๋‹ค์Œ ํ–‰๋ ฌ์—๋Š” ๊ณฑํ–ˆ์„ ๋•Œ ๊ฐ€์ง€ ์ž์‹ ์ด ๋˜๋Š” ๋ฒกํ„ฐ๋Š” ์˜๋ฒกํ„ฐ๋ฐ–์— ์—†๋‹ค. End of explanation """ flip = [[0, 1], [1, 0]] """ Explanation: ์ด ํ–‰๋ ฌ๋กœ ์•ž์„œ ๊ตฌํ˜„ํ•œ find_eignevector(rotate)๋ฅผ ์ˆ˜ํ–‰ํ•˜๋ฉด, ์˜์›ํžˆ ๋๋‚˜์ง€ ์•Š์„ ๊ฒƒ์ด๋‹ค. ํ•œํŽธ, ๊ณ ์œ ๋ฒกํ„ฐ๊ฐ€ ์žˆ๋Š” ํ–‰๋ ฌ๋„ ๋•Œ๋กœ๋Š” ๋ฌดํ•œ๋ฃจํ”„์— ๋น ์งˆ ์ˆ˜ ์žˆ๋‹ค. End of explanation """ # # eigenvector centrality # def entry_fn(i, j): return 1 if (i, j) in friendships or (j, i) in friendships else 0 n = len(users) adjacency_matrix = make_matrix(n, n, entry_fn) adjacency_matrix """ Explanation: ์ด ํ–‰๋ ฌ์€ ๋ชจ๋“  ๋ฒกํ„ฐ [x, y]๋ฅผ [y, x]๋กœ ๋ณ€ํ™˜ํ•œ๋‹ค. ๋”ฐ๋ผ์„œ [1, 1]์€ ๊ณ ์œ ๊ฐ’์ด 1์ธ ๊ณ ์œ ๋ฒกํ„ฐ๊ฐ€ ๋œ๋‹ค. ํ•˜์ง€๋งŒ x, y๊ฐ’์ด ๋‹ค๋ฅธ ์ž„์˜์˜ ๋ฒกํ„ฐ์—์„œ ์ถœ๋ฐœํ•ด์„œ find_eigenvector๋ฅผ ์ˆ˜ํ–‰ํ•˜๋ฉด x, y๊ฐ’์„ ๋ฐ”๊พธ๋Š” ์—ฐ์‚ฐ๋งŒ ๋ฌดํ•œํžˆ ์ˆ˜ํ–‰ํ•  ๊ฒƒ์ด๋‹ค. (NumPy๊ฐ™์€ ๋ผ์ด๋ธŒ๋Ÿฌ๋ฆฌ์—๋Š” ์ด๋Ÿฐ ์ผ€์ด์Šค๊นŒ์ง€ ๋‹ค๋ฃฐ ์ˆ˜ ์žˆ๋Š” ๋‹ค์–‘ํ•œ ๋ฐฉ๋ฒ•๋“ค์ด ๊ตฌํ˜„๋˜์–ด ์žˆ๋‹ค.) ์ด๋Ÿฐ ์‚ฌ์†Œํ•œ ๋ฌธ์ œ์—๋„ ๋ถˆ๊ตฌํ•˜๊ณ , ์–ด์จŒ๋“  find_eigenvector๊ฐ€ ๊ฒฐ๊ณผ๊ฐ’์„ ๋ฐ˜ํ™˜ํ•œ๋‹ค๋ฉด, ๊ทธ ๊ฒฐ๊ณผ๊ฐ’์€ ๊ณง ๊ณ ์œ ๋ฒกํ„ฐ์ด๋‹ค. 
21.2.2 ์ค‘์‹ฌ์„ฑ ๊ณ ์œ ๋ฒกํ„ฐ๊ฐ€ ๋ฐ์ดํ„ฐ ๋„คํŠธ์›Œํฌ๋ฅผ ์ดํ•ดํ•˜๋Š”๋ฐ ์–ด๋–ป๊ฒŒ ๋„์›€์„ ์ค„๊นŒ? ์–˜๊ธฐ๋ฅผ ํ•˜๊ธฐ ์ „์— ๋จผ์ € ๋„คํŠธ์›Œํฌ๋ฅผ ์ธ์ ‘ํ–‰๋ ฌ(adjacency matrix)์˜ ํ˜•ํƒœ๋กœ ๋‚˜ํƒ€๋‚ด ๋ณด์ž. ์ด ํ–‰๋ ฌ์€ ์‚ฌ์šฉ์ž i์™€ ์‚ฌ์šฉ์ž j๊ฐ€ ์นœ๊ตฌ์ธ ๊ฒฝ์šฐ (i, j)๋ฒˆ์งธ ํ•ญ๋ชฉ์— 1์ด ์žˆ๊ณ , ์นœ๊ตฌ๊ฐ€ ์•„๋‹Œ ๊ฒฝ์šฐ 0์ด ์žˆ๋Š” ํ–‰๋ ฌ์ด๋‹ค. End of explanation """ eigenvector_centralities, _ = find_eigenvector(adjacency_matrix) for user_id, centrality in enumerate(eigenvector_centralities): print(user_id, centrality) """ Explanation: ๊ฐ ์‚ฌ์šฉ์ž์˜ ๊ณ ์œ ๋ฒกํ„ฐ ์ค‘์‹ฌ์„ฑ์ด๋ž€ find_eigenvector๋กœ ์ฐพ์€ ์‚ฌ์šฉ์ž์˜ ๊ณ ์œ ๋ฒกํ„ฐ๊ฐ€ ๋œ๋‹ค. End of explanation """ # # directed graphs # endorsements = [(0, 1), (1, 0), (0, 2), (2, 0), (1, 2), (2, 1), (1, 3), (2, 3), (3, 4), (5, 4), (5, 6), (7, 5), (6, 8), (8, 7), (8, 9)] for user in users: user["endorses"] = [] # add one list to track outgoing endorsements user["endorsed_by"] = [] # and another to track endorsements for source_id, target_id in endorsements: users[source_id]["endorses"].append(users[target_id]) users[target_id]["endorsed_by"].append(users[source_id]) """ Explanation: ์—ฐ๊ฒฐ์˜ ์ˆ˜๊ฐ€ ๋งŽ๊ณ , ์ค‘์‹ฌ์„ฑ์ด ๋†’์€ ์‚ฌ์šฉ์ž๋“คํ•œํ…Œ ์—ฐ๊ฒฐ๋œ ์‚ฌ์šฉ์ž๋“ค์€ ๊ณ ์œ ๋ฒกํ„ฐ ์ค‘์‹ฌ์„ฑ์ด ๋†’๋‹ค. ์•ž์˜ ๊ฒฐ๊ณผ์— ๋”ฐ๋ฅด๋ฉด ์‚ฌ์šฉ์ž 1, ์‚ฌ์šฉ์ž 2์˜ ์ค‘์‹ฌ์„ฑ์ด ๊ฐ€์žฅ ๋†’์€๋ฐ, ์ด๋Š” ์ค‘์‹ฌ์„ฑ์ด ๋†’์€ ์‚ฌ๋žŒ๋“ค๊ณผ ์„ธ๋ฒˆ์ด๋‚˜ ์—ฐ๊ฒฐ๋˜์—ˆ๊ธฐ ๋•Œ๋ฌธ์ด๋‹ค. ์ด๋“ค๋กœ๋ถ€ํ„ฐ ๋ฉ€์–ด์งˆ์ˆ˜๋ก ์‚ฌ์šฉ์ž๋“ค์˜ ์ค‘์‹ฌ์„ฑ์€ ์ ์ฐจ ์ค„์–ด๋“ ๋‹ค. 21.3 ๋ฐฉํ–ฅ์„ฑ ๊ทธ๋ž˜ํ”„(Directed graphs)์™€ ํŽ˜์ด์ง€๋žญํฌ ๋ฐ์ดํ…€์ด ์ธ๊ธฐ๋ฅผ ๋ณ„๋กœ ๋Œ์ง€ ๋ชปํ•˜์ž, ์ˆœ์ด์ต ํŒ€์˜ ๋ถ€์‚ฌ์žฅ์€ ์นœ๊ตฌ ๋ชจ๋ธ์—์„œ ๋ณด์ฆ(endorsement)๋ชจ๋ธ๋กœ ์ „ํ–ฅํ•˜๋Š” ๊ฒƒ์„ ๊ณ ๋ ค ์ค‘์ด๋‹ค. ์•Œ๊ณ  ๋ณด๋‹ˆ ์‚ฌ๋žŒ๋“ค์€ ์–ด๋–ค ๋ฐ์ดํ„ฐ ๊ณผํ•™์ž๋“ค๋ผ๋ฆฌ ์นœ๊ตฌ์ธ์ง€์— ๋Œ€ํ•ด์„œ๋Š” ๋ณ„๋กœ ๊ด€์‹ฌ์ด ์—†์—ˆ์ง€๋งŒ, ํ—ค๋“œํ—Œํ„ฐ๋“ค์€ ๋‹ค๋ฅธ ๋ฐ์ดํ„ฐ ๊ณผํ•™์ž๋กœ๋ถ€ํ„ฐ ์กด๊ฒฝ ๋ฐ›๋Š” ๋ฐ์ดํ„ฐ ๊ณผํ•™์ž๊ฐ€ ๋ˆ„๊ตฌ์ธ์ง€์— ๋Œ€ํ•ด ๊ด€์‹ฌ์ด ๋งŽ๋‹ค. ์ด ์ƒˆ๋กœ์šด ๋ชจ๋ธ์—์„œ ๊ด€๊ณ„๋Š” ์ƒํ˜ธ์ ์ธ ๊ฒƒ์ด ์•„๋‹ˆ๋ผ, ํ•œ ์‚ฌ๋žŒ(source)์ด ๋‹ค๋ฅธ ๋ฉ‹์ง„ ํ•œ ์‚ฌ๋žŒ(target)์˜ ์‹ค๋ ฅ์— ๋ณด์ฆ์„ ์„œ์ฃผ๋Š” (source, target) ์Œ์œผ๋กœ ๋น„๋Œ€์นญ์ ์ธ ๊ด€๊ณ„๋ฅผ ํ‘œํ˜„ํ•˜๊ฒŒ ๋œ๋‹ค. End of explanation """ endorsements_by_id = [(user["id"], len(user["endorsed_by"])) for user in users] sorted(endorsements_by_id, key=lambda x: x[1], # (user_id, num_endorsements) reverse=True) """ Explanation: ๊ทธ๋ฆฌ๊ณ  ๊ฐ€์žฅ ๋ณด์ฆ์„ ๋งŽ์ด ๋ฐ›์€ ๋ฐ์ดํ„ฐ ๊ณผํ•™์ž๋“ค์˜ ๋ฐ์ดํ„ฐ๋ฅผ ์ˆ˜์ง‘ํ•ด์„œ, ๊ทธ๊ฒƒ์„ ํ—ค๋“œํ—Œํ„ฐ๋“คํ•œํ…Œ ํŒ”๋ฉด ๋œ๋‹ค. End of explanation """ def page_rank(users, damping = 0.85, num_iters = 100): # ๋จผ์ € ํŽ˜์ด์ง€๋žญํฌ๋ฅผ ๋ชจ๋“  ๋…ธ๋“œ์— ๊ณ ๋ฅด๊ฒŒ ๋ฐฐ๋‹น num_users = len(users) pr = { user["id"] : 1 / num_users for user in users } # ๋งค ์Šคํ…๋งˆ๋‹ค ๊ฐ ๋…ธ๋“œ๊ฐ€ ๋ฐ›๋Š” # ์ ์€ ์–‘์˜ ํŽ˜์ด์ง€๋žญํฌ base_pr = (1 - damping) / num_users for __ in range(num_iters): next_pr = { user["id"] : base_pr for user in users } for user in users: # ํŽ˜์ด์ง€๋žญํฌ๋ฅผ ์™ธ๋ถ€๋กœ ํ–ฅํ•˜๋Š” ๋งํฌ์— ๋ฐฐ๋‹นํ•œ๋‹ค. links_pr = pr[user["id"]] * damping for endorsee in user["endorses"]: next_pr[endorsee["id"]] += links_pr / len(user["endorses"]) pr = next_pr return pr for user_id, pr in page_rank(users).items(): print(user_id, pr) """ Explanation: ์‚ฌ์‹ค '๋ณด์ฆ์˜ ์ˆ˜'์™€ ๊ฐ™์€ ์ˆซ์ž๋Š” ์กฐ์ž‘ํ•˜๊ธฐ๊ฐ€ ๋งค์šฐ ์‰ฝ๋‹ค. ๊ฐ€์žฅ ๊ฐ„๋‹จํ•œ ๋ฐฉ๋ฒ• ์ค‘ ํ•˜๋‚˜๋Š”, ๊ฐ€์งœ ๊ณ„์ •์„ ์—ฌ๋Ÿฌ ๊ฐœ ๋งŒ๋“ค์–ด์„œ ๊ทธ๊ฒƒ๋“ค๋กœ ๋‚ด ๊ณ„์ •์— ๋Œ€ํ•œ ๋ณด์ฆ์„ ์„œ๋Š” ๊ฒƒ์ด๋‹ค. 
๋˜ ๋‹ค๋ฅธ ๋ฐฉ๋ฒ•์€, ์นœ๊ตฌ๋“ค๋ผ๋ฆฌ ์งœ๊ณ  ์„œ๋กœ๊ฐ€ ์„œ๋กœ๋ฅผ ๋ณด์ฆํ•ด ์ฃผ๋Š” ๊ฒƒ์ด๋‹ค. (์•„๋งˆ ์‚ฌ์šฉ์ž 0, 1, 2๊ฐ€ ์ด๋Ÿฐ ๊ด€๊ณ„์ผ ๊ฐ€๋Šฅ์„ฑ์ด ํฌ๋‹ค.) ์ข€ ๋” ๋‚˜์€ ์ง€์ˆ˜๋Š”, '๋ˆ„๊ฐ€' ๋ณด์ฆ์„ ์„œ๋Š”์ง€๋ฅผ ๊ณ ๋ คํ•˜๋Š” ๊ฒƒ์ด๋‹ค. ๋ณด์ฆ์„ ๋งŽ์ด ๋ฐ›์€ ์‚ฌ์šฉ์ž๊ฐ€ ๋ณด์ฆ์„ ์„ค ๋•Œ๋Š”, ๋ณด์ฆ์„ ์ ๊ฒŒ ๋ฐ›์€ ์‚ฌ์šฉ์ž๊ฐ€ ๋ณด์ฆ์„ ์„ค ๋•Œ๋ณด๋‹ค ๋” ์ค‘์š”ํ•œ ๊ฒƒ์œผ๋กœ ๋ฐ›์•„๋“ค์—ฌ์ง€๋Š” ๊ฒƒ์ด ํƒ€๋‹นํ•˜๋‹ค. ๊ทธ๋ฆฌ๊ณ  ์‚ฌ์‹ค ์ด๊ฒƒ์€ ์œ ๋ช…ํ•œ ํŽ˜์ด์ง€๋žญํฌ(PageRank) ์•Œ๊ณ ๋ฆฌ์ฆ˜์˜ ๊ธฐ๋ณธ ์ฒ ํ•™์ด๊ธฐ๋„ ํ•˜๋‹ค. 1. ๋„คํŠธ์›Œํฌ ์ „์ฒด์—๋Š” 1.0(๋˜๋Š” 100%)์˜ ํŽ˜์ด์ง€๋žญํฌ๊ฐ€ ์žˆ๋‹ค. 2. ์ดˆ๊ธฐ์— ์ด ํŽ˜์ด์ง€๋žญํฌ๋ฅผ ๋ชจ๋“  ๋…ธ๋“œ์— ๊ณ ๋ฅด๊ฒŒ ๋ฐฐ๋‹นํ•œ๋‹ค. 3. ๊ฐ ์Šคํ…์„ ๊ฑฐ์น  ๋•Œ๋งˆ๋‹ค ๊ฐ ๋…ธ๋“œ์— ๋ฐฐ๋‹น๋œ ํŽ˜์ด์ง€๋žญํฌ์˜ ๋Œ€๋ถ€๋ถ„์€ ์™ธ๋ถ€๋กœ ํ–ฅํ•˜๋Š” ๋งํฌ์— ๊ท ๋“ฑํ•˜๊ฒŒ ๋ฐฐ๋‹นํ•œ๋‹ค. 4. ๊ฐ ์Šคํ…์„ ๊ฑฐ์น  ๋•Œ๋งˆ๋‹ค ๊ฐ ๋…ธ๋“œ์— ๋‚จ์•„ ์žˆ๋Š” ํŽ˜์ด์ง€๋žญํฌ๋ฅผ ๋ชจ๋“  ๋…ธ๋“œ์— ๊ณ ๋ฅด๊ฒŒ ๋ฐฐ๋‹นํ•œ๋‹ค. End of explanation """

Dataset description

This dataset consists of sequences of Python code followed by a docstring explaining its function. It was constructed by concatenating code and text pairs from a source dataset whose entries were originally code and markdown cells in Jupyter Notebooks.

The content of each example has the following format:

[CODE]
"""
Explanation: [TEXT]
End of explanation
"""
[CODE]
"""
Explanation: [TEXT]
End of explanation
"""
...
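
Because every example follows this delimiter convention, the content string can be split back into (code, explanation) pairs. The helper below is a minimal sketch of one way to do that; split_example and its regular expression are illustrative assumptions, not part of the dataset or of the datasets library.

import re

# Matches the explanation blocks in the format shown above (illustrative regex).
EXPLANATION_RE = re.compile(
    r'"""\s*Explanation:\s*(.*?)\s*End of explanation\s*"""',
    re.DOTALL,
)

def split_example(content):
    """Split one content string into (code, explanation) pairs. Illustrative helper."""
    pairs = []
    cursor = 0
    for match in EXPLANATION_RE.finditer(content):
        code = content[cursor:match.start()].strip()  # code cell(s) before the docstring
        text = match.group(1).strip()                 # markdown explanation inside the docstring
        pairs.append((code, text))
        cursor = match.end()
    return pairs

Applied to an example's content field, this yields the alternating code cells and their markdown explanations.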

How to use it

from datasets import load_dataset

ds = load_dataset("codeparrot/github-jupyter-code-to-text", split="train")
print(ds)
# Dataset({
#     features: ['repo_name', 'path', 'license', 'content'],
#     num_rows: 47452
# })
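
Each row then exposes the four features shown in the repr above. The lines below are a quick, illustrative sketch for inspecting a single example and subsetting by license; they assume the split loaded above and rely only on standard datasets operations (indexing, filter, len).

example = ds[0]
print(example["repo_name"], example["path"], example["license"])
print(example["content"][:300])   # start of the concatenated code/explanation cells

# Illustrative filter: keep only rows whose source notebook is MIT-licensed
mit_rows = ds.filter(lambda ex: ex["license"] == "mit")
print(len(mit_rows))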