Dataset columns:
- repo_name: string (length 6-77)
- path: string (length 8-215)
- license: string (15 classes)
- content: string (length 335-154k)
keras-team/keras-io
examples/vision/ipynb/mnist_convnet.ipynb
apache-2.0
import numpy as np from tensorflow import keras from tensorflow.keras import layers """ Explanation: Simple MNIST convnet Author: fchollet<br> Date created: 2015/06/19<br> Last modified: 2020/04/21<br> Description: A simple convnet that achieves ~99% test accuracy on MNIST. Setup End of explanation """ # Model / data parameters num_classes = 10 input_shape = (28, 28, 1) # the data, split between train and test sets (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # Scale images to the [0, 1] range x_train = x_train.astype("float32") / 255 x_test = x_test.astype("float32") / 255 # Make sure images have shape (28, 28, 1) x_train = np.expand_dims(x_train, -1) x_test = np.expand_dims(x_test, -1) print("x_train shape:", x_train.shape) print(x_train.shape[0], "train samples") print(x_test.shape[0], "test samples") # convert class vectors to binary class matrices y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) """ Explanation: Prepare the data End of explanation """ model = keras.Sequential( [ keras.Input(shape=input_shape), layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), layers.MaxPooling2D(pool_size=(2, 2)), layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), layers.MaxPooling2D(pool_size=(2, 2)), layers.Flatten(), layers.Dropout(0.5), layers.Dense(num_classes, activation="softmax"), ] ) model.summary() """ Explanation: Build the model End of explanation """ batch_size = 128 epochs = 15 model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]) model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1) """ Explanation: Train the model End of explanation """ score = model.evaluate(x_test, y_test, verbose=0) print("Test loss:", score[0]) print("Test accuracy:", score[1]) """ Explanation: Evaluate the trained model End of explanation """
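As a quick follow-up to this MNIST example (not part of the original notebook), the snippet below runs the trained model on a few held-out digits. It assumes the model, x_test, and y_test objects defined above are still in scope and is purely illustrative.
import numpy as np

probs = model.predict(x_test[:5])       # softmax probabilities, shape (5, 10)
predicted = np.argmax(probs, axis=1)    # most likely digit for each sample
actual = np.argmax(y_test[:5], axis=1)  # labels were one-hot encoded above
print("predicted:", predicted)
print("actual:   ", actual)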
tensorflow/docs-l10n
site/ja/tfx/tutorials/tfx/components_keras.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2021 The TensorFlow Authors. End of explanation """ import sys if 'google.colab' in sys.modules: !pip install --upgrade pip """ Explanation: TFX Keras コンポヌネントのチュヌトリアル TensorFlow Extended (TFX) の各コンポヌネントの玹介 泚この䟋は、Jupyter スタむルのノヌトブックで今すぐ実行できたす。セットアップは必芁ありたせん。「Google Colab で実行」をクリックするだけです <div class="devsite-table-wrapper"><table class="tfo-notebook-buttons" align="left"> <td><a target="_blank" href="https://www.tensorflow.org/tfx/tutorials/tfx/components_keras"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org で衚瀺</a></td> <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/tfx/tutorials/tfx/components_keras.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab で実行</a></td> <td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/tfx/tutorials/tfx/components_keras.ipynb"> <img width="32px" src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub で゜ヌスを衚瀺</a></td> <td><a target="_blank" href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/tfx/tutorials/tfx/components_keras.ipynb"> <img width="32px" src="https://www.tensorflow.org/images/download_logo_32px.png">ノヌトブックをダりンロヌド</a></td> </table></div> この Colab ベヌスのチュヌトリアルでは、TensorFlow Extended (TFX) のそれぞれの組み蟌みコンポヌネントをむンタラクティブに説明したす。 ここではデヌタの取り蟌みからモデルのプッシュ、サヌビングたで、゚ンド ツヌ ゚ンドの機械孊習パむプラむンのすべおのステップを芋おいきたす。 完了したら、このノヌトブックのコンテンツを TFX パむプラむン ゜ヌス コヌドずしお自動的に゚クスポヌトできたす。これは、Apache Airflow および Apache Beam ずオヌケストレヌションできたす。 泚意: このノヌトブックは、TFX パむプラむンでのネむティブ Keras モデルの䜿甚を瀺しおいたす。TFX は TensorFlow 2 バヌゞョンの Keras のみをサポヌトしたす。 背景情報 このノヌトブックは、Jupyter/Colab 環境で TFX を䜿甚する方法を瀺しおいたす。 ここでは、むンタラクティブなノヌトブックでシカゎのタクシヌの䟋を芋おいきたす。 TFX パむプラむンの構造に慣れるのには、むンタラクティブなノヌトブックで䜜業するのが䟿利です。独自のパむプラむンを軜量の開発環境ずしお開発する堎合にも圹立ちたすが、むンタラクティブ ノヌトブックのオヌケストレヌションずメタデヌタ アヌティファクトぞのアクセス方法には違いがあるので泚意しおください。 オヌケストレヌション TFX の実皌働デプロむメントでは、Apache Airflow、Kubeflow Pipelines、Apache Beam などのオヌケストレヌタヌを䜿甚しお、TFX コンポヌネントの事前定矩枈みパむプラむン グラフをオヌケストレヌションしたす。むンタラクティブなノヌトブックでは、ノヌトブック自䜓がオヌケストレヌタヌであり、ノヌトブック セルを実行するずきにそれぞれの TFX コンポヌネントを実行したす。 メタデヌタ TFX の実皌働デプロむメントでは、ML MetadataMLMDAPI を介しおメタデヌタにアクセスしたす。MLMD は、メタデヌタ プロパティを MySQL や SQLite などのデヌタベヌスに栌玍し、メタデヌタ ペむロヌドをファむル システムなどの氞続ストアに保存したす。むンタラクティブなノヌトブックでは、プロパティずペむロヌドの䞡方が、Jupyter ノヌトブックたたは Colab サヌバヌの /tmp ディレクトリにある゚フェメラル SQLite デヌタベヌスに保存されたす。 セットアップ たず、必芁なパッケヌゞをむンストヌルしおむンポヌトし、パスを蚭定しお、デヌタをダりンロヌドしたす。 Pip のアップグレヌド ロヌカルで実行する堎合にシステム Pipをアップグレヌドしないようにするには、Colab で実行しおいるこずを確認しおください。もちろん、ロヌカルシステムは個別にアップグレヌドできたす。 End of explanation """ !pip install -U tfx """ Explanation: TFX をむンストヌルする 泚Google Colab では、パッケヌゞが曎新されるため、このセルを初めお実行するずきに、ランタむムを再起動する必芁がありたす[ランタむム]&gt; [ランタむムの再起動...]。 End of explanation """ import os import pprint import tempfile import urllib import absl import tensorflow as tf import tensorflow_model_analysis as tfma tf.get_logger().propagate = False pp = pprint.PrettyPrinter() from tfx import v1 as tfx from 
tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext %load_ext tfx.orchestration.experimental.interactive.notebook_extensions.skip """ Explanation: ランタむムを再起動したしたか Google Colab を䜿甚しおいる堎合は、䞊蚘のセルを初めお実行するずきにランタむムを再起動する必芁がありたす[ランタむム]&gt; [ランタむムの再起動...]。 これは、Colab がパッケヌゞを読み蟌むために必芁ですです。 パッケヌゞをむンポヌトする 暙準の TFX コンポヌネント クラスを含む必芁なパッケヌゞをむンポヌトしたす。 End of explanation """ print('TensorFlow version: {}'.format(tf.__version__)) print('TFX version: {}'.format(tfx.__version__)) """ Explanation: ラむブラリのバヌゞョンを確認したす。 End of explanation """ # This is the root directory for your TFX pip package installation. _tfx_root = tfx.__path__[0] # This is the directory containing the TFX Chicago Taxi Pipeline example. _taxi_root = os.path.join(_tfx_root, 'examples/chicago_taxi_pipeline') # This is the path where your model will be pushed for serving. _serving_model_dir = os.path.join( tempfile.mkdtemp(), 'serving_model/taxi_simple') # Set up logging. absl.logging.set_verbosity(absl.logging.INFO) """ Explanation: パむプラむン パスを蚭定 End of explanation """ _data_root = tempfile.mkdtemp(prefix='tfx-data') DATA_PATH = 'https://raw.githubusercontent.com/tensorflow/tfx/master/tfx/examples/chicago_taxi_pipeline/data/simple/data.csv' _data_filepath = os.path.join(_data_root, "data.csv") urllib.request.urlretrieve(DATA_PATH, _data_filepath) """ Explanation: サンプルデヌタのダりンロヌド TFX パむプラむンで䜿甚するサンプル デヌタセットをダりンロヌドしたす。 䜿甚しおいるデヌタセットは、シカゎ垂がリリヌスした タクシヌトリップデヌタセットです。 このデヌタセットの列は次のずおりです。 <table> <tr> <td>pickup_community_area</td> <td>fare</td> <td>trip_start_month</td> </tr> <tr> <td>trip_start_hour</td> <td>trip_start_day</td> <td>trip_start_timestamp</td> </tr> <tr> <td>pickup_latitude</td> <td>pickup_longitude</td> <td>dropoff_latitude</td> </tr> <tr> <td>dropoff_longitude</td> <td>trip_miles</td> <td>pickup_census_tract</td> </tr> <tr> <td>dropoff_census_tract</td> <td>payment_type</td> <td>company</td> </tr> <tr> <td>trip_seconds</td> <td>dropoff_community_area</td> <td>tips</td> </tr> </table> このデヌタセットを䜿甚しお、タクシヌ乗車のtipsを予枬するモデルを構築したす。 End of explanation """ !head {_data_filepath} """ Explanation: CSV ファむルを芋おみたしょう。 End of explanation """ # Here, we create an InteractiveContext using default parameters. This will # use a temporary directory with an ephemeral ML Metadata database instance. # To use your own pipeline root or database, the optional properties # `pipeline_root` and `metadata_connection_config` may be passed to # InteractiveContext. Calls to InteractiveContext are no-ops outside of the # notebook. 
context = InteractiveContext() """ Explanation: 泚このWeb サむトは、シカゎ垂の公匏 Web サむト www.cityofchicago.org で公開されたデヌタを倉曎しお䜿甚するアプリケヌションを提䟛したす。シカゎ垂は、この Web サむトで提䟛されるデヌタの内容、正確性、適時性、たたは完党性に぀いお䞀切の衚明を行いたせん。この Web サむトで提䟛されるデヌタは、い぀でも倉曎される可胜性がありたす。かかる Web サむトで提䟛されるデヌタはナヌザヌの自己責任で利甚されるものずしたす。 InteractiveContext を䜜成する 最埌に、このノヌトブックで TFX コンポヌネントをむンタラクティブに実行できるようにする InteractiveContext を䜜成したす。 End of explanation """ example_gen = tfx.components.CsvExampleGen(input_base=_data_root) context.run(example_gen, enable_cache=True) """ Explanation: TFX コンポヌネントをむンタラクティブに実行する 次のセルでは、TFX コンポヌネントを 1 ぀ず぀䜜成し、それぞれを実行しお、出力アヌティファクトを芖芚化したす。 ExampleGen ExampleGen コンポヌネントは通垞、TFX パむプラむンの先頭にあり、以䞋を実行したす。 デヌタをトレヌニング セットず評䟡セットに分割したす (デフォルトでは、2/3 トレヌニング + 1/3 評䟡)。 デヌタを tf.Example 圢匏に倉換したす。 (詳现はこちら) 他のコンポヌネントがアクセスできるように、デヌタを _tfx_root ディレクトリにコピヌしたす。 ExampleGen は、デヌタ゜ヌスぞのパスを入力ずしお受け取りたす。 ここでは、これはダりンロヌドした CSV を含む _data_root パスです。 泚意: このノヌトブックでは、コンポヌネントを 1 ぀ず぀むンスタンス化し、InteractiveContext.run() で実行したすが、実皌働環境では、すべおのコンポヌネントを事前に Pipelineで指定しお、オヌケストレヌタヌに枡したすTFX パむプラむン ガむドの構築を参照しおください。 キャッシュを有効にする ノヌトブックで InteractiveContext を䜿甚しおパむプラむンを䜜成しおいる堎合、個別のコンポヌネントが出力をキャッシュするタむミングを制埡するこずができたす。コンポヌネントが前に生成した出力アヌティファクトを再利甚する堎合は、enable_cache を True に蚭定したす。コヌドを倉曎するなどにより、コンポヌネントの出力アヌティファクトを再蚈算する堎合は、enable_cache を False に蚭定したす。 End of explanation """ artifact = example_gen.outputs['examples'].get()[0] print(artifact.split_names, artifact.uri) """ Explanation: ExampleGenの出力アヌティファクトを調べおみたしょう。このコンポヌネントは、トレヌニングサンプルず評䟡サンプルの 2 ぀のアヌティファクトを生成したす。 End of explanation """ # Get the URI of the output artifact representing the training examples, which is a directory train_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'Split-train') # Get the list of files in this directory (all compressed TFRecord files) tfrecord_filenames = [os.path.join(train_uri, name) for name in os.listdir(train_uri)] # Create a `TFRecordDataset` to read these files dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP") # Iterate over the first 3 records and decode them. 
for tfrecord in dataset.take(3): serialized_example = tfrecord.numpy() example = tf.train.Example() example.ParseFromString(serialized_example) pp.pprint(example) """ Explanation: たた、最初の 3 ぀のトレヌニングサンプルも芋おみたす。 End of explanation """ statistics_gen = tfx.components.StatisticsGen( examples=example_gen.outputs['examples']) context.run(statistics_gen, enable_cache=True) """ Explanation: ExampleGenがデヌタの取り蟌みを完了したので、次のステップ、デヌタ分析に進みたす。 StatisticsGen StatisticsGenコンポヌネントは、デヌタ分析甚のデヌタセットの統蚈を蚈算し、ダりンストリヌムのコンポヌネントで䜿甚したす。これは、TensorFlow Data Validation ラむブラリを䜿甚したす。 StatisticsGenコンポヌネントは、デヌタ分析甚のデヌタセットの統蚈を蚈算し、ダりンストリヌム コンポヌネントで䜿甚したす。 End of explanation """ context.show(statistics_gen.outputs['statistics']) """ Explanation: StatisticsGen の実行が完了するず、出力された統蚈を芖芚化できたす。 色々なプロットを詊しおみおください End of explanation """ schema_gen = tfx.components.SchemaGen( statistics=statistics_gen.outputs['statistics'], infer_feature_shape=False) context.run(schema_gen, enable_cache=True) """ Explanation: SchemaGen SchemaGen コンポヌネントは、デヌタ統蚈に基づいおスキヌマを生成したす。スキヌマは、デヌタセット内の特城の予想される境界、タむプ、プロパティを定矩したす。たた、TensorFlow デヌタ怜蚌ラむブラリも䜿甚したす。 泚意: 生成されたスキヌマはベスト゚フォヌトのもので、デヌタの基本的なプロパティだけを掚論しようずしたす。確認し、必芁に応じお修正する必芁がありたす。 SchemaGen は、StatisticsGen で生成した統蚈を入力ずしお受け取り、デフォルトでトレヌニング分割を参照したす。 End of explanation """ context.show(schema_gen.outputs['schema']) """ Explanation: SchemaGen の実行が完了するず、生成されたスキヌマをテヌブルずしお芖芚化できたす。 End of explanation """ example_validator = tfx.components.ExampleValidator( statistics=statistics_gen.outputs['statistics'], schema=schema_gen.outputs['schema']) context.run(example_validator, enable_cache=True) """ Explanation: デヌタセットのそれぞれの特城は、スキヌマ テヌブルのプロパティの暪に行ずしお衚瀺されたす。スキヌマは、ドメむンずしお瀺される、カテゎリ特城が取るすべおの倀もキャプチャしたす。 スキヌマの詳现に぀いおは、SchemaGen のドキュメントをご芧ください。 ExampleValidator ExampleValidator コンポヌネントは、スキヌマで定矩された期埅に基づいお、デヌタの異垞を怜出したす。たた、TensorFlow Data Validation ラむブラリも䜿甚したす。 ExampleValidator は、Statistics Gen{/code 1} からの統蚈ず &lt;code data-md-type="codespan"&gt;SchemaGen からのスキヌマを入力ずしお受け取りたす。 End of explanation """ context.show(example_validator.outputs['anomalies']) """ Explanation: ExampleValidator の実行が完了するず、異垞をテヌブルずしお芖芚化できたす。 End of explanation """ _taxi_constants_module_file = 'taxi_constants.py' %%writefile {_taxi_constants_module_file} NUMERICAL_FEATURES = ['trip_miles', 'fare', 'trip_seconds'] BUCKET_FEATURES = [ 'pickup_latitude', 'pickup_longitude', 'dropoff_latitude', 'dropoff_longitude' ] # Number of buckets used by tf.transform for encoding each feature. FEATURE_BUCKET_COUNT = 10 CATEGORICAL_NUMERICAL_FEATURES = [ 'trip_start_hour', 'trip_start_day', 'trip_start_month', 'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area', 'dropoff_community_area' ] CATEGORICAL_STRING_FEATURES = [ 'payment_type', 'company', ] # Number of vocabulary terms used for encoding categorical features. VOCAB_SIZE = 1000 # Count of out-of-vocab buckets in which unrecognized categorical are hashed. OOV_SIZE = 10 # Keys LABEL_KEY = 'tips' FARE_KEY = 'fare' def t_name(key): """ Rename the feature keys so that they don't clash with the raw keys when running the Evaluator component. 
Args: key: The original feature key Returns: key with '_xf' appended """ return key + '_xf' """ Explanation: 異垞テヌブルでは、異垞がないこずがわかりたす。これは、分析した最初のデヌタセットで、スキヌマはこれに合わせお調敎されおいるため、異垞がないこずが予想されたす。このスキヌマを確認する必芁がありたす。予期されないものは、デヌタに異垞があるこずを意味したす。確認されたスキヌマを䜿甚しお将来のデヌタを保護できたす。ここで生成された異垞は、モデルのパフォヌマンスをデバッグし、デヌタが時間の経過ずずもにどのように倉化するかを理解し、デヌタ ゚ラヌを特定するために䜿甚できたす。 倉換 Transformコンポヌネントは、トレヌニングずサヌビングの䞡方で特城量゚ンゞニアリングを実行したす。これは、 TensorFlow Transform ラむブラリを䜿甚したす。 Transformは、ExampleGenからのデヌタ、SchemaGenからのスキヌマ、ナヌザヌ定矩の Transform コヌドを含むモゞュヌルを入力ずしお受け取りたす。 以䞋のナヌザヌ定矩の Transform コヌドの䟋を芋おみたしょうTensorFlow Transform API の抂芁に぀いおは、チュヌトリアルを参照しおください。たず、特城量゚ンゞニアリングのいく぀かの定数を定矩したす。 泚意: %%writefile セル マゞックは、セルの内容をディスク䞊の.pyファむルずしお保存したす。これにより、Transform コンポヌネントはコヌドをモゞュヌルずしお読み蟌むこずができたす。 End of explanation """ _taxi_transform_module_file = 'taxi_transform.py' %%writefile {_taxi_transform_module_file} import tensorflow as tf import tensorflow_transform as tft # Imported files such as taxi_constants are normally cached, so changes are # not honored after the first import. Normally this is good for efficiency, but # during development when we may be iterating code it can be a problem. To # avoid this problem during development, reload the file. import taxi_constants import sys if 'google.colab' in sys.modules: # Testing to see if we're doing development import importlib importlib.reload(taxi_constants) _NUMERICAL_FEATURES = taxi_constants.NUMERICAL_FEATURES _BUCKET_FEATURES = taxi_constants.BUCKET_FEATURES _FEATURE_BUCKET_COUNT = taxi_constants.FEATURE_BUCKET_COUNT _CATEGORICAL_NUMERICAL_FEATURES = taxi_constants.CATEGORICAL_NUMERICAL_FEATURES _CATEGORICAL_STRING_FEATURES = taxi_constants.CATEGORICAL_STRING_FEATURES _VOCAB_SIZE = taxi_constants.VOCAB_SIZE _OOV_SIZE = taxi_constants.OOV_SIZE _FARE_KEY = taxi_constants.FARE_KEY _LABEL_KEY = taxi_constants.LABEL_KEY def _make_one_hot(x, key): """Make a one-hot tensor to encode categorical features. Args: X: A dense tensor key: A string key for the feature in the input Returns: A dense one-hot tensor as a float list """ integerized = tft.compute_and_apply_vocabulary(x, top_k=_VOCAB_SIZE, num_oov_buckets=_OOV_SIZE, vocab_filename=key, name=key) depth = ( tft.experimental.get_vocabulary_size_by_name(key) + _OOV_SIZE) one_hot_encoded = tf.one_hot( integerized, depth=tf.cast(depth, tf.int32), on_value=1.0, off_value=0.0) return tf.reshape(one_hot_encoded, [-1, depth]) def _fill_in_missing(x): """Replace missing values in a SparseTensor. Fills in missing values of `x` with '' or 0, and converts to a dense tensor. Args: x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1 in the second dimension. Returns: A rank 1 tensor where missing values of `x` have been filled in. """ if not isinstance(x, tf.sparse.SparseTensor): return x default_value = '' if x.dtype == tf.string else 0 return tf.squeeze( tf.sparse.to_dense( tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]), default_value), axis=1) def preprocessing_fn(inputs): """tf.transform's callback function for preprocessing inputs. Args: inputs: map from feature keys to raw not-yet-transformed features. Returns: Map from string feature key to transformed feature operations. """ outputs = {} for key in _NUMERICAL_FEATURES: # If sparse make it dense, setting nan's to 0 or '', and apply zscore. 
outputs[taxi_constants.t_name(key)] = tft.scale_to_z_score( _fill_in_missing(inputs[key]), name=key) for key in _BUCKET_FEATURES: outputs[taxi_constants.t_name(key)] = tf.cast(tft.bucketize( _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT, name=key), dtype=tf.float32) for key in _CATEGORICAL_STRING_FEATURES: outputs[taxi_constants.t_name(key)] = _make_one_hot(_fill_in_missing(inputs[key]), key) for key in _CATEGORICAL_NUMERICAL_FEATURES: outputs[taxi_constants.t_name(key)] = _make_one_hot(tf.strings.strip( tf.strings.as_string(_fill_in_missing(inputs[key]))), key) # Was this passenger a big tipper? taxi_fare = _fill_in_missing(inputs[_FARE_KEY]) tips = _fill_in_missing(inputs[_LABEL_KEY]) outputs[_LABEL_KEY] = tf.where( tf.math.is_nan(taxi_fare), tf.cast(tf.zeros_like(taxi_fare), tf.int64), # Test if the tip was > 20% of the fare. tf.cast( tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64)) return outputs """ Explanation: 次に、生デヌタを入力ずしお受け取り、モデルがトレヌニングできる倉換された特城量を返す {code 0}preprocessing _fn を蚘述したす。 End of explanation """ transform = tfx.components.Transform( examples=example_gen.outputs['examples'], schema=schema_gen.outputs['schema'], module_file=os.path.abspath(_taxi_transform_module_file)) context.run(transform, enable_cache=True) """ Explanation: 次に、この特城量゚ンゞニアリング コヌドを Transformコンポヌネントに枡し、実行しおデヌタを倉換したす。 End of explanation """ transform.outputs """ Explanation: Transformの出力アヌティファクトを調べおみたしょう。このコンポヌネントは、2 皮類の出力を生成したす。 transform_graph は、前凊理挔算を実行できるグラフです (このグラフは、サヌビングモデルず評䟡モデルに含たれたす)。 transformed_examplesは前凊理されたトレヌニングおよび評䟡デヌタを衚したす。 End of explanation """ train_uri = transform.outputs['transform_graph'].get()[0].uri os.listdir(train_uri) """ Explanation: transform_graph アヌティファクトを芋おみたしょう。これは、3 ぀のサブディレクトリを含むディレクトリを指しおいたす。 End of explanation """ # Get the URI of the output artifact representing the transformed examples, which is a directory train_uri = os.path.join(transform.outputs['transformed_examples'].get()[0].uri, 'Split-train') # Get the list of files in this directory (all compressed TFRecord files) tfrecord_filenames = [os.path.join(train_uri, name) for name in os.listdir(train_uri)] # Create a `TFRecordDataset` to read these files dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP") # Iterate over the first 3 records and decode them. for tfrecord in dataset.take(3): serialized_example = tfrecord.numpy() example = tf.train.Example() example.ParseFromString(serialized_example) pp.pprint(example) """ Explanation: transformed_metadata サブディレクトリには、前凊理されたデヌタのスキヌマが含たれおいたす。transform_fnサブディレクトリには、実際の前凊理グラフが含たれおいたす。metadataサブディレクトリには、元のデヌタのスキヌマが含たれおいたす。 たた、最初の 3 ぀の倉換された䟋も芋おみたす。 End of explanation """ _taxi_trainer_module_file = 'taxi_trainer.py' %%writefile {_taxi_trainer_module_file} from typing import Dict, List, Text import os import glob from absl import logging import datetime import tensorflow as tf import tensorflow_transform as tft from tfx import v1 as tfx from tfx_bsl.public import tfxio from tensorflow_transform import TFTransformOutput # Imported files such as taxi_constants are normally cached, so changes are # not honored after the first import. Normally this is good for efficiency, but # during development when we may be iterating code it can be a problem. To # avoid this problem during development, reload the file. 
import taxi_constants import sys if 'google.colab' in sys.modules: # Testing to see if we're doing development import importlib importlib.reload(taxi_constants) _LABEL_KEY = taxi_constants.LABEL_KEY _BATCH_SIZE = 40 def _input_fn(file_pattern: List[Text], data_accessor: tfx.components.DataAccessor, tf_transform_output: tft.TFTransformOutput, batch_size: int = 200) -> tf.data.Dataset: """Generates features and label for tuning/training. Args: file_pattern: List of paths or patterns of input tfrecord files. data_accessor: DataAccessor for converting input to RecordBatch. tf_transform_output: A TFTransformOutput. batch_size: representing the number of consecutive elements of returned dataset to combine in a single batch Returns: A dataset that contains (features, indices) tuple where features is a dictionary of Tensors, and indices is a single Tensor of label indices. """ return data_accessor.tf_dataset_factory( file_pattern, tfxio.TensorFlowDatasetOptions( batch_size=batch_size, label_key=_LABEL_KEY), tf_transform_output.transformed_metadata.schema) def _get_tf_examples_serving_signature(model, tf_transform_output): """Returns a serving signature that accepts `tensorflow.Example`.""" # We need to track the layers in the model in order to save it. # TODO(b/162357359): Revise once the bug is resolved. model.tft_layer_inference = tf_transform_output.transform_features_layer() @tf.function(input_signature=[ tf.TensorSpec(shape=[None], dtype=tf.string, name='examples') ]) def serve_tf_examples_fn(serialized_tf_example): """Returns the output to be used in the serving signature.""" raw_feature_spec = tf_transform_output.raw_feature_spec() # Remove label feature since these will not be present at serving time. raw_feature_spec.pop(_LABEL_KEY) raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec) transformed_features = model.tft_layer_inference(raw_features) logging.info('serve_transformed_features = %s', transformed_features) outputs = model(transformed_features) # TODO(b/154085620): Convert the predicted labels from the model using a # reverse-lookup (opposite of transform.py). return {'outputs': outputs} return serve_tf_examples_fn def _get_transform_features_signature(model, tf_transform_output): """Returns a serving signature that applies tf.Transform to features.""" # We need to track the layers in the model in order to save it. # TODO(b/162357359): Revise once the bug is resolved. model.tft_layer_eval = tf_transform_output.transform_features_layer() @tf.function(input_signature=[ tf.TensorSpec(shape=[None], dtype=tf.string, name='examples') ]) def transform_features_fn(serialized_tf_example): """Returns the transformed_features to be fed as input to evaluator.""" raw_feature_spec = tf_transform_output.raw_feature_spec() raw_features = tf.io.parse_example(serialized_tf_example, raw_feature_spec) transformed_features = model.tft_layer_eval(raw_features) logging.info('eval_transformed_features = %s', transformed_features) return transformed_features return transform_features_fn def export_serving_model(tf_transform_output, model, output_dir): """Exports a keras model for serving. Args: tf_transform_output: Wrapper around output of tf.Transform. model: A keras model to export for serving. output_dir: A directory where the model will be exported to. """ # The layer has to be saved to the model for keras tracking purpases. 
model.tft_layer = tf_transform_output.transform_features_layer() signatures = { 'serving_default': _get_tf_examples_serving_signature(model, tf_transform_output), 'transform_features': _get_transform_features_signature(model, tf_transform_output), } model.save(output_dir, save_format='tf', signatures=signatures) def _build_keras_model(tf_transform_output: TFTransformOutput ) -> tf.keras.Model: """Creates a DNN Keras model for classifying taxi data. Args: tf_transform_output: [TFTransformOutput], the outputs from Transform Returns: A keras Model. """ feature_spec = tf_transform_output.transformed_feature_spec().copy() feature_spec.pop(_LABEL_KEY) inputs = {} for key, spec in feature_spec.items(): if isinstance(spec, tf.io.VarLenFeature): inputs[key] = tf.keras.layers.Input( shape=[None], name=key, dtype=spec.dtype, sparse=True) elif isinstance(spec, tf.io.FixedLenFeature): # TODO(b/208879020): Move into schema such that spec.shape is [1] and not # [] for scalars. inputs[key] = tf.keras.layers.Input( shape=spec.shape or [1], name=key, dtype=spec.dtype) else: raise ValueError('Spec type is not supported: ', key, spec) output = tf.keras.layers.Concatenate()(tf.nest.flatten(inputs)) output = tf.keras.layers.Dense(100, activation='relu')(output) output = tf.keras.layers.Dense(70, activation='relu')(output) output = tf.keras.layers.Dense(50, activation='relu')(output) output = tf.keras.layers.Dense(20, activation='relu')(output) output = tf.keras.layers.Dense(1)(output) return tf.keras.Model(inputs=inputs, outputs=output) # TFX Trainer will call this function. def run_fn(fn_args: tfx.components.FnArgs): """Train the model based on given args. Args: fn_args: Holds args used to train the model as name/value pairs. """ tf_transform_output = tft.TFTransformOutput(fn_args.transform_output) train_dataset = _input_fn(fn_args.train_files, fn_args.data_accessor, tf_transform_output, _BATCH_SIZE) eval_dataset = _input_fn(fn_args.eval_files, fn_args.data_accessor, tf_transform_output, _BATCH_SIZE) model = _build_keras_model(tf_transform_output) model.compile( loss=tf.keras.losses.BinaryCrossentropy(from_logits=True), optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics=[tf.keras.metrics.BinaryAccuracy()]) tensorboard_callback = tf.keras.callbacks.TensorBoard( log_dir=fn_args.model_run_dir, update_freq='batch') model.fit( train_dataset, steps_per_epoch=fn_args.train_steps, validation_data=eval_dataset, validation_steps=fn_args.eval_steps, callbacks=[tensorboard_callback]) # Export the model. 
export_serving_model(tf_transform_output, model, fn_args.serving_model_dir) """ Explanation: Transformコンポヌネントがデヌタを特城量に倉換したら、次にモデルをトレヌニングしたす。 トレヌナヌ Trainerコンポヌネントは、TensorFlow で定矩したモデルをトレヌニングしたす。デフォルトでは、Trainer は Estimator API をサポヌトしたす。Keras API を䜿甚するには、トレヌナヌのコンストラクタヌでcustom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor)をセットアップしお Generic Trainer を指定する必芁がありたす。 Trainer は、SchemaGenからのスキヌマ、Transformからの倉換されたデヌタずグラフ、トレヌニング パラメヌタ、およびナヌザヌ定矩されたモデル コヌドを含むモゞュヌルを入力ずしお受け取りたす。 以䞋のナヌザヌ定矩モデル コヌドの䟋を芋おみたしょうTensorFlow Keras API の抂芁に぀いおは、チュヌトリアルを参照しおください。 End of explanation """ trainer = tfx.components.Trainer( module_file=os.path.abspath(_taxi_trainer_module_file), examples=transform.outputs['transformed_examples'], transform_graph=transform.outputs['transform_graph'], schema=schema_gen.outputs['schema'], train_args=tfx.proto.TrainArgs(num_steps=10000), eval_args=tfx.proto.EvalArgs(num_steps=5000)) context.run(trainer, enable_cache=True) """ Explanation: 次に、このモデル コヌドをTrainerコンポヌネントに枡し、それを実行しおモデルをトレヌニングしたす。 End of explanation """ model_artifact_dir = trainer.outputs['model'].get()[0].uri pp.pprint(os.listdir(model_artifact_dir)) model_dir = os.path.join(model_artifact_dir, 'Format-Serving') pp.pprint(os.listdir(model_dir)) """ Explanation: TensorBoard でトレヌニングを分析する トレヌナヌのアヌティファクトを芋おみたしょう。これはモデルのサブディレクトリを含むディレクトリを指しおいたす。 End of explanation """ model_run_artifact_dir = trainer.outputs['model_run'].get()[0].uri %load_ext tensorboard %tensorboard --logdir {model_run_artifact_dir} """ Explanation: オプションで、TensorBoard を Trainer に接続しお、モデルの孊習曲線を分析できたす。 End of explanation """ # Imported files such as taxi_constants are normally cached, so changes are # not honored after the first import. Normally this is good for efficiency, but # during development when we may be iterating code it can be a problem. To # avoid this problem during development, reload the file. import taxi_constants import sys if 'google.colab' in sys.modules: # Testing to see if we're doing development import importlib importlib.reload(taxi_constants) eval_config = tfma.EvalConfig( model_specs=[ # This assumes a serving model with signature 'serving_default'. If # using estimator based EvalSavedModel, add signature_name: 'eval' and # remove the label_key. tfma.ModelSpec( signature_name='serving_default', label_key=taxi_constants.LABEL_KEY, preprocessing_function_names=['transform_features'], ) ], metrics_specs=[ tfma.MetricsSpec( # The metrics added here are in addition to those saved with the # model (assuming either a keras model or EvalSavedModel is used). # Any metrics added into the saved model (for example using # model.compile(..., metrics=[...]), etc) will be computed # automatically. # To add validation thresholds for metrics saved with the model, # add them keyed by metric name to the thresholds map. metrics=[ tfma.MetricConfig(class_name='ExampleCount'), tfma.MetricConfig(class_name='BinaryAccuracy', threshold=tfma.MetricThreshold( value_threshold=tfma.GenericValueThreshold( lower_bound={'value': 0.5}), # Change threshold will be ignored if there is no # baseline model resolved from MLMD (first run). change_threshold=tfma.GenericChangeThreshold( direction=tfma.MetricDirection.HIGHER_IS_BETTER, absolute={'value': -1e-10}))) ] ) ], slicing_specs=[ # An empty slice spec means the overall slice, i.e. the whole dataset. tfma.SlicingSpec(), # Data can be sliced along a feature column. In this case, data is # sliced along feature column trip_start_hour. 
tfma.SlicingSpec( feature_keys=['trip_start_hour']) ]) """ Explanation: Evaluator Evaluator コンポヌネントは、評䟡セットに察しおモデル パフォヌマンス指暙を蚈算したす。TensorFlow Model Analysisラむブラリを䜿甚したす。Evaluatorは、オプションで、新しくトレヌニングされたモデルが以前のモデルよりも優れおいるこずを怜蚌できたす。これは、モデルを毎日自動的にトレヌニングおよび怜蚌する実皌働環境のパむプラむン蚭定で圹立ちたす。このノヌトブックでは 1 ぀のモデルのみをトレヌニングするため、Evaluatorはモデルに自動的に「good」ずいうラベルを付けたす。 Evaluatorは、ExampleGenからのデヌタ、Trainerからのトレヌニング枈みモデル、およびスラむス構成を入力ずしお受け取りたす。スラむス構成により、特城倀に関する指暙をスラむスするこずができたす (たずえば、午前 8 時から午埌 8 時たでのタクシヌ乗車でモデルがどのように動䜜するかなど)。 この構成の䟋は、以䞋を参照しおください。 End of explanation """ # Use TFMA to compute a evaluation statistics over features of a model and # validate them against a baseline. # The model resolver is only required if performing model validation in addition # to evaluation. In this case we validate against the latest blessed model. If # no model has been blessed before (as in this case) the evaluator will make our # candidate the first blessed model. model_resolver = tfx.dsl.Resolver( strategy_class=tfx.dsl.experimental.LatestBlessedModelStrategy, model=tfx.dsl.Channel(type=tfx.types.standard_artifacts.Model), model_blessing=tfx.dsl.Channel( type=tfx.types.standard_artifacts.ModelBlessing)).with_id( 'latest_blessed_model_resolver') context.run(model_resolver, enable_cache=True) evaluator = tfx.components.Evaluator( examples=example_gen.outputs['examples'], model=trainer.outputs['model'], baseline_model=model_resolver.outputs['model'], eval_config=eval_config) context.run(evaluator, enable_cache=True) """ Explanation: 次に、この構成を Evaluatorに枡しお実行したす。 End of explanation """ evaluator.outputs """ Explanation: Evaluator の出力アヌティファクトを調べおみたしょう。 End of explanation """ context.show(evaluator.outputs['evaluation']) """ Explanation: evaluation出力を䜿甚するず、評䟡セット党䜓のグロヌバル指暙のデフォルトの芖芚化を衚瀺できたす。 End of explanation """ import tensorflow_model_analysis as tfma # Get the TFMA output result path and load the result. PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri tfma_result = tfma.load_eval_result(PATH_TO_RESULT) # Show data sliced along feature column trip_start_hour. 
tfma.view.render_slicing_metrics( tfma_result, slicing_column='trip_start_hour') """ Explanation: スラむスされた評䟡メトリクスの芖芚化を衚瀺するには、TensorFlow Model Analysis ラむブラリを盎接呌び出したす。 End of explanation """ blessing_uri = evaluator.outputs['blessing'].get()[0].uri !ls -l {blessing_uri} """ Explanation: この芖芚化は同じ指暙を瀺しおいたすが、評䟡セット党䜓ではなく、trip_start_hourのすべおの特城倀で蚈算されおいたす。 TensorFlow モデル分析は、公平性むンゞケヌタヌやモデル パフォヌマンスの時系列のプロットなど、他の倚くの芖芚化をサポヌトしおいたす。 詳现に぀いおは、チュヌトリアルを参照しおください。 構成にしきい倀を远加したため、怜蚌出力も利甚できたす。{code 0}blessing{/code 0} アヌティファクトの存圚は、モデルが怜蚌に合栌したこずを瀺しおいたす。これは実行される最初の怜蚌であるため、候補は自動的に bless されたす。 End of explanation """ PATH_TO_RESULT = evaluator.outputs['evaluation'].get()[0].uri print(tfma.load_validation_result(PATH_TO_RESULT)) """ Explanation: 怜蚌結果レコヌドを読み蟌み、成功を確認するこずもできたす。 End of explanation """ pusher = tfx.components.Pusher( model=trainer.outputs['model'], model_blessing=evaluator.outputs['blessing'], push_destination=tfx.proto.PushDestination( filesystem=tfx.proto.PushDestination.Filesystem( base_directory=_serving_model_dir))) context.run(pusher, enable_cache=True) """ Explanation: Pusher Pusher コンポヌネントは通垞、TFX パむプラむンの最埌にありたす。このコンポヌネントはモデルが怜蚌に合栌したかどうかをチェックし、合栌した堎合はモデルを _serving_model_dirに゚クスポヌトしたす。 End of explanation """ pusher.outputs """ Explanation: 次にPusherの出力アヌティファクトを調べおみたしょう。 End of explanation """ push_uri = pusher.outputs['pushed_model'].get()[0].uri model = tf.saved_model.load(push_uri) for item in model.signatures.items(): pp.pprint(item) """ Explanation: 特に、Pusher はモデルを次のような SavedModel 圢匏で゚クスポヌトしたす。 End of explanation """
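To round off this record, here is a hypothetical smoke test of the pushed model's serving signature. It reuses the model, example_gen, pp, os, and tf objects from the cells above, assumes the ExampleGen train split is still on disk under the Split-train directory shown earlier, and is not part of the original tutorial.
serving_fn = model.signatures['serving_default']

# Read one raw serialized tf.Example from the split produced by ExampleGen.
split_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'Split-train')
filenames = [os.path.join(split_uri, name) for name in os.listdir(split_uri)]
dataset = tf.data.TFRecordDataset(filenames, compression_type="GZIP")

for serialized in dataset.take(1):
    # The signature expects a batch of serialized examples under the 'examples' key;
    # extra features such as the label are simply ignored by tf.io.parse_example.
    outputs = serving_fn(examples=tf.reshape(serialized, [1]))
    pp.pprint(outputs)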
ganguli-lab/twpca
notebooks/warp_unit_tests.ipynb
mit
_, _, data = twpca.datasets.jittered_neuron() model = TWPCA(data, n_components=1, warpinit='identity') np.all(np.isclose(model.params['warp'], np.arange(model.shared_length), atol=1e-5, rtol=2)) np.nanmax(np.abs(model.transform() - data)) < 1e-5 """ Explanation: check identity warp does not change data appreciably End of explanation """ model = TWPCA(data, n_components=1, warpinit='shift') plt.imshow(np.squeeze(model.transform())) """ Explanation: check that shift initialization for warp solves the simple toy problem End of explanation """
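Purely as an illustration (not in the original notebook), one could also look at how far the fitted shift warp strays from the identity warp checked above. The sketch assumes the model and plt objects from the cells above and relies on the same params['warp'] and shared_length attributes already used there.
import numpy as np

identity = np.arange(model.shared_length)
warps = np.atleast_2d(model.params['warp'])   # one warp per trial, or a single shared warp
plt.plot(identity, identity, 'k--', label='identity')
plt.plot(identity, warps.mean(axis=0), label='mean fitted warp')
plt.xlabel('clock time')
plt.ylabel('warped time')
plt.legend()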
oddt/notebooks
DUD-E.ipynb
bsd-3-clause
from __future__ import print_function, division, unicode_literals import oddt from oddt.datasets import dude print(oddt.__version__) """ Explanation: <h1>DUD-E: A Database of Useful Decoys: Enhanced</h1> End of explanation """ %%bash mkdir -p ./DUD-E_targets/ wget -qO- http://dude.docking.org/targets/ampc/ampc.tar.gz | tar xz -C ./DUD-E_targets/ wget -qO- http://dude.docking.org/targets/cxcr4/cxcr4.tar.gz | tar xz -C ./DUD-E_targets/ wget -qO- http://dude.docking.org/targets/pur2/pur2.tar.gz | tar xz -C ./DUD-E_targets/ wget -qO- http://dude.docking.org/targets/pygm/pygm.tar.gz | tar xz -C ./DUD-E_targets/ wget -qO- http://dude.docking.org/targets/sahh/sahh.tar.gz | tar xz -C ./DUD-E_targets/ directory = './DUD-E_targets' """ Explanation: We'd like to read files from DUD-E.<br/> You can download different targets and different numbers of targets, but I used only these five: ampc, cxcr4, pur2, pygm, sahh.<br/> End of explanation """ dude_database = dude(home=directory) """ Explanation: We will use the dude class. End of explanation """ target = dude_database['cxcr4'] """ Explanation: Now we can get one target or iterate over all targets in our directory. Let's choose one target. End of explanation """ target.ligand """ Explanation: target has four properties: protein, ligand, actives and decoys:<br/> protein - protein molecule<br/> ligand - ligand molecule<br/> actives - generator containing actives<br/> decoys - generator containing decoys End of explanation """ for target in dude_database: actives = list(target.actives) decoys = list(target.decoys) print('Target: ' + target.dude_id, 'Number of actives: ' + str(len(actives)), 'Number of decoys: ' + str(len(decoys)), sep='\t\t') """ Explanation: Let's see which target has the most actives and decoys. End of explanation """
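As a small convenience (not part of the original notebook), the same per-target counts can be collected into a pandas DataFrame for easier comparison. This assumes the dude_database object defined above can be iterated over again.
import pandas as pd

rows = []
for target in dude_database:
    rows.append({'target': target.dude_id,
                 'actives': sum(1 for _ in target.actives),
                 'decoys': sum(1 for _ in target.decoys)})

summary = pd.DataFrame(rows).set_index('target')
print(summary)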
iAInNet/tensorflow_in_action
_pratice_cifar10.ipynb
gpl-3.0
import math
import os
import time

import numpy as np
import tensorflow as tf

import cifar10_input

max_steps = 3000
batch_size = 128
data_dir = 'data/cifar10/cifar-10-batches-bin/'
model_dir = 'model/_cifar10_v2/'
"""
Explanation: Global parameters
End of explanation
"""
X_train, y_train = cifar10_input.distorted_inputs(data_dir, batch_size)
X_test, y_test = cifar10_input.inputs(eval_data=True, data_dir=data_dir, batch_size=batch_size)
image_holder = tf.placeholder(tf.float32, [batch_size, 24, 24, 3])
label_holder = tf.placeholder(tf.int32, [batch_size])
"""
Explanation: Initializing the weights
When needed, an L2 loss is added to the weights. So that these penalties can be picked up later when computing the network's total loss, they are all stored in a single collection (a sketch of a compatible variable_with_weight_loss helper follows at the end of this example).
Loading the data
The data is obtained through cifar10_input; this file comes from the TensorFlow GitHub repository and can be downloaded and used directly. When the distorted_inputs method is used, the returned data has been augmented: images are randomly cropped, flipped, and have their brightness and contrast adjusted, which diversifies the training data.
The result is a tensor holding one batch of batch_size examples, and the next batch can be read iteratively.
End of explanation
"""
weight1 = variable_with_weight_loss([5, 5, 3, 64], stddev=0.05, lambda_value=0)
kernel1 = tf.nn.conv2d(image_holder, weight1, [1, 1, 1, 1], padding='SAME')
bias1 = tf.Variable(tf.constant(0.0, shape=[64]))
conv1 = tf.nn.relu(tf.nn.bias_add(kernel1, bias1))
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
"""
Explanation: First convolutional layer
As before, we use a 5x5 kernel with 3 input channels and 64 output channels. The parameters of the first layer are not regularized, so lambda_value is set to 0. One small trick here is that the pooling layer uses a 3x3 ksize with a 2x2 stride; the overlapping windows add richness to the features. Finally, LRN is applied. LRN first appeared in the AlexNet ImageNet paper, where it is described as imitating the "lateral inhibition" mechanism of biological neural systems: it creates competition among local neuron activations, making relatively large responses even larger while suppressing neurons with smaller feedback, which improves generalization. The later VGGNet paper, however, compared models with and without LRN and found that it did not improve performance. It is still included here to stay close to the AlexNet design.
End of explanation
"""
weight2 = variable_with_weight_loss(shape=[5, 5, 64, 64], stddev=5e-2, lambda_value=0.0)
kernel2 = tf.nn.conv2d(norm1, weight2, strides=[1, 1, 1, 1], padding='SAME')
bias2 = tf.Variable(tf.constant(0.1, shape=[64]))
conv2 = tf.nn.relu(tf.nn.bias_add(kernel2, bias2))
norm2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001/9.0, beta=0.75)
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
"""
Explanation: Second convolutional layer
64 input channels and again 64 output channels; the bias is initialized to 0.1. The order of LRN and max pooling is swapped relative to the first layer: LRN is applied first, then max pooling. The original notes admit it is unclear why this ordering is used and suggest consulting the literature.
End of explanation
"""
flatten = tf.reshape(pool2, [batch_size, -1])
dim = flatten.get_shape()[1].value
weight3 = variable_with_weight_loss(shape=[dim, 384], stddev=0.04, lambda_value=0.04)
bias3 = tf.Variable(tf.constant(0.1, shape=[384]))
local3 = tf.nn.relu(tf.matmul(flatten, weight3) + bias3)
"""
Explanation: First fully connected layer
The convolutional output is flattened and fully connected to a new hidden layer with 384 nodes. The weights are drawn from a normal distribution with stddev 0.04 and the bias is set to 0.1. Importantly, the weight-loss lambda value is set to 0.04 here, so this layer is L2-regularized.
End of explanation
"""
weight4 = variable_with_weight_loss(shape=[384, 192], stddev=0.04, lambda_value=0.04)
bias4 = tf.Variable(tf.constant(0.1, shape=[192]))
local4 = tf.nn.relu(tf.matmul(local3, weight4) + bias4)
"""
Explanation: Second fully connected layer
Down to 192 nodes, half the previous layer.
End of explanation
"""
weight5 = variable_with_weight_loss(shape=[192, 10], stddev=1/192.0, lambda_value=0.0)
bias5 = tf.Variable(tf.constant(0.0, shape=[10]))
logits = tf.add(tf.matmul(local4, weight5), bias5)

def loss(logits, labels):
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)
    return tf.add_n(tf.get_collection('losses'), name='total_loss')

loss = loss(logits, label_holder)
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
"""
Explanation: Output layer
There are 10 classes at the end.
End of explanation
"""
top_k_op = tf.nn.in_top_k(logits, label_holder, 1)
sess = tf.InteractiveSession()
saver = tf.train.Saver()
tf.global_variables_initializer().run()
"""
Explanation: in_top_k reports the top-k accuracy; by default top 1 is used. Top 5 is also commonly used.
End of explanation
"""
tf.train.start_queue_runners()
"""
Explanation: Start the thread queues needed by cifar10_input, whose main purpose is image data augmentation. A total of 16 threads are used to process the images.
End of explanation
"""
for step in range(max_steps):
    start_time = time.time()
    image_batch, label_batch = sess.run([X_train, y_train])
    _, loss_value = sess.run([train_op, loss],
                             feed_dict={image_holder: image_batch, label_holder: label_batch})
    duration = time.time() - start_time
    if step % 10 == 0:
        examples_per_sec = batch_size / duration
        sec_this_batch = float(duration)
        format_str = ('step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)')
        print(format_str % (step, loss_value, examples_per_sec, sec_this_batch))

saver.save(sess, save_path=os.path.join(model_dir, 'model.chpt'), global_step=max_steps)

num_examples = 10000
num_iter = int(math.ceil(num_examples / batch_size))
true_count = 0
total_sample_count = num_iter * batch_size
step = 0
while step < num_iter:
    image_batch, label_batch = sess.run([X_test, y_test])
    predictions = sess.run([top_k_op],
                           feed_dict={image_holder: image_batch, label_holder: label_batch})
    true_count += np.sum(predictions)
    step += 1

precision = true_count / total_sample_count
print("Precision @ 1 = %.3f" % precision)
sess.close()
"""
Explanation: Before each training step, X_train and y_train are evaluated to fetch one batch_size-sized batch of training data, which is then fed into train_op and loss. Every 10 iterations some progress information is printed.
End of explanation
"""
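The cells above call variable_with_weight_loss without defining it in this excerpt. The following is a minimal sketch of a definition consistent with how it is used here (shape, stddev, lambda_value, and the 'losses' collection); treat it as an illustrative reconstruction, not the author's exact helper.
import tensorflow as tf

def variable_with_weight_loss(shape, stddev, lambda_value):
    # Create a weight variable from a truncated normal distribution and, when
    # lambda_value is non-zero, add its L2 penalty to the 'losses' collection
    # so it is picked up by the loss() function above.
    var = tf.Variable(tf.truncated_normal(shape, stddev=stddev))
    if lambda_value:
        weight_loss = tf.multiply(tf.nn.l2_loss(var), lambda_value, name='weight_loss')
        tf.add_to_collection('losses', weight_loss)
    return var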
mitdbg/modeldb
demos/webinar-2020-5-6/02-mdb_versioned/01-train/01 Basic NLP.ipynb
mit
!python -m spacy download en_core_web_sm """ Explanation: Versioning Example (Part 1/3) In this example, we'll train an NLP model for sentiment analysis of tweets using spaCy. Through this series, we'll take advantage of ModelDB's versioning system to keep track of changes. This workflow requires verta&gt;=0.14.4 and spaCy&gt;=2.0.0. Setup Download a spaCy model to train. End of explanation """ from __future__ import unicode_literals, print_function import boto3 import json import numpy as np import pandas as pd import spacy """ Explanation: Import libraries we'll need. End of explanation """ from verta import Client client = Client('http://localhost:3000/') proj = client.set_project('Tweet Classification') expt = client.set_experiment('SpaCy') """ Explanation: Bring in Verta's ModelDB client to organize our work, and log and version metadata. End of explanation """ S3_BUCKET = "verta-starter" S3_KEY = "english-tweets.csv" FILENAME = S3_KEY boto3.client('s3').download_file(S3_BUCKET, S3_KEY, FILENAME) """ Explanation: Prepare Data Download a dataset of English tweets from S3 for us to train with. End of explanation """ import utils data = pd.read_csv(FILENAME).sample(frac=1).reset_index(drop=True) utils.clean_data(data) data.head() """ Explanation: Then we'll load and clean the data. End of explanation """ from verta.code import Notebook from verta.configuration import Hyperparameters from verta.dataset import S3 from verta.environment import Python code_ver = Notebook() # Notebook & git environment config_ver = Hyperparameters({'n_iter': 20}) dataset_ver = S3("s3://{}/{}".format(S3_BUCKET, S3_KEY)) env_ver = Python(Python.read_pip_environment()) # pip environment and Python version """ Explanation: Capture and Version Model Ingredients We'll first capture metadata about our code, configuration, dataset, and environment using utilities from the verta library. End of explanation """ repo = client.set_repository('Tweet Classification') commit = repo.get_commit(branch='master') """ Explanation: Then, to log them, we'll use a ModelDB repository to prepare a commit. End of explanation """ commit.update("notebooks/tweet-analysis", code_ver) commit.update("config/hyperparams", config_ver) commit.update("data/tweets", dataset_ver) commit.update("env/python", env_ver) commit.save("Initial model") commit """ Explanation: Now we'll add these versioned components to the commit and save it to ModelDB. End of explanation """ nlp = spacy.load('en_core_web_sm') """ Explanation: Train and Log Model We'll use the pre-trained spaCy model we downloaded earlier... End of explanation """ import training training.train(nlp, data, n_iter=20) """ Explanation: ...and fine-tune it with our dataset. End of explanation """ run = client.set_experiment_run() run.log_model(nlp) """ Explanation: Now that our model is good to go, we'll log it to ModelDB so our progress is never lost. Using Verta's ModelDB Client, we'll create an Experiment Run to encapsulate our work, and log our model as an artifact. End of explanation """ run.log_commit( commit, { 'notebook': "notebooks/tweet-analysis", 'hyperparameters': "config/hyperparams", 'training_data': "data/tweets", 'python_env': "env/python", }, ) """ Explanation: And finally, we'll link the commit we created earlier to the Experiment Run to complete our logged model version. End of explanation """
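As an informal sanity check (not part of the original notebook), the fine-tuned pipeline can be run on a new tweet. This assumes that training.train added a text-classification component to nlp, so the exact category names printed below depend on that training code.
sample_tweet = "I love this new phone, best purchase I've made all year!"
doc = nlp(sample_tweet)
print(doc.cats)  # e.g. {'POSITIVE': 0.97, 'NEGATIVE': 0.03}; labels depend on training.py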
cipri-tom/Swiss-on-Amazon
filter_swiss_helpful_reviews.ipynb
gpl-3.0
%matplotlib inline import matplotlib.pyplot as plt import pandas as pd import numpy as np import yaml """ Explanation: The following script extracts the (more) helpful reviews from the swiss reviews and saves them locally. From the extracted reviews it also saves a list with their asin identifiers. The list of asin identifiers will be later used to to find the average review rating for the respective products. End of explanation """ with open("data/swiss-reviews.txt", 'r') as fp: swiss_rev = fp.readlines() len(swiss_rev) swiss_rev[2] """ Explanation: Load the swiss reviews End of explanation """ def filter_helpful(line): l = line.rstrip('\n') l = yaml.load(l) if('helpful' in l.keys()): if(l['helpful'][1] >= 5): return True else: return False else: print("Review does not have helpful score key: "+line) return False """ Explanation: The filter_helpful function keeps only the reviews which had at least 5 flags/votes in the helpfulness field. This amounts to a subset of around 23000 reviews. A smaller subset of around 10000 reviews was obtained as well by only keeping reviews with 10 flags/votes. The main advantage of the smaller subset is that it contains better quality reviews while its drawback is, of course, the reduced size. 1) Extract the helpful reviews End of explanation """ def get_helpful(data): res = [] counter = 1 i = 0 for line in data: i += 1 if(filter_helpful(line)): if(counter % 1000 == 0): print("Count "+str(counter)+" / "+str(i)) counter += 1 res.append(line) return res swiss_reviews_helpful = get_helpful(swiss_rev) len(swiss_reviews_helpful) """ Explanation: Apply the filter_helpful to each swiss product review End of explanation """ write_file = open('data/swiss-reviews-helpful-correct-bigger.txt', 'w') for item in swiss_reviews_helpful: write_file.write(item) write_file.close() """ Explanation: Save the subset with helpful swiss product reviews End of explanation """ with open('data/swiss-reviews-helpful-correct-bigger.txt', 'r') as fp: swiss_reviews_helpful = fp.readlines() """ Explanation: 2) Extract the asins of the products which the helpful reviews correspond to End of explanation """ def filter_asin(line): l = line.rstrip('\n') l = yaml.load(l) if('asin' in l.keys()): return l['asin'] else: return '' helpful_asins = [] counter = 1 for item in swiss_reviews_helpful: if(counter%500 == 0): print(counter) counter += 1 x = filter_asin(item) if(len(x) > 0): helpful_asins.append(x) """ Explanation: The following function simply extracts the 'asin' from the helpful reviews. Repetitions of the asins are of no consequence, as the list is just meant to be a check up. End of explanation """ import pickle with open('data/helpful_asins_bigger.pickle', 'wb') as fp: pickle.dump(helpful_asins, fp) """ Explanation: Save the list of asins. End of explanation """
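As a look ahead to the average-rating step mentioned at the top (and not part of this script), the helpful subset can be aggregated per asin. This sketch assumes each review line carries the usual Amazon 'overall' field with the star rating and reuses the swiss_reviews_helpful list from above.
import yaml
import pandas as pd

records = []
for line in swiss_reviews_helpful:
    review = yaml.load(line.rstrip('\n'))
    if 'asin' in review and 'overall' in review:
        records.append((review['asin'], review['overall']))

ratings = pd.DataFrame(records, columns=['asin', 'overall'])
avg_rating_per_product = ratings.groupby('asin')['overall'].mean()
avg_rating_per_product.head()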
simonsfoundation/CaImAn
demos/notebooks/demo_Ring_CNN.ipynb
gpl-2.0
get_ipython().magic('load_ext autoreload') get_ipython().magic('autoreload 2') import glob import logging import numpy as np import os logging.basicConfig(format= "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s", # filename="/tmp/caiman.log", level=logging.WARNING) import caiman as cm from caiman.source_extraction import cnmf as cnmf from caiman.utils.utils import download_demo import matplotlib.pyplot as plt import bokeh.plotting as bpl bpl.output_notebook() """ Explanation: Example of 1p online analysis using a Ring CNN + OnACID The demo shows how to perform online analysis on one photon data using a Ring CNN for extracting the background followed by processing using the OnACID algorithm. The algorithm relies on the usage a GPU to efficiently estimate and apply the background model so it is recommended to have access to a GPU when running this notebook. End of explanation """ fnames=download_demo('blood_vessel_10Hz.mat') """ Explanation: First specify the data file(s) to be analyzed The download_demo method will download the file (if not already present) and store it inside your caiman_data/example_movies folder. You can specify any path to files you want to analyze. End of explanation """ reuse_model = False # set to True to re-use an existing ring model path_to_model = None # specify a pre-trained model here if needed gSig = (7, 7) # expected half size of neurons gnb = 2 # number of background components for OnACID init_batch = 500 # number of frames for initialization and training params_dict = {'fnames': fnames, 'var_name_hdf5': 'Y', # name of variable inside mat file where the data is stored 'fr': 10, # frame rate (Hz) 'decay_time': 0.5, # approximate length of transient event in seconds 'gSig': gSig, 'p': 0, # order of AR indicator dynamics 'ring_CNN': True, # SET TO TRUE TO USE RING CNN 'min_SNR': 2.65, # minimum SNR for accepting new components 'SNR_lowest': 0.75, # reject components with SNR below this value 'use_cnn': False, # do not use CNN based test for components 'use_ecc': True, # test eccentricity 'max_ecc': 2.625, # reject components with eccentricity above this value 'rval_thr': 0.70, # correlation threshold for new component inclusion 'rval_lowest': 0.25, # reject components with corr below that value 'ds_factor': 1, # spatial downsampling factor (increases speed but may lose some fine structure) 'nb': gnb, 'motion_correct': False, # Flag for motion correction 'init_batch': init_batch, # number of frames for initialization (presumably from the first file) 'init_method': 'bare', 'normalize': False, 'expected_comps': 1100, # maximum number of expected components used for memory pre-allocation (exaggerate here) 'sniper_mode': False, # flag using a CNN to detect new neurons (o/w space correlation is used) 'dist_shape_update' : True, # flag for updating shapes in a distributed way 'min_num_trial': 5, # number of candidate components per frame 'epochs': 3, # number of total passes over the data 'stop_detection': True, # Run a last epoch without detecting new neurons 'K': 50, # initial number of components 'lr': 6e-4, 'lr_scheduler': [0.9, 6000, 10000], 'pct': 0.01, 'path_to_model': path_to_model, # where the ring CNN model is saved/loaded 'reuse_model': reuse_model # flag for re-using a ring CNN model } opts = cnmf.params.CNMFParams(params_dict=params_dict) """ Explanation: Set up some parameters Here we set up some parameters for specifying the ring model and running OnACID. 
We use the same params object as in batch processing with CNMF. End of explanation """ run_onacid = True if run_onacid: cnm = cnmf.online_cnmf.OnACID(params=opts) cnm.fit_online() fld_name = os.path.dirname(cnm.params.ring_CNN['path_to_model']) res_name_nm = os.path.join(fld_name, 'onacid_results_nm.hdf5') cnm.save(res_name_nm) # save initial results (without any postprocessing) else: fld_name = os.path.dirname(path_to_model) res_name = os.path.join(fld_name, 'onacid_results.hdf5') cnm = cnmf.online_cnmf.load_OnlineCNMF(res_name) cnm.params.data['fnames'] = fnames """ Explanation: Now run the Ring-CNN + CaImAn online algorithm (OnACID). The first initbatch frames are used for training the ring-CNN model. Once the model is trained the background is subtracted and the different is used for initialization purposes. The initialization method chosen here bare will only search for a small number of neurons and is mostly used to initialize the background components. Initialization with the full CNMF can also be used by choosing cnmf. We first create an OnACID object located in the module online_cnmf and we pass the parameters similarly to the case of batch processing. We then run the algorithm using the fit_online method. We then save the results inside the folder where the Ring_CNN model is saved. End of explanation """ ds = 10 # plot every ds frames to make more manageable figures init_batch = 500 dims, T = cnmf.utilities.get_file_size(fnames, var_name_hdf5='Y') T = np.array(T).sum() n_epochs = cnm.params.online['epochs'] T_detect = 1e3*np.hstack((np.zeros(init_batch), cnm.t_detect)) T_shapes = 1e3*np.hstack((np.zeros(init_batch), cnm.t_shapes)) T_online = 1e3*np.hstack((np.zeros(init_batch), cnm.t_online)) - T_detect - T_shapes plt.figure() plt.stackplot(np.arange(len(T_detect))[::ds], T_online[::ds], T_detect[::ds], T_shapes[::ds], colors=['tab:red', 'tab:purple', 'tab:brown']) plt.legend(labels=['process', 'detect', 'shapes'], loc=2) plt.title('Processing time allocation') plt.xlabel('Frame #') plt.ylabel('Processing time [ms]') max_val = 80 plt.ylim([0, max_val]); plt.plot([init_batch, init_batch], [0, max_val], '--k') for i in range(n_epochs - 1): plt.plot([(i+1)*T, (i+1)*T], [0, max_val], '--k') plt.xlim([0, n_epochs*T]); plt.savefig(os.path.join(fld_name, 'time_per_frame_ds.pdf'), bbox_inches='tight', pad_inches=0) init_batch = 500 plt.figure() tc_init = cnm.t_init*np.ones(T*n_epochs) ds = 10 #tc_mot = np.hstack((np.zeros(init_batch), np.cumsum(T_motion)/1000)) tc_prc = np.cumsum(T_online)/1000#np.hstack((np.zeros(init_batch), )) tc_det = np.cumsum(T_detect)/1000#np.hstack((np.zeros(init_batch), )) tc_shp = np.cumsum(T_shapes)/1000#np.hstack((np.zeros(init_batch), )) plt.stackplot(np.arange(len(tc_init))[::ds], tc_init[::ds], tc_prc[::ds], tc_det[::ds], tc_shp[::ds], colors=['g', 'tab:red', 'tab:purple', 'tab:brown']) plt.legend(labels=['initialize', 'process', 'detect', 'shapes'], loc=2) plt.title('Processing time allocation') plt.xlabel('Frame #') plt.ylabel('Processing time [s]') max_val = (tc_prc[-1] + tc_det[-1] + tc_shp[-1] + cnm.t_init)*1.05 for i in range(n_epochs - 1): plt.plot([(i+1)*T, (i+1)*T], [0, max_val], '--k') plt.xlim([0, n_epochs*T]); plt.ylim([0, max_val]) plt.savefig(os.path.join(fld_name, 'time_cumulative_ds.pdf'), bbox_inches='tight', pad_inches=0) print('Cost of estimating model and running first epoch: {:.2f}s'.format(tc_prc[T] + tc_det[T] + tc_shp[T] + tc_init[T])) """ Explanation: Check speed Create some plots that show the speed per frame and cumulatively End 
of explanation """ # first compute background summary images images = cm.load(fnames, var_name_hdf5='Y', subindices=slice(None, None, 2)) cn_filter, pnr = cm.summary_images.correlation_pnr(images, gSig=3, swap_dim=False) # change swap dim if output looks weird, it is a problem with tiffile plt.figure(figsize=(15, 7)) plt.subplot(1,2,1); plt.imshow(cn_filter); plt.colorbar() plt.subplot(1,2,2); plt.imshow(pnr); plt.colorbar() cnm.estimates.plot_contours_nb(img=cn_filter, idx=cnm.estimates.idx_components, line_color='white', thr=0.3) """ Explanation: Do some initial plotting End of explanation """ cnm.estimates.nb_view_components(img=cn_filter, denoised_color='red') """ Explanation: View components Now inspect the components extracted by OnACID. Note that if single pass was used then several components would be non-zero only for the part of the time interval indicating that they were detected online by OnACID. Note that if you get data rate error you can start Jupyter notebooks using: 'jupyter notebook --NotebookApp.iopub_data_rate_limit=1.0e10' End of explanation """ save_file = True if save_file: from caiman.utils.nn_models import create_LN_model model_LN = create_LN_model(images, shape=opts.data['dims'] + (1,), n_channels=opts.ring_CNN['n_channels'], width=opts.ring_CNN['width'], use_bias=opts.ring_CNN['use_bias'], gSig=gSig[0], use_add=opts.ring_CNN['use_add']) model_LN.load_weights(cnm.params.ring_CNN['path_to_model']) # Load the data in batches and save them m = [] saved_files = [] batch_length = 256 for i in range(0, T, batch_length): images = cm.load(fnames, var_name_hdf5='Y', subindices=slice(i, i + batch_length)) images_filt = np.squeeze(model_LN.predict(np.expand_dims(images, axis=-1))) temp_file = os.path.join(fld_name, 'pfc_back_removed_' + format(i, '05d') + '.h5') saved_files.append(temp_file) m = cm.movie(np.maximum(images - images_filt, 0)) m.save(temp_file) else: saved_files = glob.glob(os.path.join(fld_name, 'pfc_back_removed_*')) saved_files.sort() fname_mmap = cm.save_memmap([saved_files], order='C', border_to_0=0) Yr, dims, T = cm.load_memmap(fname_mmap) images_mmap = Yr.T.reshape((T,) + dims, order='F') """ Explanation: Load ring model to filter the data Filter the data with the learned Ring CNN model and a create memory mapped file with the background subtracted data. We will use this to run the quality tests and screen for false positive components. End of explanation """ cnm.params.merging['merge_thr'] = 0.7 cnm.estimates.c1 = np.zeros(cnm.estimates.A.shape[-1]) cnm.estimates.bl = np.zeros(cnm.estimates.A.shape[-1]) cnm.estimates.neurons_sn = np.zeros(cnm.estimates.A.shape[-1]) cnm.estimates.g = None #np.ones((cnm.estimates.A.shape[-1], 1))*.9 cnm.estimates.merge_components(Yr, cnm.params) """ Explanation: Merge components End of explanation """ cnm.params.quality cnm.estimates.evaluate_components(imgs=images_mmap, params=cnm.params) cnm.estimates.plot_contours_nb(img=cn_filter, idx=cnm.estimates.idx_components, line_color='white') cnm.estimates.nb_view_components(idx=cnm.estimates.idx_components, img=cn_filter) """ Explanation: Evaluate components and compare again We run the component evaluation tests to screen for false positive components. 
End of explanation """ cnmfe_results = download_demo('online_vs_offline.npz') locals().update(np.load(cnmfe_results, allow_pickle=True)) A_patch_good = A_patch_good.item() estimates_gt = cnmf.estimates.Estimates(A=A_patch_good, C=C_patch_good, dims=dims) maxthr=0.01 cnm.estimates.A_thr=None cnm.estimates.threshold_spatial_components(maxthr=maxthr) estimates_gt.A_thr=None estimates_gt.threshold_spatial_components(maxthr=maxthr*10) min_size = np.pi*(gSig[0]/1.5)**2 max_size = np.pi*(gSig[0]*1.5)**2 ntk = cnm.estimates.remove_small_large_neurons(min_size_neuro=min_size, max_size_neuro=2*max_size) gtk = estimates_gt.remove_small_large_neurons(min_size_neuro=min_size, max_size_neuro=2*max_size) m1, m2, nm1, nm2, perf = cm.base.rois.register_ROIs(estimates_gt.A_thr[:, estimates_gt.idx_components], cnm.estimates.A_thr[:, cnm.estimates.idx_components], dims, align_flag=False, thresh_cost=.7, plot_results=True, Cn=cn_filter, enclosed_thr=None)[:-1] """ Explanation: Compare against CNMF-E results We download the results of CNMF-E on the same dataset and compare. End of explanation """ for k, v in perf.items(): print(k + ':', '%.4f' % v, end=' ') """ Explanation: Print performance results End of explanation """ res_name = os.path.join(fld_name, 'onacid_results.hdf5') cnm.save(res_name) """ Explanation: Save the results End of explanation """ import matplotlib.lines as mlines lp, hp = np.nanpercentile(cn_filter, [5, 98]) A_onacid = cnm.estimates.A_thr.toarray().copy() A_onacid /= A_onacid.max(0) A_TP = estimates_gt.A[:, m1].toarray() #cnm.estimates.A[:, cnm.estimates.idx_components[m2]].toarray() A_TP = A_TP.reshape(dims + (-1,), order='F').transpose(2,0,1) A_FN = estimates_gt.A[:, nm1].toarray() A_FN = A_FN.reshape(dims + (-1,), order='F').transpose(2,0,1) A_FP = A_onacid[:,cnm.estimates.idx_components[nm2]] A_FP = A_FP.reshape(dims + (-1,), order='F').transpose(2,0,1) plt.figure(figsize=(15, 12)) plt.imshow(cn_filter, vmin=lp, vmax=hp, cmap='viridis') plt.colorbar(); for aa in A_TP: plt.contour(aa, [0.05], colors='k'); for aa in A_FN: plt.contour(aa, [0.05], colors='r'); for aa in A_FP: plt.contour(aa, [0.25], colors='w'); cl = ['k', 'r', 'w'] lb = ['both', 'CNMF-E only', 'ring CNN only'] day = [mlines.Line2D([], [], color=cl[i], label=lb[i]) for i in range(3)] plt.legend(handles=day, loc=3) plt.axis('off'); plt.margins(0, 0); plt.savefig(os.path.join(fld_name, 'ring_CNN_contours_gSig_3.pdf'), bbox_inches='tight', pad_inches=0) A_rej = cnm.estimates.A[:, cnm.estimates.idx_components_bad].toarray() A_rej = A_rej.reshape(dims + (-1,), order='F').transpose(2,0,1) plt.figure(figsize=(15, 15)) plt.imshow(cn_filter, vmin=lp, vmax=hp, cmap='viridis') plt.title('Rejected Components') for aa in A_rej: plt.contour(aa, [0.05], colors='w'); """ Explanation: Make some plots End of explanation """ from caiman.utils.nn_models import create_LN_model model_LN = create_LN_model(images, shape=opts.data['dims'] + (1,), n_channels=opts.ring_CNN['n_channels'], width=opts.ring_CNN['width'], use_bias=opts.ring_CNN['use_bias'], gSig=gSig[0], use_add=opts.ring_CNN['use_add']) model_LN.load_weights(cnm.params.ring_CNN['path_to_model']) W = model_LN.get_weights() plt.figure(figsize=(10, 10)) plt.subplot(2,2,1); plt.imshow(np.squeeze(W[0][:,:,:,0])); plt.colorbar(); plt.title('Ring Kernel 1') plt.subplot(2,2,2); plt.imshow(np.squeeze(W[0][:,:,:,1])); plt.colorbar(); plt.title('Ring Kernel 2') plt.subplot(2,2,3); plt.imshow(np.squeeze(W[-1][:,:,0])); plt.colorbar(); plt.title('Multiplicative Layer 1') plt.subplot(2,2,4); 
plt.imshow(np.squeeze(W[-1][:,:,1])); plt.colorbar(); plt.title('Multiplicative Layer 2'); """ Explanation: Show the learned filters End of explanation """ m1 = cm.load(fnames, var_name_hdf5='Y') # original data m2 = cm.load(fname_mmap) # background subtracted data m3 = m1 - m2 # estimated background m4 = cm.movie(cnm.estimates.A[:,cnm.estimates.idx_components].dot(cnm.estimates.C[cnm.estimates.idx_components])).reshape(dims + (T,)).transpose(2,0,1) # estimated components nn = 0.01 mm = 1 - nn/4 # normalize movies by quantiles m1 = (m1 - np.quantile(m1[:1000], nn))/(np.quantile(m1[:1000], mm) - np.quantile(m1[:1000], nn)) m2 = (m2 - np.quantile(m2[:1000], nn))/(np.quantile(m2[:1000], mm) - np.quantile(m2[:1000], nn)) m3 = (m3 - np.quantile(m3[:1000], nn))/(np.quantile(m3[:1000], mm) - np.quantile(m3[:1000], nn)) m4 = (m4 - np.quantile(m4[:1000], nn))/(np.quantile(m4[:1000], mm) - np.quantile(m4[:1000], nn)) m = cm.concatenate((cm.concatenate((m1.transpose(0,2,1), m3.transpose(0,2,1)), axis=2), cm.concatenate((m2.transpose(0,2,1), m4), axis=2)), axis=1) m[:3000].play(magnification=2, q_min=1, plot_text=True, save_movie=True, movie_name=os.path.join(fld_name, 'movie.avi')) """ Explanation: Make a movie End of explanation """
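# Added illustration (optional): reload the saved OnACID results and inspect them again.
# Assumes `fld_name` and the correlation image `cn_filter` from the cells above.
import os
from caiman.source_extraction import cnmf

res_name = os.path.join(fld_name, 'onacid_results.hdf5')
cnm_reloaded = cnmf.online_cnmf.load_OnlineCNMF(res_name)
print('Accepted components:', len(cnm_reloaded.estimates.idx_components))
print('Rejected components:', len(cnm_reloaded.estimates.idx_components_bad))
cnm_reloaded.estimates.plot_contours_nb(img=cn_filter,
                                        idx=cnm_reloaded.estimates.idx_components,
                                        line_color='white')
""" Explanation: (Added illustration) A minimal sketch of how the results saved above can be reused later: load_OnlineCNMF restores the OnACID object from the hdf5 file, after which the accepted/rejected component indices and the contour plot are available exactly as in the earlier cells. It assumes fld_name and cn_filter from this notebook are still defined; in a fresh session they would have to be recreated first. This cell is an optional addition, not part of the original demo. End of explanation """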
Kaggle/learntools
notebooks/deep_learning_intro/raw/tut3.ipynb
apache-2.0
#$HIDE_INPUT$ import pandas as pd from IPython.display import display red_wine = pd.read_csv('../input/dl-course-data/red-wine.csv') # Create training and validation splits df_train = red_wine.sample(frac=0.7, random_state=0) df_valid = red_wine.drop(df_train.index) display(df_train.head(4)) # Scale to [0, 1] max_ = df_train.max(axis=0) min_ = df_train.min(axis=0) df_train = (df_train - min_) / (max_ - min_) df_valid = (df_valid - min_) / (max_ - min_) # Split features and target X_train = df_train.drop('quality', axis=1) X_valid = df_valid.drop('quality', axis=1) y_train = df_train['quality'] y_valid = df_valid['quality'] """ Explanation: Introduction In the first two lessons, we learned how to build fully-connected networks out of stacks of dense layers. When first created, all of the network's weights are set randomly -- the network doesn't "know" anything yet. In this lesson we're going to see how to train a neural network; we're going to see how neural networks learn. As with all machine learning tasks, we begin with a set of training data. Each example in the training data consists of some features (the inputs) together with an expected target (the output). Training the network means adjusting its weights in such a way that it can transform the features into the target. In the 80 Cereals dataset, for instance, we want a network that can take each cereal's 'sugar', 'fiber', and 'protein' content and produce a prediction for that cereal's 'calories'. If we can successfully train a network to do that, its weights must represent in some way the relationship between those features and that target as expressed in the training data. In addition to the training data, we need two more things: - A "loss function" that measures how good the network's predictions are. - An "optimizer" that can tell the network how to change its weights. The Loss Function We've seen how to design an architecture for a network, but we haven't seen how to tell a network what problem to solve. This is the job of the loss function. The loss function measures the disparity between the target's true value and the value the model predicts. Different problems call for different loss functions. We have been looking at regression problems, where the task is to predict some numerical value -- calories in 80 Cereals, rating in Red Wine Quality. Other regression tasks might be predicting the price of a house or the fuel efficiency of a car. A common loss function for regression problems is the mean absolute error or MAE. For each prediction y_pred, MAE measures the disparity from the true target y_true by an absolute difference abs(y_true - y_pred). The total MAE loss on a dataset is the mean of all these absolute differences. <figure style="padding: 1em;"> <img src="https://i.imgur.com/VDcvkZN.png" width="500" alt="A graph depicting error bars from data points to the fitted line."> <figcaption style="textalign: center; font-style: italic"><center>The mean absolute error is the average length between the fitted curve and the data points. </center></figcaption> </figure> Besides MAE, other loss functions you might see for regression problems are the mean-squared error (MSE) or the Huber loss (both available in Keras). During training, the model will use the loss function as a guide for finding the correct values of its weights (lower loss is better). In other words, the loss function tells the network its objective. 
The Optimizer - Stochastic Gradient Descent We've described the problem we want the network to solve, but now we need to say how to solve it. This is the job of the optimizer. The optimizer is an algorithm that adjusts the weights to minimize the loss. Virtually all of the optimization algorithms used in deep learning belong to a family called stochastic gradient descent. They are iterative algorithms that train a network in steps. One step of training goes like this: 1. Sample some training data and run it through the network to make predictions. 2. Measure the loss between the predictions and the true values. 3. Finally, adjust the weights in a direction that makes the loss smaller. Then just do this over and over until the loss is as small as you like (or until it won't decrease any further.) <figure style="padding: 1em;"> <img src="https://i.imgur.com/rFI1tIk.gif" width="1600" alt="Fitting a line batch by batch. The loss decreases and the weights approach their true values."> <figcaption style="textalign: center; font-style: italic"><center>Training a neural network with Stochastic Gradient Descent. </center></figcaption> </figure> Each iteration's sample of training data is called a minibatch (or often just "batch"), while a complete round of the training data is called an epoch. The number of epochs you train for is how many times the network will see each training example. The animation shows the linear model from Lesson 1 being trained with SGD. The pale red dots depict the entire training set, while the solid red dots are the minibatches. Every time SGD sees a new minibatch, it will shift the weights (w the slope and b the y-intercept) toward their correct values on that batch. Batch after batch, the line eventually converges to its best fit. You can see that the loss gets smaller as the weights get closer to their true values. Learning Rate and Batch Size Notice that the line only makes a small shift in the direction of each batch (instead of moving all the way). The size of these shifts is determined by the learning rate. A smaller learning rate means the network needs to see more minibatches before its weights converge to their best values. The learning rate and the size of the minibatches are the two parameters that have the largest effect on how the SGD training proceeds. Their interaction is often subtle and the right choice for these parameters isn't always obvious. (We'll explore these effects in the exercise.) Fortunately, for most work it won't be necessary to do an extensive hyperparameter search to get satisfactory results. Adam is an SGD algorithm that has an adaptive learning rate that makes it suitable for most problems without any parameter tuning (it is "self tuning", in a sense). Adam is a great general-purpose optimizer. Adding the Loss and Optimizer After defining a model, you can add a loss function and optimizer with the model's compile method: model.compile( optimizer="adam", loss="mae", ) Notice that we are able to specify the loss and optimizer with just a string. You can also access these directly through the Keras API -- if you wanted to tune parameters, for instance -- but for us, the defaults will work fine. <blockquote style="margin-right:auto; margin-left:auto; background-color: #ebf9ff; padding: 1em; margin:24px;"> <strong>What's In a Name?</strong><br> The <strong>gradient</strong> is a vector that tells us in what direction the weights need to go. More precisely, it tells us how to change the weights to make the loss change <em>fastest</em>. 
We call our process gradient <strong>descent</strong> because it uses the gradient to <em>descend</em> the loss curve towards a minimum. <strong>Stochastic</strong> means "determined by chance." Our training is <em>stochastic</em> because the minibatches are <em>random samples</em> from the dataset. And that's why it's called SGD! </blockquote> Example - Red Wine Quality Now we know everything we need to start training deep learning models. So let's see it in action! We'll use the Red Wine Quality dataset. This dataset consists of physiochemical measurements from about 1600 Portuguese red wines. Also included is a quality rating for each wine from blind taste-tests. How well can we predict a wine's perceived quality from these measurements? We've put all of the data preparation into this next hidden cell. It's not essential to what follows so feel free to skip it. One thing you might note for now though is that we've rescaled each feature to lie in the interval $[0, 1]$. As we'll discuss more in Lesson 5, neural networks tend to perform best when their inputs are on a common scale. End of explanation """ print(X_train.shape) """ Explanation: How many inputs should this network have? We can discover this by looking at the number of columns in the data matrix. Be sure not to include the target ('quality') here -- only the input features. End of explanation """ from tensorflow import keras from tensorflow.keras import layers model = keras.Sequential([ layers.Dense(512, activation='relu', input_shape=[11]), layers.Dense(512, activation='relu'), layers.Dense(512, activation='relu'), layers.Dense(1), ]) """ Explanation: Eleven columns means eleven inputs. We've chosen a three-layer network with over 1500 neurons. This network should be capable of learning fairly complex relationships in the data. End of explanation """ model.compile( optimizer='adam', loss='mae', ) """ Explanation: Deciding the architecture of your model should be part of a process. Start simple and use the validation loss as your guide. You'll learn more about model development in the exercises. After defining the model, we compile in the optimizer and loss function. End of explanation """ history = model.fit( X_train, y_train, validation_data=(X_valid, y_valid), batch_size=256, epochs=10, ) """ Explanation: Now we're ready to start the training! We've told Keras to feed the optimizer 256 rows of the training data at a time (the batch_size) and to do that 10 times all the way through the dataset (the epochs). End of explanation """ import pandas as pd # convert the training history to a dataframe history_df = pd.DataFrame(history.history) # use Pandas native plot method history_df['loss'].plot(); """ Explanation: You can see that Keras will keep you updated on the loss as the model trains. Often, a better way to view the loss though is to plot it. The fit method in fact keeps a record of the loss produced during training in a History object. We'll convert the data to a Pandas dataframe, which makes the plotting easy. End of explanation """
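# Added illustration (optional): check the MAE definition "by hand" with NumPy.
# Assumes the `model`, `X_valid` and `y_valid` objects defined above.
import numpy as np

y_pred = model.predict(X_valid).squeeze()
mae_by_hand = np.mean(np.abs(y_valid.values - y_pred))
print("Validation MAE computed by hand: {:.4f}".format(mae_by_hand))

# The same quantity as Keras reports it, since the model was compiled with loss='mae'
mae_keras = model.evaluate(X_valid, y_valid, verbose=0)
print("Validation MAE from model.evaluate: {:.4f}".format(mae_keras))
""" Explanation: (Added illustration) A small sanity check of the loss definition given above: MAE is just the mean of the absolute differences abs(y_true - y_pred), so computing it directly with NumPy should agree (up to floating-point rounding) with the value Keras reports through model.evaluate. This optional cell assumes the model, X_valid and y_valid defined earlier in the lesson. End of explanation """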
GoogleCloudPlatform/mlops-on-gcp
model_serving/caip-load-testing/03-analyze-results.ipynb
apache-2.0
import time from datetime import datetime from typing import List import numpy as np import pandas as pd import google.auth from google.cloud import logging_v2 from google.cloud.monitoring_dashboard.v1 import DashboardsServiceClient from google.cloud.logging_v2 import MetricsServiceV2Client from google.cloud.monitoring_v3.query import Query from google.cloud.monitoring_v3 import MetricServiceClient import matplotlib.pyplot as plt """ Explanation: Analyzing Locust Load Testing Results This Notebook demonstrates how to analyze AI Platform Prediction load testing runs using metrics captured in Cloud Monitoring. This Notebook build on the 02-perf-testing.ipynb notebook that shows how to configure and run load tests against AI Platform Prediction using Locust.io. The outlined testing process results in a Pandas dataframe that aggregates the standard AI Platform Prediction metrics with a set of custom, log-based metrics generated from log entries captured by the Locust testing script. The Notebook covers the following steps: 1. Retrieve and consolidate test results from Cloud Monitoring 2. Analyze and visualize utilization and latency results Setup This notebook was tested on AI Platform Notebooks using the standard TF 2.2 image. Import libraries End of explanation """ PROJECT_ID = '[your-project-id]' # Set your project Id MODEL_NAME = 'resnet_classifier' MODEL_VERSION = 'v1' LOG_NAME = 'locust' # Set your log name TEST_ID = 'test-20200829-190943' # Set your test Id TEST_START_TIME = datetime.fromisoformat('2020-08-28T21:30:00-00:00') # Set your test start time TEST_END_TIME = datetime.fromisoformat('2020-08-29T22:00:00-00:00') # Set your test end time """ Explanation: Configure GCP environment settings End of explanation """ creds , _ = google.auth.default() client = MetricServiceClient(credentials=creds) project_path = client.project_path(PROJECT_ID) filter = 'metric.type=starts_with("ml.googleapis.com/prediction")' for descriptor in client.list_metric_descriptors(project_path, filter_=filter): print(descriptor.type) """ Explanation: 1. Retrieve and consolidate test results Locust's web interface along with a Cloud Monitoring dashboard provide a cursory view into performance of a tested AI Platform Prediction model version. A more thorough analysis can be performed by consolidating metrics collected during a test and using data analytics and visualization tools. In this section, you will retrieve the metrics captured in Cloud Monitoring and consolidate them into a single Pandas dataframe. 1.1 List available AI Platform Prediction metrics End of explanation """ filter = 'metric.type=starts_with("logging.googleapis.com/user")' for descriptor in client.list_metric_descriptors(project_path, filter_=filter): print(descriptor.type) """ Explanation: 1.2. List custom log based metrics End of explanation """ def retrieve_metrics(client, project_id, start_time, end_time, model, model_version, test_id, log_name): """ Retrieves test metrics from Cloud Monitoring. """ def _get_aipp_metric(metric_type: str, labels: List[str]=[], metric_name=None)-> pd.DataFrame: """ Retrieves a specified AIPP metric. 
""" query = Query(client, project_id, metric_type=metric_type) query = query.select_interval(end_time, start_time) query = query.select_resources(model_id=model) query = query.select_resources(version_id=model_version) if metric_name: labels = ['metric'] + labels df = query.as_dataframe(labels=labels) if not df.empty: if metric_name: df.columns.set_levels([metric_name], level=0, inplace=True) df = df.set_index(df.index.round('T')) return df def _get_locust_metric(metric_type: str, labels: List[str]=[], metric_name=None)-> pd.DataFrame: """ Retrieves a specified custom log-based metric. """ query = Query(client, project_id, metric_type=metric_type) query = query.select_interval(end_time, start_time) query = query.select_metrics(log=log_name) query = query.select_metrics(test_id=test_id) if metric_name: labels = ['metric'] + labels df = query.as_dataframe(labels=labels) if not df.empty: if metric_name: df.columns.set_levels([metric_name], level=0, inplace=True) df = df.apply(lambda row: [metric.mean for metric in row]) df = df.set_index(df.index.round('T')) return df # Retrieve GPU duty cycle metric_type = 'ml.googleapis.com/prediction/online/accelerator/duty_cycle' metric = _get_aipp_metric(metric_type, ['replica_id', 'signature'], 'duty_cycle') df = metric # Retrieve CPU utilization metric_type = 'ml.googleapis.com/prediction/online/cpu/utilization' metric = _get_aipp_metric(metric_type, ['replica_id', 'signature'], 'cpu_utilization') if not metric.empty: df = df.merge(metric, how='outer', right_index=True, left_index=True) # Retrieve prediction count metric_type = 'ml.googleapis.com/prediction/prediction_count' metric = _get_aipp_metric(metric_type, ['replica_id', 'signature'], 'prediction_count') if not metric.empty: df = df.merge(metric, how='outer', right_index=True, left_index=True) # Retrieve responses per second metric_type = 'ml.googleapis.com/prediction/response_count' metric = _get_aipp_metric(metric_type, ['replica_id', 'signature'], 'response_rate') if not metric.empty: metric = (metric/60).round(2) df = df.merge(metric, how='outer', right_index=True, left_index=True) # Retrieve backend latencies metric_type = 'ml.googleapis.com/prediction/latencies' metric = _get_aipp_metric(metric_type, ['latency_type', 'replica_id', 'signature']) if not metric.empty: metric = metric.apply(lambda row: [round(latency.mean/1000,1) for latency in row]) metric.columns.set_names(['metric', 'replica_id', 'signature'], inplace=True) level_values = ['Latency: ' + value for value in metric.columns.get_level_values(level=0)] metric.columns.set_levels(level_values, level=0, inplace=True) df = df.merge(metric, how='outer', right_index=True, left_index=True) # Retrieve Locust latency metric_type = 'logging.googleapis.com/user/locust_latency' metric = _get_locust_metric(metric_type, ['replica_id', 'signature'], 'Latency: client') if not metric.empty: metric = metric.round(2).replace([0], np.nan) df = df.merge(metric, how='outer', right_index=True, left_index=True) # Retrieve Locust user count metric_type = 'logging.googleapis.com/user/locust_users' metric = _get_locust_metric(metric_type, ['replica_id', 'signature'], 'User count') if not metric.empty: metric = metric.round() df = df.merge(metric, how='outer', right_index=True, left_index=True) # Retrieve Locust num_failures metric_type = 'logging.googleapis.com/user/num_failures' metric = _get_locust_metric(metric_type, ['replica_id', 'signature'], 'Num of failures') if not metric.empty: metric = metric.round() df = df.merge(metric, how='outer', 
right_index=True, left_index=True) # Retrieve Locust num_failures metric_type = 'logging.googleapis.com/user/num_requests' metric = _get_locust_metric(metric_type, ['replica_id', 'signature'], 'Num of requests') if not metric.empty: metric = metric.round() df = df.merge(metric, how='outer', right_index=True, left_index=True) return df test_result = retrieve_metrics( client, PROJECT_ID, TEST_START_TIME, TEST_END_TIME, MODEL_NAME, MODEL_VERSION, TEST_ID, LOG_NAME ) test_result.head().T """ Explanation: 1.3. Retrieve test metrics Define a helper function that retrieves test metrics from Cloud Monitoring End of explanation """ gpu_utilization_results = test_result['duty_cycle'] gpu_utilization_results.columns = gpu_utilization_results.columns.get_level_values(0) ax = gpu_utilization_results.plot(figsize=(14, 9), legend=True) ax.set_xlabel('Time', fontsize=16) ax.set_ylabel('Utilization ratio', fontsize=16) _ = ax.set_title("GPU Utilization", fontsize=20) """ Explanation: The retrieved dataframe uses hierarchical indexing for column names. The reason is that some metrics contain multiple time series. For example, the GPU duty_cycle metric includes a time series of measures per each GPU used in the deployment (denoted as replica_id). The top level of the column index is a metric name. The second level is a replica_id. The third level is a signature of a model. All metrics are aligned on the same timeline. 2. Analyzing and Visualizing test results In the context of our scenario the key concern is GPU utilization at various levels of throughput and latency. The primary metric exposed by AI Platform Prediction to monitor GPU utilization is duty cycle. This metric captures an average fraction of time over the 60 second period during which the accelerator(s) were actively processing. 2.1. GPU utilization End of explanation """ cpu_utilization_results = test_result['cpu_utilization'] cpu_utilization_results.columns = cpu_utilization_results.columns.get_level_values(0) ax = cpu_utilization_results.plot(figsize=(14, 9), legend=True) ax.set_xlabel('Time', fontsize=16) ax.set_ylabel('Utilization ratio', fontsize=16) _ = ax.set_title("CPU Utilization", fontsize=20) """ Explanation: 2.2. CPU utilization End of explanation """ latency_results = test_result[['Latency: model', 'Latency: client']] latency_results.columns = latency_results.columns.get_level_values(0) ax = latency_results.plot(figsize=(14, 9), legend=True) ax.set_xlabel('Time', fontsize=16) ax.set_ylabel('milisecond', fontsize=16) _ = ax.set_title("Latency", fontsize=20) """ Explanation: 2.3. Latency End of explanation """ throughput_results = test_result[['response_rate', 'User count']] throughput_results.columns = throughput_results.columns.get_level_values(0) ax = throughput_results.plot(figsize=(14, 9), legend=True) ax.set_xlabel('Time', fontsize=16) ax.set_ylabel('Count', fontsize=16) _ = ax.set_title("Response Rate vs User Count", fontsize=20) """ Explanation: 2.4. Request throughput We are going to use the response_rate metric, which tracks a number of responses returned by AI Platform Prediction over a 1 minute interval. 
End of explanation """ logging_client = MetricsServiceV2Client(credentials=creds) parent = logging_client.project_path(PROJECT_ID) for element in logging_client.list_log_metrics(parent): metric_path = logging_client.metric_path(PROJECT_ID, element.name) logging_client.delete_log_metric(metric_path) print("Deleted metric: ", metric_path) display_name = 'AI Platform Prediction and Locust' dashboard_service_client = DashboardsServiceClient(credentials=creds) parent = 'projects/{}'.format(PROJECT_ID) for dashboard in dashboard_service_client.list_dashboards(parent): if dashboard.display_name == display_name: dashboard_service_client.delete_dashboard(dashboard.name) print("Deleted dashboard:", dashboard.name) """ Explanation: Cleaning up: delete the log-based metrics and dashboard End of explanation """
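# Added illustration (optional): aggregate statistics over the consolidated
# `test_result` dataframe (hierarchical columns: metric / replica_id / signature).
import pandas as pd

summary = pd.DataFrame({
    'mean': test_result.mean(numeric_only=True),
    'p50': test_result.quantile(0.50, numeric_only=True),
    'p95': test_result.quantile(0.95, numeric_only=True),
    'max': test_result.max(numeric_only=True),
})
metrics_of_interest = ['duty_cycle', 'Latency: client', 'response_rate']
mask = summary.index.get_level_values(0).isin(metrics_of_interest)
print(summary[mask])
""" Explanation: (Added illustration) A hedged, optional sketch of one further analysis step: because all metrics were merged into a single dataframe with hierarchical column names, per-metric summary statistics (mean, median, 95th percentile, maximum) can be computed directly with pandas and filtered on the first index level. The metric names used here (duty_cycle, Latency: client, response_rate) are the ones created by retrieve_metrics above; adjust the list if your test produced different columns. It would typically be run before the cleanup cell. End of explanation """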
Neuroglycerin/neukrill-net-work
notebooks/augmentation/Preliminary Online Augmentation Results.ipynb
mit
import pylearn2.utils import pylearn2.config import theano import neukrill_net.dense_dataset import neukrill_net.utils import numpy as np %matplotlib inline import matplotlib.pyplot as plt import holoviews as hl %load_ext holoviews.ipython import sklearn.metrics cd .. settings = neukrill_net.utils.Settings("settings.json") run_settings = neukrill_net.utils.load_run_settings( "run_settings/replicate_8aug.json", settings, force=True) model = pylearn2.utils.serial.load(run_settings['alt_picklepath']) c = 'train_objective' channel = model.monitor.channels[c] """ Explanation: The following are the results we've got from online augmentation so far. Some bugs have been fixed by Scott since then so these might be redundant. If they're not redundant then they are very bad. Loading the pickle End of explanation """ plt.title(c) plt.plot(channel.example_record,channel.val_record) c = 'train_y_nll' channel = model.monitor.channels[c] plt.title(c) plt.plot(channel.example_record,channel.val_record) def plot_monitor(c = 'valid_y_nll'): channel = model.monitor.channels[c] plt.title(c) plt.plot(channel.example_record,channel.val_record) return None plot_monitor() plot_monitor(c="valid_objective") """ Explanation: Replicating 8aug The DensePNGDataset run with 8 augmentations got us most of the way to our best score in one go. If we can replicate that results with online augmentation then we can be pretty confident that online augmentation is a good idea. Unfortunately, it looks like we can't: End of explanation """ %run check_test_score.py run_settings/replicate_8aug.json """ Explanation: Would actually like to know what kind of score this model gets on the check_test_score script. End of explanation """ run_settings = neukrill_net.utils.load_run_settings( "run_settings/online_manyaug.json", settings, force=True) model = pylearn2.utils.serial.load(run_settings['alt_picklepath']) plot_monitor(c="valid_objective") """ Explanation: So we can guess that the log loss score we're seeing is in fact correct. There are definitely some bugs in the ListDataset code. Many Augmentations We want to be able to use online augmentations to run large combinations of different augmentations on the images. This model had almost everything turned on, a little: End of explanation """ settings = neukrill_net.utils.Settings("settings.json") run_settings = neukrill_net.utils.load_run_settings( "run_settings/alexnet_based_onlineaug.json", settings, force=True) model = pylearn2.utils.serial.load(run_settings['pickle abspath']) plot_monitor(c="train_y_nll") plot_monitor(c="valid_y_nll") plot_monitor(c="train_objective") plot_monitor(c="valid_objective") """ Explanation: Looks like it's completely incapable of learning. These problems suggest that the augmentation might be garbling the images; making them useless for learning from. Or worse, garbling the order so each image doesn't correspond to its label. Transformer Results We also have results from a network trained using a Transformer dataset, which is how online augmentation is supposed to be supported in Pylearn2. End of explanation """
AEW2015/PYNQ_PR_Overlay
Pynq-Z1/notebooks/examples/tracebuffer_i2c.ipynb
bsd-3-clause
from pprint import pprint from time import sleep from pynq import PL from pynq import Overlay from pynq.drivers import Trace_Buffer from pynq.iop import Pmod_TMP2 from pynq.iop import PMODA from pynq.iop import PMODB from pynq.iop import ARDUINO ol = Overlay("base.bit") ol.download() pprint(PL.ip_dict) """ Explanation: Trace Buffer - Tracing IIC Transactions The Trace_Buffer class can monitor the waveform and transations on PMODA, PMODB, and ARDUINO connectors. This demo shows how to use this class to track IIC transactions. For this demo, users have to connect the Pmod TMP2 sensor to PMODA. Step 1: Overlay Management Users have to import all the necessary classes. Make sure to use the right bitstream. End of explanation """ tmp2 = Pmod_TMP2(PMODA) tmp2.set_log_interval_ms(1) """ Explanation: Step 2: Instantiating Temperature Sensor Although this demo can also be done on PMODB, we use PMODA in this demo. Set the log interval to be 1ms. This means the IO Processor (IOP) will read temperature values every 1ms. End of explanation """ tr_buf = Trace_Buffer(PMODA,"i2c",samplerate=1000000) # Start the trace buffer tr_buf.start() # Issue reads for 1 second tmp2.start_log() sleep(1) tmp2_log = tmp2.get_log() # Stop the trace buffer tr_buf.stop() """ Explanation: Step 3: Tracking Transactions Instantiating the trace buffer with IIC protocol. The sample rate is set to 1MHz. Although the IIC clock is only 100kHz, we still have to use higher sample rate to keep track of IIC control signals from IOP. After starting the trace buffer DMA, also start to issue IIC reads for 1 second. Then stop the trace buffer DMA. End of explanation """ # Configuration for PMODA start = 600 stop = 10000 tri_sel=[0x40000,0x80000] tri_0=[0x4,0x8] tri_1=[0x400,0x800] mask = 0x0 # Parsing and decoding tr_buf.parse("i2c_trace.csv", start,stop,mask,tri_sel,tri_0,tri_1) tr_buf.set_metadata(['SDA','SCL']) tr_buf.decode("i2c_trace.pd") """ Explanation: Step 4: Parsing and Decoding Transactions The trace buffer object is able to parse the transactions into a *.csv file (saved into the same folder as this script). The input arguments for the parsing method is: * start : the starting sample number of the trace. * stop : the stopping sample number of the trace. * tri_sel: masks for tri-state selection bits. * tri_0: masks for pins selected when the corresponding tri_sel = 0. * tri_0: masks for pins selected when the corresponding tri_sel = 1. * mask: mask for pins selected always. For PMODB, the configuration of the masks can be: * tri_sel=[0x40000<<32,0x80000<<32] * tri_0=[0x4<<32,0x8<<32] * tri_1=[0x400<<32,0x800<<32] * mask = 0x0 Then the trace buffer object can also decode the transactions using the open-source sigrok decoders. The decoded file (*.pd) is saved into the same folder as this script. Reference: https://sigrok.org/wiki/Main_Page End of explanation """ s0 = 1 s1 = 5000 tr_buf.display(s0,s1) """ Explanation: Step 5: Displaying the Result The final waveform and decoded transactions are shown using the open-source wavedrom library. The two input arguments (s0 and s1 ) indicate the starting and stopping location where the waveform is shown. The valid range for s0 and s1 is: 0 &lt; s0 &lt; s1 &lt; (stop-start), where start and stop are defined in the last step. Reference: https://www.npmjs.com/package/wavedrom End of explanation """
rnder/data-science-from-scratch
notebook/ch21_network_analysis.ipynb
unlicense
from __future__ import division import math, random, re from collections import defaultdict, Counter, deque from linear_algebra import dot, get_row, get_column, make_matrix, magnitude, scalar_multiply, shape, distance from functools import partial users = [ { "id": 0, "name": "Hero" }, { "id": 1, "name": "Dunn" }, { "id": 2, "name": "Sue" }, { "id": 3, "name": "Chi" }, { "id": 4, "name": "Thor" }, { "id": 5, "name": "Clive" }, { "id": 6, "name": "Hicks" }, { "id": 7, "name": "Devin" }, { "id": 8, "name": "Kate" }, { "id": 9, "name": "Klein" } ] """ Explanation: 21장 넀튞워크 분석 많은 데읎터 묞제는 녾드(node)와 ê·ž 사읎륌 연결하는 엣지(edge)로 구성된 넀튞워크(network)의 ꎀ점에서 볌 수 있닀. 예륌듀얎, 페읎슀북에서는 사용자가 녞드띌멎 귞듀의 친구 ꎀ계는 엣지가 된닀. 웹에서는 각 웹페읎지가 녞드읎고 페읎지 사읎륌 연결하는 하읎퍌링크가 엣지가 된닀. 페읎슀북의 친구 ꎀ계는 상혞적읎닀. ë‚Žê°€ 당신곌 친구띌멎 당신은 반드시 나와 친구읎닀. 슉, 읎런 겜우륌 엣지에 방향읎 없닀(undirected)ê³  한닀. 반멎 하읎퍌링크는 귞렇지 ì•Šë‹€. 낮 홈페읎지에는 대한믌국 국회 홈페읎지에 대한 링크가 있얎도, 반대로 대한믌국 국회 홈페읎지에는 낮 홈페읎지에 대한 링크가 없을 수 있닀. 읎런 넀튞워크에는 방향읎 있Ʞ 때묞에 방향성 넀튞워크(directed network)띌고 한닀. 21.1 맀개 쀑심성 1장에서 우늬는 데읎텀 넀튞워크에서 친구의 수륌 셈윌로썚 쀑심읎 되는 죌요 핵심 읞묌을 찟았닀. 여Ʞ서는 몇 가지 추가적읞 접귌법을 삎펎볎자. End of explanation """ friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4), (4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)] """ Explanation: 넀튞워크는 사용자와 친구 ꎀ계륌 나타낞닀. End of explanation """ # give each user a friends list for user in users: user["friends"] = [] # and populate it for i, j in friendships: # this works because users[i] is the user whose id is i users[i]["friends"].append(users[j]) # add i as a friend of j users[j]["friends"].append(users[i]) # add j as a friend of i """ Explanation: 친구 목록을 각 사용자의 dict에 추가하Ʞ도 했닀. End of explanation """ # # Betweenness Centrality # def shortest_paths_from(from_user): # 특정 사용자로부터 닀륞 사용자까지의 몚든 최닚 겜로륌 포핚하는 dict shortest_paths_to = { from_user["id"] : [[]] } # 확읞핎알 하는 (읎전 사용자, 닀음 사용자) 큐 # 몚든 (from_user, from_user의 친구) 쌍윌로 시작 frontier = deque((from_user, friend) for friend in from_user["friends"]) # 큐가 빌 때까지 반복 while frontier: prev_user, user = frontier.popleft() # 큐의 첫 번짞 사용자륌 user_id = user["id"] # 제거 # 큐에 사용자륌 추가하는 방법을 고렀핎 볎멎 # prev_user까지의 최닚 겜로륌 읎믞 알고 있을 수도 있닀. paths_to_prev = shortest_paths_to[prev_user["id"]] paths_via_prev = [path + [user_id] for path in paths_to_prev] # 만앜 최닚 겜로륌 읎믞 알고 있닀멎 old_paths_to_here = shortest_paths_to.get(user_id, []) # 지ꞈ까지의 최닚 겜로는 묎엇음까? if old_paths_to_here: min_path_length = len(old_paths_to_here[0]) else: min_path_length = float('inf') # Ꞟ지 않은 새로욎 겜로만 저장 new_paths_to_here = [path_via_prev for path_via_prev in paths_via_prev if len(path_via_prev) <= min_path_length and path_via_prev not in old_paths_to_here] shortest_paths_to[user_id] = old_paths_to_here + new_paths_to_here # 아직 한번도 볎지 못한 읎웃을 frontier에 추가 frontier.extend((user, friend) for friend in user["friends"] if friend["id"] not in shortest_paths_to) return shortest_paths_to """ Explanation: 1장에서 연결 쀑심성(degree centrality)을 삎펎볌 때는, 우늬가 직ꎀ적윌로 생각했던 죌요 연결고늬듀읎 선정되지 않아 앜간 아쉬웠닀. 대안윌로 사용할 수 있는 지수 쀑 하나는 맀개 쀑심성(betweenness centrality)읞데, 읎는 두 사람 사읎의 최닚 겜로상에 빈번하게 등장하는 사람듀읎 큰 값을 가지는 지수읎닀. 구첎적윌로는, 녾드 $i$의 맀개 쀑심성은 닀륞 몚든 녾드 $j,k$ 쌍의 최닚 겜로 쀑에, $i$륌 거치는 겜로의 비윚로 계산한닀. 임의의 두 사람읎 죌얎졌을 때 ê·žë“€ 간의 최닚 겜로륌 구핎알 한닀. 읎 책에서는 덜 횚윚적읎더띌도 훚씬 읎핎하Ʞ 쉬욎 'Breadth-first search'띌고도 알렀진 알고늬슘을 사용한닀. End of explanation """ for user in users: user["shortest_paths"] = shortest_paths_from(user) """ Explanation: 귞늬고 각 녞드에 대핮 생성된 dict듀을 저장하자. 
End of explanation """ for user in users: user["betweenness_centrality"] = 0.0 for source in users: source_id = source["id"] for target_id, paths in source["shortest_paths"].items(): # python2에서는 items 대신 iteritems 사용 if source_id < target_id: # 잘못핎서 두 번 섞지 않도록 죌의하자 num_paths = len(paths) # 최닚 겜로가 몇 개 졎재하는가? contrib = 1 / num_paths # 쀑심성에 Ʞ여하는 값 for path in paths: for id in path: if id not in [source_id, target_id]: users[id]["betweenness_centrality"] += contrib for user in users: print(user["id"], user["betweenness_centrality"]) """ Explanation: 귞러멎 읎제 맀개 쀑심성을 구할 쀀비가 ë‹€ 되었닀. 읎제 각각의 최닚 겜로에 포핚되는 각 녞드의 맀개 쀑심성에 $1/n$을 더핮 죌자. End of explanation """ # # closeness centrality # def farness(user): """몚든 사용자와의 최닚 거늬 합""" return sum(len(paths[0]) for paths in user["shortest_paths"].values()) """ Explanation: 사용자 0곌 9의 최닚 겜로 사읎에는 닀륞 사용자가 없윌므로 맀개 쀑심성읎 0읎닀. 반멎 사용자 3, 4, 5는 최닚 겜로상에 묎척 빈번하게 위치하Ʞ 때묞에 높은 맀개 쀑심성을 가진닀. 대게 쀑심성의 절댓값 자첎는 큰 의믞륌 가지지 않고, 상대값만읎 의믞륌 가진닀. ê·ž 왞에 삎펎볌 수 있는 쀑심성 지표 쀑 하나는 귌접 쀑심성(closeness centrality)읎닀. 뚌저 각 사용자의 원접성(farness)을 계산한닀. 원접성읎란 from_user와 닀륞 몚든 사용자의 최닚 겜로륌 합한 값읎닀. End of explanation """ for user in users: user["closeness_centrality"] = 1 / farness(user) for user in users: print(user["id"], user["closeness_centrality"]) """ Explanation: 읎제 귌접 쀑심성은 간닚히 계산할 수 있닀. End of explanation """ def matrix_product_entry(A, B, i, j): return dot(get_row(A, i), get_column(B, j)) def matrix_multiply(A, B): n1, k1 = shape(A) n2, k2 = shape(B) if k1 != n2: raise ArithmeticError("incompatible shapes!") return make_matrix(n1, k2, partial(matrix_product_entry, A, B)) def vector_as_matrix(v): """(list 형태의) 벡터 v륌 n x 1 행렬로 변환""" return [[v_i] for v_i in v] def vector_from_matrix(v_as_matrix): """n x 1 행렬을 늬슀튞로 변환""" return [row[0] for row in v_as_matrix] def matrix_operate(A, v): v_as_matrix = vector_as_matrix(v) product = matrix_multiply(A, v_as_matrix) return vector_from_matrix(product) """ Explanation: 계산된 귌접 쀑심성의 펞찚는 더욱 ìž‘ë‹€. 넀튞워크 쀑심에 있는 녞드조찚 왞곜에 위치한 녞드듀로부터 멀늬 ë–šì–Žì ž 있Ʞ 때묞읎닀. 여Ʞ서 뎀듯읎 최닚 겜로륌 계산하는 것은 ꜀나 복잡하닀. 귞렇Ʞ 때묞에 큰 넀튞워크에서는 귌접 쀑심성을 자죌 사용하지 않는닀. 덜 직ꎀ적읎지만 볎통 더 쉜게 계산할 수 있는 고유벡터 쀑심성(eigenvector centrality)을 더 자죌 사용한닀. 21.2 고유벡터 쀑심성 고유벡터 쀑심성에 대핮 알아볎Ʞ 전에 뚌저 고유벡터가 묎엇읞지 삎펎뎐알 하고, 고유벡터가 묎엇읞지 알Ʞ 위핎서는 뚌저 행렬 연산에 대핮 알아뎐알 한닀. 21.2.1 행렬 연산 End of explanation """ def find_eigenvector(A, tolerance=0.00001): guess = [1 for __ in A] while True: result = matrix_operate(A, guess) length = magnitude(result) next_guess = scalar_multiply(1/length, result) if distance(guess, next_guess) < tolerance: return next_guess, length # eigenvector, eigenvalue guess = next_guess """ Explanation: 행렬 A의 고유 벡터륌 ì°Ÿêž° 위핎, 임의의 벡터 $v$륌 곚띌 matrix_operate륌 수행하고, 결곌값의 크Ʞ가 1읎 되게 재조정하는 곌정을 반복 수행한닀. End of explanation """ rotate = [[0, 1], [-1, 0]] """ Explanation: 결곌값윌로 반환되는 guess륌 matrix_operate륌 통핎 결곌값의 크Ʞ가 1읞 벡터로 재조정하멎, 자Ʞ 자신읎 반환된닀. 슉, 여Ʞ서 guess는 고유벡터띌는 것을 의믞한닀. 몚든 싀수 행렬에 고유벡터와 고유값읎 있는 것은 아니닀. 예륌 듀얎 시계 방향윌로 90도 회전하는 연산을 하는 닀음 행렬에는 곱했을 때 가지 자신읎 되는 벡터는 영벡터밖에 없닀. End of explanation """ flip = [[0, 1], [1, 0]] """ Explanation: 읎 행렬로 앞서 구현한 find_eignevector(rotate)륌 수행하멎, 영원히 끝나지 않을 것읎닀. 한펾, 고유벡터가 있는 행렬도 때로는 묎한룚프에 빠질 수 있닀. End of explanation """ # # eigenvector centrality # def entry_fn(i, j): return 1 if (i, j) in friendships or (j, i) in friendships else 0 n = len(users) adjacency_matrix = make_matrix(n, n, entry_fn) adjacency_matrix """ Explanation: 읎 행렬은 몚든 벡터 [x, y]륌 [y, x]로 변환한닀. 따띌서 [1, 1]은 고유값읎 1읞 고유벡터가 된닀. 하지만 x, y값읎 닀륞 임의의 벡터에서 출발핎서 find_eigenvector륌 수행하멎 x, y값을 바꟞는 연산만 묎한히 수행할 것읎닀. 
(NumPy같은 띌읎람러늬에는 읎런 쌀읎슀까지 닀룰 수 있는 닀양한 방법듀읎 구현되얎 있닀.) 읎런 사소한 묞제에도 불구하고, 얎욌든 find_eigenvector가 결곌값을 반환한닀멎, ê·ž 결곌값은 곧 고유벡터읎닀. 21.2.2 쀑심성 고유벡터가 데읎터 넀튞워크륌 읎핎하는데 얎떻게 도움을 쀄까? 얘Ʞ륌 하Ʞ 전에 뚌저 넀튞워크륌 읞접행렬(adjacency matrix)의 형태로 나타낮 볎자. 읎 행렬은 사용자 i와 사용자 j가 친구읞 겜우 (i, j)번짞 항목에 1읎 있고, 친구가 아닌 겜우 0읎 있는 행렬읎닀. End of explanation """ eigenvector_centralities, _ = find_eigenvector(adjacency_matrix) for user_id, centrality in enumerate(eigenvector_centralities): print(user_id, centrality) """ Explanation: 각 사용자의 고유벡터 쀑심성읎란 find_eigenvector로 찟은 사용자의 고유벡터가 된닀. End of explanation """ # # directed graphs # endorsements = [(0, 1), (1, 0), (0, 2), (2, 0), (1, 2), (2, 1), (1, 3), (2, 3), (3, 4), (5, 4), (5, 6), (7, 5), (6, 8), (8, 7), (8, 9)] for user in users: user["endorses"] = [] # add one list to track outgoing endorsements user["endorsed_by"] = [] # and another to track endorsements for source_id, target_id in endorsements: users[source_id]["endorses"].append(users[target_id]) users[target_id]["endorsed_by"].append(users[source_id]) """ Explanation: 연결의 수가 많고, 쀑심성읎 높은 사용자듀한테 연결된 사용자듀은 고유벡터 쀑심성읎 높닀. 앞의 결곌에 따륎멎 사용자 1, 사용자 2의 쀑심성읎 가장 높은데, 읎는 쀑심성읎 높은 사람듀곌 섞번읎나 연결되었Ʞ 때묞읎닀. 읎듀로부터 멀얎질수록 사용자듀의 쀑심성은 점찚 쀄얎든닀. 21.3 방향성 귞래프(Directed graphs)와 페읎지랭크 데읎텀읎 읞Ʞ륌 별로 끌지 못하자, 순읎익 팀의 부사장은 친구 몚덞에서 볎슝(endorsement)몚덞로 전향하는 것을 ê³ ë € 쀑읎닀. 알고 볎니 사람듀은 ì–Žë–€ 데읎터 곌학자듀끌늬 친구읞지에 대핎서는 별로 ꎀ심읎 없었지만, 헀드헌터듀은 닀륞 데읎터 곌학자로부터 졎겜 받는 데읎터 곌학자가 누구읞지에 대핮 ꎀ심읎 많닀. 읎 새로욎 몚덞에서 ꎀ계는 상혞적읞 것읎 아니띌, 한 사람(source)읎 닀륞 멋진 한 사람(target)의 싀력에 볎슝을 서죌는 (source, target) 쌍윌로 비대칭적읞 ꎀ계륌 표현하게 된닀. End of explanation """ endorsements_by_id = [(user["id"], len(user["endorsed_by"])) for user in users] sorted(endorsements_by_id, key=lambda x: x[1], # (user_id, num_endorsements) reverse=True) """ Explanation: 귞늬고 가장 볎슝을 많읎 받은 데읎터 곌학자듀의 데읎터륌 수집핎서, 귞것을 헀드헌터듀한테 팔멎 된닀. End of explanation """ def page_rank(users, damping = 0.85, num_iters = 100): # 뚌저 페읎지랭크륌 몚든 녞드에 고륎게 배당 num_users = len(users) pr = { user["id"] : 1 / num_users for user in users } # 맀 슀텝마닀 각 녞드가 받는 # 적은 양의 페읎지랭크 base_pr = (1 - damping) / num_users for __ in range(num_iters): next_pr = { user["id"] : base_pr for user in users } for user in users: # 페읎지랭크륌 왞부로 향하는 링크에 배당한닀. links_pr = pr[user["id"]] * damping for endorsee in user["endorses"]: next_pr[endorsee["id"]] += links_pr / len(user["endorses"]) pr = next_pr return pr for user_id, pr in page_rank(users).items(): print(user_id, pr) """ Explanation: 사싀 '볎슝의 수'와 같은 숫자는 조작하Ʞ가 맀우 쉜닀. 가장 ê°„ë‹ší•œ 방법 쀑 하나는, 가짜 계정을 여러 개 만듀얎서 귞것듀로 낮 계정에 대한 볎슝을 서는 것읎닀. 또 닀륞 방법은, 친구듀끌늬 짜고 서로가 서로륌 볎슝핎 죌는 것읎닀. (아마 사용자 0, 1, 2가 읎런 ꎀ계음 가능성읎 크닀.) 좀 더 나은 지수는, '누가' 볎슝을 서는지륌 고렀하는 것읎닀. 볎슝을 많읎 받은 사용자가 볎슝을 ì„€ 때는, 볎슝을 적게 받은 사용자가 볎슝을 ì„€ 때볎닀 더 쀑요한 것윌로 받아듀여지는 것읎 타당하닀. 귞늬고 사싀 읎것은 유명한 페읎지랭크(PageRank) 알고늬슘의 Ʞ볞 철학읎Ʞ도 하닀. 1. 넀튞워크 전첎에는 1.0(또는 100%)의 페읎지랭크가 있닀. 2. 쎈Ʞ에 읎 페읎지랭크륌 몚든 녞드에 고륎게 배당한닀. 3. 각 슀텝을 ê±°ì¹  때마닀 각 녞드에 배당된 페읎지랭크의 대부분은 왞부로 향하는 링크에 균등하게 배당한닀. 4. 각 슀텝을 ê±°ì¹  때마닀 각 녞드에 ë‚šì•„ 있는 페읎지랭크륌 몚든 녞드에 고륎게 배당한닀. End of explanation """
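# Added illustration (optional): rank the users by PageRank score, mirroring the
# earlier ranking by raw endorsement counts. Uses users and page_rank() from above.
pr = page_rank(users)
ranked = sorted(pr.items(), key=lambda pair: pair[1], reverse=True)
for user_id, score in ranked:
    print(user_id, round(score, 4))
""" Explanation: (Added illustration) A small optional cell: sorting the users by their PageRank scores makes it easy to compare this ranking with the earlier one based on raw endorsement counts, which is exactly the comparison the PageRank discussion above is about. It only reuses the users list and the page_rank function defined in this notebook. End of explanation """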
Open-Power-System-Data/renewable_power_plants
download_and_process.ipynb
mit
version = '2020-08-25' """ Explanation: <div style="width:100%; background-color: #D9EDF7; border: 1px solid #CFCFCF; text-align: left; padding: 10px;"> <b>Renewable power plants: Download and process notebook</b> <ul> <li><a href="main.ipynb">Main notebook</a></li> <li>Download and process notebook</li> <li><a href="validation_and_output.ipynb">Validation and output notebook</a></li> </ul> <br>This notebook is part of the <a href="http://data.open-power-system-data.org/renewable_power_plants"> Renewable power plants Data Package</a> of <a href="http://open-power-system-data.org">Open Power System Data</a>. </div> This script downlads and extracts the original data of renewable power plant lists from the data sources, processes and merges them. It subsequently adds the geolocation for each power plant. Finally it saves the DataFrames as pickle-files. Make sure you run the download and process Notebook before the validation and output Notebook. <h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Script-setup" data-toc-modified-id="Script-setup-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Script setup</a></span></li><li><span><a href="#Settings" data-toc-modified-id="Settings-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Settings</a></span><ul class="toc-item"><li><span><a href="#Choose-download-option" data-toc-modified-id="Choose-download-option-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Choose download option</a></span></li><li><span><a href="#Update-the-download-links" data-toc-modified-id="Update-the-download-links-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Update the download links</a></span></li><li><span><a href="#Set-up-the-downloader-for-data-sources" data-toc-modified-id="Set-up-the-downloader-for-data-sources-2.3"><span class="toc-item-num">2.3&nbsp;&nbsp;</span>Set up the downloader for data sources</a></span></li><li><span><a href="#Set-up-the-NUTS-converter" data-toc-modified-id="Set-up-the-NUTS-converter-2.4"><span class="toc-item-num">2.4&nbsp;&nbsp;</span>Set up the NUTS converter</a></span></li><li><span><a href="#Setup-translation-dictionaries" data-toc-modified-id="Setup-translation-dictionaries-2.5"><span class="toc-item-num">2.5&nbsp;&nbsp;</span>Setup translation dictionaries</a></span></li></ul></li><li><span><a href="#Download-and-process-per-country" data-toc-modified-id="Download-and-process-per-country-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Download and process per country</a></span><ul class="toc-item"><li><span><a href="#Germany-DE" data-toc-modified-id="Germany-DE-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Germany DE</a></span><ul class="toc-item"><li><span><a href="#Download-and-read" data-toc-modified-id="Download-and-read-3.1.1"><span class="toc-item-num">3.1.1&nbsp;&nbsp;</span>Download and read</a></span></li><li><span><a href="#Translate-column-names" data-toc-modified-id="Translate-column-names-3.1.2"><span class="toc-item-num">3.1.2&nbsp;&nbsp;</span>Translate column names</a></span></li><li><span><a href="#Add-information-and-choose-columns" data-toc-modified-id="Add-information-and-choose-columns-3.1.3"><span class="toc-item-num">3.1.3&nbsp;&nbsp;</span>Add information and choose columns</a></span></li><li><span><a href="#Merge-DataFrames" data-toc-modified-id="Merge-DataFrames-3.1.4"><span class="toc-item-num">3.1.4&nbsp;&nbsp;</span>Merge DataFrames</a></span></li><li><span><a 
href="#Translate-values-and-harmonize-energy-source-level-2" data-toc-modified-id="Translate-values-and-harmonize-energy-source-level-2-3.1.5"><span class="toc-item-num">3.1.5&nbsp;&nbsp;</span>Translate values and harmonize energy source level 2</a></span></li><li><span><a href="#Transform-electrical-capacity-from-kW-to-MW" data-toc-modified-id="Transform-electrical-capacity-from-kW-to-MW-3.1.6"><span class="toc-item-num">3.1.6&nbsp;&nbsp;</span>Transform electrical capacity from kW to MW</a></span></li><li><span><a href="#Georeferencing" data-toc-modified-id="Georeferencing-3.1.7"><span class="toc-item-num">3.1.7&nbsp;&nbsp;</span>Georeferencing</a></span><ul class="toc-item"><li><span><a href="#Get-coordinates-by-postcode" data-toc-modified-id="Get-coordinates-by-postcode-3.1.7.1"><span class="toc-item-num">3.1.7.1&nbsp;&nbsp;</span>Get coordinates by postcode</a></span></li><li><span><a href="#Transform-geoinformation" data-toc-modified-id="Transform-geoinformation-3.1.7.2"><span class="toc-item-num">3.1.7.2&nbsp;&nbsp;</span>Transform geoinformation</a></span></li></ul></li><li><span><a href="#Clean-data" data-toc-modified-id="Clean-data-3.1.8"><span class="toc-item-num">3.1.8&nbsp;&nbsp;</span>Clean data</a></span></li><li><span><a href="#Assign-NUTS-codes" data-toc-modified-id="Assign-NUTS-codes-3.1.9"><span class="toc-item-num">3.1.9&nbsp;&nbsp;</span>Assign NUTS codes</a></span></li><li><span><a href="#Visualize" data-toc-modified-id="Visualize-3.1.10"><span class="toc-item-num">3.1.10&nbsp;&nbsp;</span>Visualize</a></span></li><li><span><a href="#Save" data-toc-modified-id="Save-3.1.11"><span class="toc-item-num">3.1.11&nbsp;&nbsp;</span>Save</a></span></li></ul></li><li><span><a href="#Denmark-DK" data-toc-modified-id="Denmark-DK-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Denmark DK</a></span><ul class="toc-item"><li><span><a href="#Download-and-read" data-toc-modified-id="Download-and-read-3.2.1"><span class="toc-item-num">3.2.1&nbsp;&nbsp;</span>Download and read</a></span></li><li><span><a href="#Translate-column-names" data-toc-modified-id="Translate-column-names-3.2.2"><span class="toc-item-num">3.2.2&nbsp;&nbsp;</span>Translate column names</a></span></li><li><span><a href="#Add-data-source-and-missing-information" data-toc-modified-id="Add-data-source-and-missing-information-3.2.3"><span class="toc-item-num">3.2.3&nbsp;&nbsp;</span>Add data source and missing information</a></span></li><li><span><a href="#Correct-the-dates" data-toc-modified-id="Correct-the-dates-3.2.4"><span class="toc-item-num">3.2.4&nbsp;&nbsp;</span>Correct the dates</a></span></li><li><span><a href="#Translate-values-and-harmonize-energy-source-level-2" data-toc-modified-id="Translate-values-and-harmonize-energy-source-level-2-3.2.5"><span class="toc-item-num">3.2.5&nbsp;&nbsp;</span>Translate values and harmonize energy source level 2</a></span></li><li><span><a href="#Georeferencing" data-toc-modified-id="Georeferencing-3.2.6"><span class="toc-item-num">3.2.6&nbsp;&nbsp;</span>Georeferencing</a></span></li><li><span><a href="#Merge-DataFrames,-add-NUTS-information-and-choose-columns" data-toc-modified-id="Merge-DataFrames,-add-NUTS-information-and-choose-columns-3.2.7"><span class="toc-item-num">3.2.7&nbsp;&nbsp;</span>Merge DataFrames, add NUTS information and choose columns</a></span></li><li><span><a href="#Select-columns" data-toc-modified-id="Select-columns-3.2.8"><span class="toc-item-num">3.2.8&nbsp;&nbsp;</span>Select columns</a></span></li><li><span><a 
href="#Remove-duplicate-rows" data-toc-modified-id="Remove-duplicate-rows-3.2.9"><span class="toc-item-num">3.2.9&nbsp;&nbsp;</span>Remove duplicate rows</a></span></li><li><span><a href="#Transform-electrical_capacity-from-kW-to-MW" data-toc-modified-id="Transform-electrical_capacity-from-kW-to-MW-3.2.10"><span class="toc-item-num">3.2.10&nbsp;&nbsp;</span>Transform electrical_capacity from kW to MW</a></span></li><li><span><a href="#Visualize" data-toc-modified-id="Visualize-3.2.11"><span class="toc-item-num">3.2.11&nbsp;&nbsp;</span>Visualize</a></span></li><li><span><a href="#Save" data-toc-modified-id="Save-3.2.12"><span class="toc-item-num">3.2.12&nbsp;&nbsp;</span>Save</a></span></li></ul></li><li><span><a href="#France-FR" data-toc-modified-id="France-FR-3.3"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>France FR</a></span><ul class="toc-item"><li><span><a href="#ODRE-data" data-toc-modified-id="ODRE-data-3.3.1"><span class="toc-item-num">3.3.1&nbsp;&nbsp;</span>ODRE data</a></span><ul class="toc-item"><li><span><a href="#Load-the-data" data-toc-modified-id="Load-the-data-3.3.1.1"><span class="toc-item-num">3.3.1.1&nbsp;&nbsp;</span>Load the data</a></span></li><li><span><a href="#Translate-column-names" data-toc-modified-id="Translate-column-names-3.3.1.2"><span class="toc-item-num">3.3.1.2&nbsp;&nbsp;</span>Translate column names</a></span></li><li><span><a href="#Add-data-source" data-toc-modified-id="Add-data-source-3.3.1.3"><span class="toc-item-num">3.3.1.3&nbsp;&nbsp;</span>Add data source</a></span></li><li><span><a href="#Translate-values" data-toc-modified-id="Translate-values-3.3.1.4"><span class="toc-item-num">3.3.1.4&nbsp;&nbsp;</span>Translate values</a></span></li><li><span><a href="#Correct-site-names" data-toc-modified-id="Correct-site-names-3.3.1.5"><span class="toc-item-num">3.3.1.5&nbsp;&nbsp;</span>Correct site names</a></span></li><li><span><a href="#Replace-suspicious-dates-with-N/A" data-toc-modified-id="Replace-suspicious-dates-with-N/A-3.3.1.6"><span class="toc-item-num">3.3.1.6&nbsp;&nbsp;</span>Replace suspicious dates with N/A</a></span></li><li><span><a href="#Check-missing-values" data-toc-modified-id="Check-missing-values-3.3.1.7"><span class="toc-item-num">3.3.1.7&nbsp;&nbsp;</span>Check missing values</a></span></li><li><span><a href="#Standardize-the-energy-types-and-technologies" data-toc-modified-id="Standardize-the-energy-types-and-technologies-3.3.1.8"><span class="toc-item-num">3.3.1.8&nbsp;&nbsp;</span>Standardize the energy types and technologies</a></span></li><li><span><a href="#Standardize-source-levels-1-3-and-technology" data-toc-modified-id="Standardize-source-levels-1-3-and-technology-3.3.1.9"><span class="toc-item-num">3.3.1.9&nbsp;&nbsp;</span>Standardize source levels 1-3 and technology</a></span></li><li><span><a href="#Georeferencing" data-toc-modified-id="Georeferencing-3.3.1.10"><span class="toc-item-num">3.3.1.10&nbsp;&nbsp;</span>Georeferencing</a></span></li><li><span><a href="#Convert-electrical-capacity-to-MW" data-toc-modified-id="Convert-electrical-capacity-to-MW-3.3.1.11"><span class="toc-item-num">3.3.1.11&nbsp;&nbsp;</span>Convert electrical capacity to MW</a></span></li></ul></li><li><span><a href="#Old-data" data-toc-modified-id="Old-data-3.3.2"><span class="toc-item-num">3.3.2&nbsp;&nbsp;</span>Old data</a></span><ul class="toc-item"><li><span><a href="#Add-data-source" data-toc-modified-id="Add-data-source-3.3.2.1"><span class="toc-item-num">3.3.2.1&nbsp;&nbsp;</span>Add data 
source</a></span></li><li><span><a href="#Translate-values-and-harmonize-energy-source-level-2" data-toc-modified-id="Translate-values-and-harmonize-energy-source-level-2-3.3.2.2"><span class="toc-item-num">3.3.2.2&nbsp;&nbsp;</span>Translate values and harmonize energy source level 2</a></span></li><li><span><a href="#Georeferencing" data-toc-modified-id="Georeferencing-3.3.2.3"><span class="toc-item-num">3.3.2.3&nbsp;&nbsp;</span>Georeferencing</a></span></li></ul></li><li><span><a href="#Integrate-old-and-new-data" data-toc-modified-id="Integrate-old-and-new-data-3.3.3"><span class="toc-item-num">3.3.3&nbsp;&nbsp;</span>Integrate old and new data</a></span></li><li><span><a href="#Select-the-columns" data-toc-modified-id="Select-the-columns-3.3.4"><span class="toc-item-num">3.3.4&nbsp;&nbsp;</span>Select the columns</a></span></li><li><span><a href="#Visualize" data-toc-modified-id="Visualize-3.3.5"><span class="toc-item-num">3.3.5&nbsp;&nbsp;</span>Visualize</a></span></li><li><span><a href="#Save" data-toc-modified-id="Save-3.3.6"><span class="toc-item-num">3.3.6&nbsp;&nbsp;</span>Save</a></span></li></ul></li><li><span><a href="#Poland-PL" data-toc-modified-id="Poland-PL-3.4"><span class="toc-item-num">3.4&nbsp;&nbsp;</span>Poland PL</a></span><ul class="toc-item"><li><span><a href="#Download" data-toc-modified-id="Download-3.4.1"><span class="toc-item-num">3.4.1&nbsp;&nbsp;</span>Download</a></span></li><li><span><a href="#Load-and-explore-the-data" data-toc-modified-id="Load-and-explore-the-data-3.4.2"><span class="toc-item-num">3.4.2&nbsp;&nbsp;</span>Load and explore the data</a></span></li><li><span><a href="#Inspect-the-data" data-toc-modified-id="Inspect-the-data-3.4.3"><span class="toc-item-num">3.4.3&nbsp;&nbsp;</span>Inspect the data</a></span></li><li><span><a href="#Harmonising-energy-levels" data-toc-modified-id="Harmonising-energy-levels-3.4.4"><span class="toc-item-num">3.4.4&nbsp;&nbsp;</span>Harmonising energy levels</a></span></li><li><span><a href="#Georeferencing-(NUTS-classification)" data-toc-modified-id="Georeferencing-(NUTS-classification)-3.4.5"><span class="toc-item-num">3.4.5&nbsp;&nbsp;</span>Georeferencing (NUTS classification)</a></span><ul class="toc-item"><li><span><a href="#Add-NUTS-information" data-toc-modified-id="Add-NUTS-information-3.4.5.1"><span class="toc-item-num">3.4.5.1&nbsp;&nbsp;</span>Add NUTS information</a></span></li></ul></li><li><span><a href="#Add-data-source-and-year" data-toc-modified-id="Add-data-source-and-year-3.4.6"><span class="toc-item-num">3.4.6&nbsp;&nbsp;</span>Add data source and year</a></span></li><li><span><a href="#Select-columns" data-toc-modified-id="Select-columns-3.4.7"><span class="toc-item-num">3.4.7&nbsp;&nbsp;</span>Select columns</a></span></li><li><span><a href="#Save" data-toc-modified-id="Save-3.4.8"><span class="toc-item-num">3.4.8&nbsp;&nbsp;</span>Save</a></span></li></ul></li><li><span><a href="#Switzerland-CH" data-toc-modified-id="Switzerland-CH-3.5"><span class="toc-item-num">3.5&nbsp;&nbsp;</span>Switzerland CH</a></span><ul class="toc-item"><li><span><a href="#Download-and-read" data-toc-modified-id="Download-and-read-3.5.1"><span class="toc-item-num">3.5.1&nbsp;&nbsp;</span>Download and read</a></span></li><li><span><a href="#Translate-column-names" data-toc-modified-id="Translate-column-names-3.5.2"><span class="toc-item-num">3.5.2&nbsp;&nbsp;</span>Translate column names</a></span></li><li><span><a href="#Add-data-source" data-toc-modified-id="Add-data-source-3.5.3"><span 
class="toc-item-num">3.5.3&nbsp;&nbsp;</span>Add data source</a></span></li><li><span><a href="#Harmonize-energy-source-hierarchy-and-translate-values" data-toc-modified-id="Harmonize-energy-source-hierarchy-and-translate-values-3.5.4"><span class="toc-item-num">3.5.4&nbsp;&nbsp;</span>Harmonize energy source hierarchy and translate values</a></span></li><li><span><a href="#Georeferencing" data-toc-modified-id="Georeferencing-3.5.5"><span class="toc-item-num">3.5.5&nbsp;&nbsp;</span>Georeferencing</a></span><ul class="toc-item"><li><span><a href="#Postcode-to-lat/lon-(WGS84)" data-toc-modified-id="Postcode-to-lat/lon-(WGS84)-3.5.5.1"><span class="toc-item-num">3.5.5.1&nbsp;&nbsp;</span>Postcode to lat/lon (WGS84)</a></span></li><li><span><a href="#Add-NUTS-information" data-toc-modified-id="Add-NUTS-information-3.5.5.2"><span class="toc-item-num">3.5.5.2&nbsp;&nbsp;</span>Add NUTS information</a></span></li></ul></li><li><span><a href="#Transform-electrical_capacity-from-kW-to-MW" data-toc-modified-id="Transform-electrical_capacity-from-kW-to-MW-3.5.6"><span class="toc-item-num">3.5.6&nbsp;&nbsp;</span>Transform electrical_capacity from kW to MW</a></span></li><li><span><a href="#Select-columns-to-keep" data-toc-modified-id="Select-columns-to-keep-3.5.7"><span class="toc-item-num">3.5.7&nbsp;&nbsp;</span>Select columns to keep</a></span></li><li><span><a href="#Visualize" data-toc-modified-id="Visualize-3.5.8"><span class="toc-item-num">3.5.8&nbsp;&nbsp;</span>Visualize</a></span></li><li><span><a href="#Save" data-toc-modified-id="Save-3.5.9"><span class="toc-item-num">3.5.9&nbsp;&nbsp;</span>Save</a></span></li></ul></li><li><span><a href="#United-Kingdom-UK" data-toc-modified-id="United-Kingdom-UK-3.6"><span class="toc-item-num">3.6&nbsp;&nbsp;</span>United Kingdom UK</a></span><ul class="toc-item"><li><span><a href="#Download-and-Read" data-toc-modified-id="Download-and-Read-3.6.1"><span class="toc-item-num">3.6.1&nbsp;&nbsp;</span>Download and Read</a></span></li><li><span><a href="#Clean-the-data" data-toc-modified-id="Clean-the-data-3.6.2"><span class="toc-item-num">3.6.2&nbsp;&nbsp;</span>Clean the data</a></span></li><li><span><a href="#Translate-column-names" data-toc-modified-id="Translate-column-names-3.6.3"><span class="toc-item-num">3.6.3&nbsp;&nbsp;</span>Translate column names</a></span></li><li><span><a href="#Add-data-source" data-toc-modified-id="Add-data-source-3.6.4"><span class="toc-item-num">3.6.4&nbsp;&nbsp;</span>Add data source</a></span></li><li><span><a href="#Translate-values-and-harmonise-energy-source-levels-1-3-and-technology" data-toc-modified-id="Translate-values-and-harmonise-energy-source-levels-1-3-and-technology-3.6.5"><span class="toc-item-num">3.6.5&nbsp;&nbsp;</span>Translate values and harmonise energy source levels 1-3 and technology</a></span></li><li><span><a href="#Georeferencing" data-toc-modified-id="Georeferencing-3.6.6"><span class="toc-item-num">3.6.6&nbsp;&nbsp;</span>Georeferencing</a></span><ul class="toc-item"><li><span><a href="#Cases-with-unknown-Easting-and-Northing-coordinates" data-toc-modified-id="Cases-with-unknown-Easting-and-Northing-coordinates-3.6.6.1"><span class="toc-item-num">3.6.6.1&nbsp;&nbsp;</span>Cases with unknown Easting and Northing coordinates</a></span></li><li><span><a href="#Cases-for-approximation" data-toc-modified-id="Cases-for-approximation-3.6.6.2"><span class="toc-item-num">3.6.6.2&nbsp;&nbsp;</span>Cases for approximation</a></span></li><li><span><a href="#Add-NUTS-information" 
data-toc-modified-id="Add-NUTS-information-3.6.6.3"><span class="toc-item-num">3.6.6.3&nbsp;&nbsp;</span>Add NUTS information</a></span></li><li><span><a href="#Visualize-the-data" data-toc-modified-id="Visualize-the-data-3.6.6.4"><span class="toc-item-num">3.6.6.4&nbsp;&nbsp;</span>Visualize the data</a></span></li></ul></li><li><span><a href="#Keep-only-the-columns-of-interest" data-toc-modified-id="Keep-only-the-columns-of-interest-3.6.7"><span class="toc-item-num">3.6.7&nbsp;&nbsp;</span>Keep only the columns of interest</a></span></li><li><span><a href="#Save" data-toc-modified-id="Save-3.6.8"><span class="toc-item-num">3.6.8&nbsp;&nbsp;</span>Save</a></span></li></ul></li><li><span><a href="#Sweden" data-toc-modified-id="Sweden-3.7"><span class="toc-item-num">3.7&nbsp;&nbsp;</span>Sweden</a></span><ul class="toc-item"><li><span><a href="#Load-the-data" data-toc-modified-id="Load-the-data-3.7.1"><span class="toc-item-num">3.7.1&nbsp;&nbsp;</span>Load the data</a></span></li><li><span><a href="#Clean-the-data" data-toc-modified-id="Clean-the-data-3.7.2"><span class="toc-item-num">3.7.2&nbsp;&nbsp;</span>Clean the data</a></span></li><li><span><a href="#Translate-column-names" data-toc-modified-id="Translate-column-names-3.7.3"><span class="toc-item-num">3.7.3&nbsp;&nbsp;</span>Translate column names</a></span></li><li><span><a href="#Correct-the-dates" data-toc-modified-id="Correct-the-dates-3.7.4"><span class="toc-item-num">3.7.4&nbsp;&nbsp;</span>Correct the dates</a></span></li><li><span><a href="#Add-source" data-toc-modified-id="Add-source-3.7.5"><span class="toc-item-num">3.7.5&nbsp;&nbsp;</span>Add source</a></span></li><li><span><a href="#Translate-values-and-harmonize-energy-source-levels" data-toc-modified-id="Translate-values-and-harmonize-energy-source-levels-3.7.6"><span class="toc-item-num">3.7.6&nbsp;&nbsp;</span>Translate values and harmonize energy source levels</a></span></li><li><span><a href="#Georeferencing" data-toc-modified-id="Georeferencing-3.7.7"><span class="toc-item-num">3.7.7&nbsp;&nbsp;</span>Georeferencing</a></span></li><li><span><a href="#Assigning-NUTS-codes" data-toc-modified-id="Assigning-NUTS-codes-3.7.8"><span class="toc-item-num">3.7.8&nbsp;&nbsp;</span>Assigning NUTS codes</a></span></li><li><span><a href="#Select-the-columns-to-keep" data-toc-modified-id="Select-the-columns-to-keep-3.7.9"><span class="toc-item-num">3.7.9&nbsp;&nbsp;</span>Select the columns to keep</a></span></li><li><span><a href="#Visualize" data-toc-modified-id="Visualize-3.7.10"><span class="toc-item-num">3.7.10&nbsp;&nbsp;</span>Visualize</a></span></li><li><span><a href="#Save" data-toc-modified-id="Save-3.7.11"><span class="toc-item-num">3.7.11&nbsp;&nbsp;</span>Save</a></span></li></ul></li><li><span><a href="#Czech-Republic" data-toc-modified-id="Czech-Republic-3.8"><span class="toc-item-num">3.8&nbsp;&nbsp;</span>Czech Republic</a></span><ul class="toc-item"><li><span><a href="#Download-and-read-the-data" data-toc-modified-id="Download-and-read-the-data-3.8.1"><span class="toc-item-num">3.8.1&nbsp;&nbsp;</span>Download and read the data</a></span></li><li><span><a href="#Clean-the-data" data-toc-modified-id="Clean-the-data-3.8.2"><span class="toc-item-num">3.8.2&nbsp;&nbsp;</span>Clean the data</a></span></li><li><span><a href="#Reformat-the-data" data-toc-modified-id="Reformat-the-data-3.8.3"><span class="toc-item-num">3.8.3&nbsp;&nbsp;</span>Reformat the data</a></span></li><li><span><a href="#Translate-column-names" 
data-toc-modified-id="Translate-column-names-3.8.4"><span class="toc-item-num">3.8.4&nbsp;&nbsp;</span>Translate column names</a></span></li><li><span><a href="#Translate-values-and-harmonize-energy-levels" data-toc-modified-id="Translate-values-and-harmonize-energy-levels-3.8.5"><span class="toc-item-num">3.8.5&nbsp;&nbsp;</span>Translate values and harmonize energy levels</a></span></li><li><span><a href="#Add-data-source" data-toc-modified-id="Add-data-source-3.8.6"><span class="toc-item-num">3.8.6&nbsp;&nbsp;</span>Add data source</a></span></li><li><span><a href="#Georeferencing" data-toc-modified-id="Georeferencing-3.8.7"><span class="toc-item-num">3.8.7&nbsp;&nbsp;</span>Georeferencing</a></span></li><li><span><a href="#Assign-NUTS-codes" data-toc-modified-id="Assign-NUTS-codes-3.8.8"><span class="toc-item-num">3.8.8&nbsp;&nbsp;</span>Assign NUTS codes</a></span></li><li><span><a href="#Select-the-columns-to-keep" data-toc-modified-id="Select-the-columns-to-keep-3.8.9"><span class="toc-item-num">3.8.9&nbsp;&nbsp;</span>Select the columns to keep</a></span></li><li><span><a href="#Drop-duplicates" data-toc-modified-id="Drop-duplicates-3.8.10"><span class="toc-item-num">3.8.10&nbsp;&nbsp;</span>Drop duplicates</a></span></li><li><span><a href="#Visualuze" data-toc-modified-id="Visualuze-3.8.11"><span class="toc-item-num">3.8.11&nbsp;&nbsp;</span>Visualuze</a></span></li><li><span><a href="#Save" data-toc-modified-id="Save-3.8.12"><span class="toc-item-num">3.8.12&nbsp;&nbsp;</span>Save</a></span></li></ul></li></ul></li><li><span><a href="#Zip-the-raw-data" data-toc-modified-id="Zip-the-raw-data-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>Zip the raw data</a></span></li></ul></div> End of explanation """ import logging import os import posixpath import urllib.parse import urllib.request import re import zipfile import pickle import urllib import shutil import datetime import numpy as np import pandas as pd import utm # for transforming geoinformation in the utm format import requests import fake_useragent from string import Template from IPython.display import display import xlrd import bs4 import bng_to_latlon import matplotlib.pyplot as plt from matplotlib.pyplot import figure # for visualizing locations on maps import cartopy.crs as ccrs import cartopy.feature as cfeature from cartopy.io import shapereader import geopandas import shapely logging.basicConfig( level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%d %b %Y %H:%M:%S' ) logger = logging.getLogger() # Create input, intermediate and output folders if they don't exist. # If the paths are relative, the correspoding folders will be created # inside the current working directory. input_directory_path = os.path.join('input', 'original_data') intermediate_directory_path = 'intermediate' output_directory_path = os.path.join('output', 'renewable_power_plants') os.makedirs(input_directory_path, exist_ok=True) os.makedirs(intermediate_directory_path, exist_ok=True) os.makedirs(output_directory_path, exist_ok=True) # Create the folder to which the Eurostat files with data at the level of the whole EU/Europe #are going to be downloaded eurostat_eu_directory_path = os.path.join('input', 'eurostat_eu') os.makedirs(eurostat_eu_directory_path, exist_ok=True) # Define the path of the file with the list of sources. 
source_list_filepath = os.path.join('input', 'sources.csv') # Import the utility functions and classes from the util package import util.helper from util.visualizer import visualize_points """ Explanation: Script setup End of explanation """ download_from = 'original_sources' #download_from = 'opsd_server' """ Explanation: Settings Choose download option The original data can either be downloaded from the original data sources as specified below or from the opsd-Server. Default option is to download from the original sources as the aim of the project is to stay as close to original sources as possible. However, if problems with downloads e.g. due to changing urls occur, you can still run the script with the original data from the opsd_server. End of explanation """ source_df = pd.read_csv(source_list_filepath) uk_main_page = 'https://www.gov.uk/government/publications/renewable-energy-planning-database-monthly-extract' current_link = util.helper.get_beis_link(uk_main_page) current_filename = current_link.split('/')[-1] source_df.loc[(source_df['country'] == 'UK') & (source_df['source'] == 'BEIS'), 'url'] = current_link source_df.loc[(source_df['country'] == 'UK') & (source_df['source'] == 'BEIS'), 'filename'] = current_filename source_df.to_csv(source_list_filepath, index=False, header=True) source_df.fillna('') """ Explanation: Update the download links The download link for the UK is updated at the end of each quarter by the source provider, BEIS. We keep up with those changes by extracting the download link automatically from the web page it is on. That way, the link does not have to be updated manually. Note: you must be connected to the Internet if you want to execute this step. End of explanation """ import util.downloader from util.downloader import Downloader downloader = Downloader(version, input_directory_path, source_list_filepath, download_from) """ Explanation: Note that, as of August 25, 2020, the following sources are available only from the OPSD server and the data will be downloaded from it even if download_from is set to 'original_sources': - Energinet (DK) - Eurostat files which contain correspondence tables between postal codes and NUTS. The original links which should be downloaded from OPSD are marked as inactive in the column active in the above dataframe. Set up the downloader for data sources The Downloader class in the util package is responsible for downloading the original files to appropriate folders. In order to access its functionality, we have to instantiate it first. End of explanation """ #import importlib #importlib.reload(util.nuts_converter) #importlib.reload(util.downloader) #from util.downloader import Downloader #downloader = Downloader(version, input_directory_path, source_list_filepath, download_from) from util.nuts_converter import NUTSConverter nuts_converter = NUTSConverter(downloader, eurostat_eu_directory_path) """ Explanation: Set up the NUTS converter The NUTSConverter class in the util package uses the information on each facility's postcode, municipalty name, municipality code, longitude, and latitude to assign it correct NUTS 2016 level 1, 2, and 3 codes. Here, we instantiate the converter so that we can use it later. 
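As a rough sketch of how the converter is used further below (the exact file paths, column hints and `how` strategies differ per country and are shown in each country section), a typical call looks like this; `df` and `postcode2nuts_filepath` are placeholders here, not variables defined at this point:

```python
# Illustrative sketch only -- the real calls appear in the country sections below.
df_with_nuts = nuts_converter.add_nuts_information(
    df,                       # dataframe holding postcode / municipality / lat-lon columns
    'DE',                     # two-letter country code
    postcode2nuts_filepath,   # Eurostat postcode-to-NUTS correspondence table
    how=['postcode', 'municipality_code', 'municipality', 'latlon']
)
# The call adds the columns nuts_1_region, nuts_2_region and nuts_3_region.
```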
End of explanation """ # Get column translation list columnnames = pd.read_csv(os.path.join('input', 'column_translation_list.csv')) columnnames.head(2) # Get value translation list valuenames = pd.read_csv(os.path.join('input', 'value_translation_list.csv')) valuenames.head(2) """ Explanation: Setup translation dictionaries Column and value names of the original data sources will be translated to English and standardized across different sources. Standardized column names, e.g. "electrical_capacity" are required to merge data in one DataFrame.<br> The column and the value translation lists are provided in the input folder of the Data Package. End of explanation """ # Define the lists of source names downloader = Downloader(version, input_directory_path, source_list_filepath, download_from) tsos = ['50Hertz', 'Amprion', 'TenneT', 'TransnetBW'] datasets = ['50Hertz', 'Amprion', 'TenneT', 'TransnetBW','bnetza','bnetza_pv','bnetza_pv_historic'] # Download the files and get the local file paths indexed by source names filepaths = downloader.download_data_for_country('DE') # Remove the Eurostat NUTS file as it's a geoinformation source DE_postcode2nuts_filepath = filepaths.pop('Eurostat') # Open all data sets before processing. filenames = {} for source in filepaths: filepath = filepaths[source] print(source, filepath) if os.path.splitext(filepath)[1] != '.xlsx' and zipfile.is_zipfile(filepath): filenames[source] = zipfile.ZipFile(filepath) else: filenames[source] = filepath # Read TSO data from the zip files dfs = {} basenames_by_tso = { '50Hertz': '50Hertz Transmission GmbH EEG-Zahlungen Stammdaten 2019', 'Amprion': 'Amprion GmbH EEG-Zahlungen Anlagenstammdaten 2019', 'TenneT': 'TenneT TSO GmbH Anlagenstammdaten 2019', 'TransnetBW': 'TransnetBW GmbH Anlagenstammdaten 2019', } for tso in tsos: filename = basenames_by_tso[tso]+'.csv' print('Reading', filename) #print(filenames[tso].namelist()) dfs[tso] = pd.read_csv( filenames[tso].open(filename), sep=';', thousands='.', decimal=',', # Headers have to have the same order for all TSOs. Therefore just define headers here. # Remove the following three lines if for next version, headers should be read out initially # to then check if order is the same everywhere. 
        names=['EEG-AnlagenschlÌssel', 'MASTR_Nr_EEG', 'Netzbetreiber Betriebsnummer', 'Netzbetreiber Name',
               'Strasse_flurstueck', 'PLZ', 'Ort / Gemarkung', 'GemeindeschlÌssel', 'Bundesland',
               'Installierte Leistung', 'EnergietrÀger', 'Spannungsebene', 'Leistungsmessung', 'Regelbarkeit',
               'Inbetriebnahme', 'Außerbetriebnahme', 'Netzzugang', 'Netzabgang'],
        header=None,
        skiprows=1,
        parse_dates=[14, 15, 16, 17],  #[11, 12, 13, 14]
        #infer_datetime_format=True,
        date_parser=lambda x: pd.to_datetime(x, errors='coerce', format='%d.%m.%Y'),
        encoding='iso-8859-1',
        dayfirst=True,
        low_memory=False
    )
    print('Done reading ' + filename)

for filename in filenames.values():
    if isinstance(filename, zipfile.ZipFile):
        #print(filename)
        filename.close()

# Define the date parser (note the lower-case format codes: %d.%m.%Y for day.month.year)
def date_parser(x):
    if type(x) == str:
        return datetime.datetime.strptime(x, '%d.%m.%Y')
    elif type(x) == float and pd.isnull(x):
        return pd.NaT

def inspect(x):
    try:
        converted = datetime.datetime.strptime(x, '%d.%m.%Y')
        return False
    except:
        return True

# Read BNetzA register
print('Reading bnetza: ' + filenames['bnetza'])
dfs['bnetza'] = pd.read_excel(filenames['bnetza'],
                              sheet_name='GesamtÌbersicht',
                              header=0,
                              converters={'4.9 Postleit-zahl': str,
                                          'Gemeinde-SchlÌssel': str}
                              )

skiprows = {'bnetza_pv_historic': 10, 'bnetza_pv': 9}

for dataset in ['bnetza_pv', 'bnetza_pv_historic']:
    print(dataset)
    print('Reading ' + dataset + ': ' + filenames[dataset])
    xls_handle = pd.ExcelFile(filenames[dataset])
    print('Concatenating all ' + dataset + ' sheets into one dataframe')
    dfs[dataset] = pd.concat(
        (xls_handle.parse(
            sheet,
            skiprows=skiprows[dataset],
            converters={'Anlage \nPLZ': str}
        ) for sheet in xls_handle.sheet_names),
        sort=True
    )

# Make sure that the column `Inbetriebnahme-datum *)` (commissioning date) in the bnetza_pv set is datetime.
mask = dfs['bnetza_pv']['Inbetriebnahme-datum *)'].apply(lambda x: type(x) == int)
dfs['bnetza_pv']['Inbetriebnahme-datum *)'] = pd.to_datetime(dfs['bnetza_pv']['Inbetriebnahme-datum *)'],
                                                             errors='coerce',
                                                             dayfirst=True,
                                                             infer_datetime_format=True)
dfs['bnetza_pv']['Inbetriebnahme-datum *)'] = dfs['bnetza_pv']['Inbetriebnahme-datum *)'].apply(
    lambda x: x.to_datetime64()
)

dfs['bnetza_pv_historic'] = dfs['bnetza_pv_historic'].drop(['Unnamed: 7'], axis=1)

pickle.dump(dfs, open("intermediate/temp_dfs_DE_after_reading.pickle", "wb"))
dfs = pickle.load(open("intermediate/temp_dfs_DE_after_reading.pickle", "rb"))

"""
Explanation: Download and process per country
For one country after the other, the original data is downloaded, read, processed, translated, eventually georeferenced and saved. If the respective files are already present in the local folder, they are reused. The data is processed with pandas DataFrames.<br>
Germany DE
Download and read
The data which will be processed below is provided by the following data sources:
Netztransparenz.de - Official grid transparency platform of the German Transmission System Operators (TSOs): 50Hertz, Amprion, TenneT and TransnetBW.
Bundesnetzagentur (BNetzA) - German Federal Network Agency for Electricity, Gas, Telecommunications, Posts and Railway (in separate files: one for roof-mounted PV power plants and one for all other renewable energy power plants).
The data URLs for the BNetzA files are updated every few months. To be sure, always check that the links (url_bnetza; url_bnetza_pv) are up to date.
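One quick way to see which download URLs are currently configured for Germany is to look at the corresponding rows of the source list loaded above; this is only a convenience sketch and assumes the sources.csv columns (country, source, url) used earlier for the UK link update:

```python
# Convenience check of the configured German download links (sketch)
de_sources = source_df.loc[source_df['country'] == 'DE', ['source', 'url']]
print(de_sources.to_string(index=False))
```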
End of explanation """ # Choose the translation terms for Germany, create dictionary and show dictionary columnnames = pd.read_csv(os.path.join('input', 'column_translation_list.csv')) idx_DE = columnnames[columnnames['country'] == 'DE'].index column_dict_DE = columnnames.loc[idx_DE].set_index('original_name')['opsd_name'].to_dict() column_dict_DE # Start the column translation process for each original data source print('Translation...') for dataset in dfs: # Remove newlines and any other duplicate whitespaces in column names: dfs[dataset] = dfs[dataset].rename(columns={col: re.sub(r"\s+", ' ', col) for col in dfs[dataset].columns}) # Do column name translations print(dataset) #print(list(dfs[dataset].columns)) dfs[dataset].rename(columns=column_dict_DE, inplace=True) #print(list(dfs[dataset].columns).index('decommissioning_date')) #print('--------------------------------------------') print('done.') """ Explanation: Translate column names To standardise the DataFrame the original column names from the German TSOs and the BNetzA wil be translated and new English column names wil be assigned to the DataFrame. The unique column names are required to merge the DataFrame.<br> The column_translation_list is provided here as csv in the input folder. It is loaded in 2.3 Setup of translation dictionaries. End of explanation """ # Add data source names to the DataFrames for tso in tsos: dfs[tso]['data_source'] = tso dfs[tso]['tso'] = tso dfs['bnetza']['data_source'] = 'BNetzA' dfs['bnetza_pv']['data_source'] = 'BNetzA_PV' dfs['bnetza_pv_historic']['data_source'] = 'BNetzA_PV_historic' # Add for the BNetzA PV data the energy source level 2 dfs['bnetza_pv']['energy_source_level_2'] = 'Photovoltaics' dfs['bnetza_pv_historic']['energy_source_level_2'] = 'Photovoltaics' # Select those columns of the original data which are utilised further dfs['bnetza'] = dfs['bnetza'].loc[:, ('commissioning_date', 'decommissioning_date', 'notification_reason', 'energy_source_level_2', 'electrical_capacity_kW', 'thermal_capacity_kW', 'voltage_level', 'dso', 'eeg_id', 'bnetza_id', 'federal_state', 'postcode', 'municipality_code', 'municipality', 'address', 'address_number', 'utm_zone', 'utm_east', 'utm_north', 'data_source')] for dataset in datasets: print(dataset+':'); display(dfs[dataset].tail(2)) """ Explanation: Add information and choose columns All data source names and for the BNetzA-PV data the energy source level 2 will added. End of explanation """ # Merge DataFrames of each original source into a common DataFrame DE_renewables dfs_list = [] for dataset in datasets: dfs_list.append(dfs[dataset]) DE_renewables = pd.concat(dfs_list, sort=True) DE_renewables.head(2) DE_renewables.reset_index(drop=True, inplace=True) DE_renewables.head(2) """ Explanation: Merge DataFrames The individual DataFrames from the TSOs (Netztransparenz.de) and BNetzA are merged. End of explanation """ # Choose the translation terms for Germany, create dictionary and show dictionary valuenames = pd.read_csv(os.path.join('input', 'value_translation_list.csv')) idx_DE = valuenames[valuenames['country'] == 'DE'].index value_dict_DE = valuenames.loc[idx_DE].set_index('original_name')['opsd_name'].to_dict() value_dict_DE print('replacing...') # Replace all original value names by the OPSD value names. # Running time: some minutes. 
DE_renewables.replace(value_dict_DE, inplace=True)
print('Done!')

DE_renewables['postcode'] = DE_renewables['postcode'].apply(pd.to_numeric, errors='ignore')

"""
Explanation: Translate values and harmonize energy source level 2
Different German terms for energy source level 2, energy source level 3, technology and voltage levels are translated and harmonized across the individual data sources.
The value_translation_list is provided here as csv in the input folder. It is loaded in 2.3 Setup of translation dictionaries.
End of explanation
"""

# Create dictionary in order to assign energy_source to its subtype
energy_source_dict_DE = valuenames.loc[idx_DE].set_index(
    'opsd_name')['energy_source_level_2'].to_dict()

# Column energy_source partly contains energy source level 3 and technology information,
# thus this column is copied to the new column technology...
DE_renewables['technology'] = DE_renewables['energy_source_level_2']

# ...and the energy source level 2 values are replaced by the higher level classification
DE_renewables['energy_source_level_2'].replace(energy_source_dict_DE, inplace=True)

# Choose energy source level 2 entries where energy_source is "Bioenergy" in order to
# separate Bioenergy subtypes to "energy_source_level_3" and subtypes for the rest to "technology"
idx_DE_Bioenergy = DE_renewables[DE_renewables['energy_source_level_2'] == 'Bioenergy'].index

# Assign technology to energy source level 3 for all entries where energy source level 2 is
# Bioenergy and delete those entries from technology.
DE_renewables[['energy_source_level_3']] = DE_renewables.iloc[idx_DE_Bioenergy][['technology']]
# Use .loc with row and column labels in one step so the assignment is applied to
# DE_renewables itself (chained indexing would silently modify a temporary copy instead).
DE_renewables.loc[idx_DE_Bioenergy, 'technology'] = np.nan

# Assign energy source level 1 to the dataframe
DE_renewables['energy_source_level_1'] = 'Renewable energy'

# Show the hierarchy of the energy types present in the frame
energy_columns = ['energy_source_level_1', 'energy_source_level_2', 'energy_source_level_3', 'technology']
DE_renewables[energy_columns].drop_duplicates().sort_values(by='energy_source_level_2')

"""
Explanation: Separate and assign energy source level 1 - 3 and technology
End of explanation
"""

drop_mask = DE_renewables['energy_source_level_2'].isin(['Other fossil fuels', 'Storage'])
DE_renewables.drop(DE_renewables.index[drop_mask], axis=0, inplace=True)

"""
Explanation: According to the OPSD energy hierarchy, the power plants whose energy_source_level_2 is either Storage or Other fossil fuels do not belong to the class of renewable-energy facilities. Therefore, we can remove them.
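The same filtering can also be written as plain boolean indexing, which avoids going through the index; this is just an equivalent formulation of the drop above, shown for reference:

```python
# Equivalent to the drop above: keep only renewable energy source types
keep_mask = ~DE_renewables['energy_source_level_2'].isin(['Other fossil fuels', 'Storage'])
DE_renewables = DE_renewables[keep_mask]
```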
End of explanation """ # Electrical capacity per energy source level 2 (in MW) DE_renewables.groupby(['energy_source_level_2'])['electrical_capacity_kW'].sum() / 1000 """ Explanation: Summary of DataFrame End of explanation """ # kW to MW DE_renewables[['electrical_capacity_kW', 'thermal_capacity_kW']] /= 1000 # adapt column name DE_renewables.rename(columns={'electrical_capacity_kW': 'electrical_capacity', 'thermal_capacity_kW': 'thermal_capacity'}, inplace=True) """ Explanation: Transform electrical capacity from kW to MW End of explanation """ # Read generated postcode/location file postcode = pd.read_csv(os.path.join('input', 'de_tso_postcode_full.csv')) # Drop possible duplicates in postcodes postcode.drop_duplicates('postcode', keep='last', inplace=True) # Show first entries postcode.head(2) """ Explanation: Georeferencing Get coordinates by postcode (for data with no existing geocoordinates) The available post code in the original data provides a first approximation for the geocoordinates of the RE power plants.<br> The BNetzA data provides the full zip code whereas due to data privacy the TSOs only report the first three digits of the power plant's post code (e.g. 024xx) and no address. Subsequently a centroid of the post code region polygon is used to find the coordinates. With data from * http://www.suche-postleitzahl.org/downloads?download=plz-gebiete.shp.zip * http://www.suche-postleitzahl.org/downloads?download_file=plz-3stellig.shp.zip * http://www.suche-postleitzahl.org/downloads a CSV-file for all existing German post codes with matching geocoordinates has been compiled. The latitude and longitude coordinates were generated by running a PostgreSQL + PostGIS database. Additionally the respective TSO has been added to each post code. (A Link to the SQL script will follow here later) (License: http://www.suche-postleitzahl.org/downloads, Open Database Licence for free use. Source of data: © OpenStreetMap contributors) End of explanation """ # Take postcode and longitude/latitude information postcode = postcode[['postcode', 'lon', 'lat']] # Cast DE_renewables['postcode'] to int64 in order to do the natural join of the dataframes DE_renewables['postcode'] = pd.to_numeric(DE_renewables['postcode'], errors='coerce') # Join two dataframes DE_renewables = DE_renewables.merge(postcode, on=['postcode'], how='left') """ Explanation: Merge geometry information by using the postcode End of explanation """ DE_renewables.groupby(['utm_zone'])['utm_zone'].count() """ Explanation: Transform geoinformation (for data with already existing geoinformation) In this section the existing geoinformation (in UTM-format) will be transformed into latidude and longitude coordiates as a uniform standard for geoinformation. The BNetzA data set offers UTM Geoinformation with the columns utm_zone (UTM-Zonenwert), utm_east and utm_north. Most of utm_east-values include the utm_zone-value 32 at the beginning of the number. In order to properly standardize and transform this geoinformation into latitude and longitude it is necessary to remove this utm_zone value. For all UTM entries the utm_zone 32 is used by the BNetzA. |utm_zone| utm_east| utm_north| comment| |---|---|---| ----| |32| 413151.72| 6027467.73| proper coordinates| |32| 32912159.6008| 5692423.9664| caused error by 32| How many different utm_zone values are in the data set? 
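To make the correction concrete, the following toy example shows how the leading zone digits are stripped from a single utm_east value before the conversion; the actual operation on the full column follows in the next cell:

```python
# Toy illustration of the "32" prefix problem described above (not applied to the data here)
value = '32912159.6008'
corrected = float(value[2:]) if value.startswith('32') else float(value)
print(corrected)  # 912159.6008
```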
End of explanation """ # Find entries with 32 value at the beginning idx_32 = (DE_renewables['utm_east'].astype(str).str[:2] == '32') idx_notnull = DE_renewables['utm_east'].notnull() # Remove 32 from utm_east entries DE_renewables.loc[idx_32, 'utm_east'] = DE_renewables.loc[idx_32, 'utm_east'].astype(str).str[2:].astype(float) def convert_to_latlon(utm_east, utm_north, utm_zone): try: return utm.to_latlon(utm_east, utm_north, utm_zone, 'U') except: return '' DE_renewables['latlon'] = DE_renewables.loc[idx_notnull, ['utm_east', 'utm_north', 'utm_zone']].apply( lambda x: convert_to_latlon(x[0], x[1], x[2]), axis=1).astype(str) """ Explanation: Remove the utm_zone "32" from the utm_east value End of explanation """ lat = [] lon = [] for row in DE_renewables['latlon']: try: # Split tuple format into the column lat and lon row = row.lstrip('(').rstrip(')') parts = row.split(',') if(len(parts)<2): raise Exception('This is not a proper tuple. So go to exception block.') lat.append(parts[0]) lon.append(parts[1]) except: # set NaN lat.append(np.NaN) lon.append(np.NaN) DE_renewables['latitude'] = pd.to_numeric(lat) DE_renewables['longitude'] = pd.to_numeric(lon) # Add new values to DataFrame lon and lat DE_renewables['lat'] = DE_renewables[['lat', 'latitude']].apply( lambda x: x[1] if pd.isnull(x[0]) else x[0], axis=1) DE_renewables['lon'] = DE_renewables[['lon', 'longitude']].apply( lambda x: x[1] if pd.isnull(x[0]) else x[0], axis=1) """ Explanation: Conversion UTM to latitude and longitude End of explanation """ #DE_renewables[DE_renewables['data_source'] == '50Hertz'].to_excel('test.xlsx') print('Missing coordinates ', DE_renewables.lat.isnull().sum()) display( DE_renewables[DE_renewables.lat.isnull()].groupby( ['energy_source_level_2','data_source'] )['data_source'].count() ) print('Share of missing coordinates (note that NaN can mean it\'s all fine):') DE_renewables[DE_renewables.lat.isnull()].groupby( ['energy_source_level_2','data_source'] )['data_source'].count() / DE_renewables.groupby( ['energy_source_level_2','data_source'] )['data_source'].count() """ Explanation: Check: missing coordinates by data source and type End of explanation """ # drop lonlat column that contains both, latitute and longitude DE_renewables.drop(['latlon', 'longitude', 'latitude'], axis=1, inplace=True) """ Explanation: Remove temporary columns End of explanation """ pickle.dump(DE_renewables, open( "intermediate/temp_dfs_DE_before_cleaning.pickle", "wb" ) ) DE_renewables = pickle.load( open( "intermediate/temp_dfs_DE_before_cleaning.pickle", "rb" ) ) """ Explanation: Save temporary Pickle (to have a point to quickly return to if things break after this point): End of explanation """ # Remove out-of-range dates # Keep only values between 1900 and 2100 to rule out outliers / wrong values. # Also, Excel doesn't support dates before 1900.. 
mask = ((DE_renewables['commissioning_date']>pd.Timestamp('1900')) & (DE_renewables['commissioning_date']<pd.Timestamp('2100'))) DE_renewables = DE_renewables[mask] DE_renewables['municipality_code'] = DE_renewables['municipality_code'].astype(str) # Remove spaces from municipality code DE_renewables['municipality_code'] = DE_renewables['municipality_code'].str.replace(' ', '', regex=False) DE_renewables['municipality_code'] = pd.to_numeric(DE_renewables['municipality_code'], errors='coerce', downcast='integer') # Merge address and address_number to_string = lambda x: str(x) if not pd.isnull(x) else '' DE_renewables['address'] = DE_renewables['address'].map(to_string) + ' ' + DE_renewables['address_number'].map(to_string) # Make sure that the column has no whitespaces at the beginning and the end DE_renewables['address'] = DE_renewables['address'].str.strip() # Remove the column with address numbers as it is not needed anymore del DE_renewables['address_number'] """ Explanation: Clean data End of explanation """ # Set up a temporary postcode column as a string column for joining with the appropriate NUTS correspondence table DE_renewables['postcode_str'] = DE_renewables['postcode'].astype(str).str[:-2] DE_renewables = nuts_converter.add_nuts_information(DE_renewables, 'DE', DE_postcode2nuts_filepath, postcode_column='postcode_str', how=['postcode', 'municipality_code', 'municipality', 'latlon']) # Drop the temporary column DE_renewables.drop('postcode_str', axis='columns', inplace=True) # Report the number of facilites whose NUTS codes were successfully sudetermined determined = DE_renewables['nuts_1_region'].notnull().sum() print('NUTS successfully determined for', determined, 'out of', DE_renewables.shape[0], 'facilities in DE.') # Report the number of facilites whose NUTS codes could not be determined not_determined = DE_renewables['nuts_1_region'].isnull().sum() print('NUTS could not be determined for', not_determined, 'out of', DE_renewables.shape[0], 'facilities in DE.') """ Explanation: Assign NUTS codes End of explanation """ visualize_points(DE_renewables['lat'], DE_renewables['lon'], 'Germany', categories=DE_renewables['energy_source_level_2'] ) """ Explanation: Visualize End of explanation """ DE_renewables.to_pickle('intermediate/DE_renewables.pickle') del DE_renewables """ Explanation: Save The merged, translated, cleaned, DataFrame will be saved temporily as a pickle file, which stores a Python object fast. End of explanation """ # Download the data for Denmark filepaths = downloader.download_data_for_country('DK') print(filepaths) """ Explanation: Denmark DK Download and read The data which will be processed below is provided by the following data sources: Energistyrelsen (ens) / Danish Energy Agency - The wind turbines register is released by the Danish Energy Agency. Energinet.dk - The data of solar power plants are released by the leading transmission network operator Denmark. geonames.org - The postcode data from Denmark is provided by Geonames and licensed under a Creative Commons Attribution 3.0 license. Eurostat - The data for converting information on municipalities, postcodes and geographic coordinates to NUTS 2016 classification codes. End of explanation """ def read_dk_wind_turbines(filepath, sheet_name): # Reads the data on Danish wind turbines # from the sheet of the given name # in the file with the path. # Returns the data as a Pandas dataframe. 
book = xlrd.open_workbook(filepath) sheet = book.sheet_by_name(sheet_name) # Since the column names are in two rows, not one, # collect them in two parts. The first part is # fixed and contains column names. header = [] for i in range(0, 16): # Make sure that strings 1) do not contain the newline sign # and 2) have no trailing blank spaces. column_name = sheet.cell_value(17, i).replace("\n", "").strip() header = header + [column_name] # The second part is variable. It consists of two subparts: # 1) previous years (type float) # 2) the past months of the current year (type date) # Reading previous years as column names i = 16 cell = sheet.cell(16, i) while cell.ctype == xlrd.XL_CELL_NUMBER: column_name = str(int(cell.value)) header = header + [column_name] i = i + 1 cell = sheet.cell(16, i) # Reading the months of the current year as column names while cell.ctype == xlrd.XL_CELL_DATE: year, month, _, _, _, _ = xlrd.xldate_as_tuple(cell.value, book.datemode) column_name = str("{}-{}".format(year, month)) header = header + [column_name] i = i + 1 cell = sheet.cell(16, i) # Add the final column for the total of the current year header += ['{}-total'.format(header[-1].split('-')[0])] # Skip the first 17 rows in the sheet. The rest contains the data. df = pd.read_excel(filepath, sheet_name=sheet_name, skiprows=17, skipfooter=3 ) # #df.drop(df.columns[len(df.columns)-1], axis=1, inplace=True) # Set the column names. df.columns = header return df # Get wind turbines data wind_turbines_sheet_name = 'IkkeAfmeldte-Existing turbines' DK_wind_filepath = filepaths['Energistyrelsen'] DK_wind_df = read_dk_wind_turbines(DK_wind_filepath, wind_turbines_sheet_name ) # Get photovoltaic data DK_solar_filepath = filepaths['Energinet'] DK_solar_df = pd.read_excel(DK_solar_filepath, sheet_name='Data', skiprows=[0], converters={'Postnr': str} ) # Remove duplicates DK_wind_df.drop_duplicates(inplace=True) DK_solar_df.drop_duplicates(inplace=True) """ Explanation: The function for reading the data on the wind turbines. End of explanation """ # Choose the translation terms for Denmark, create dictionary and show dictionary idx_DK = columnnames[columnnames['country'] == 'DK'].index column_dict_DK = columnnames.loc[idx_DK].set_index('original_name')['opsd_name'].to_dict() # Windows has problems reading the csv entry for east and north (DK). # The reason might be the difference when opening the csv between linux and # windows. 
column_dict_DK_temp = {} for k, v in column_dict_DK.items(): column_dict_DK_temp[k] = v if v == 'utm_east' or v == 'utm_north': # merge 2 lines to 1 new_key = ''.join(k.splitlines()) column_dict_DK_temp[new_key] = v column_dict_DK = column_dict_DK_temp column_dict_DK # Replace column names based on column_dict_DK DK_wind_df.rename(columns=column_dict_DK, inplace=True) DK_solar_df.rename(columns=column_dict_DK, inplace=True) """ Explanation: Translate column names End of explanation """ # Add names of the data sources to the DataFrames DK_wind_df['data_source'] = 'Energistyrelsen' DK_solar_df['data_source'] = 'Energinet.dk' # Add energy source level 2 and technology for each of the two DataFrames DK_wind_df['energy_source_level_2'] = 'Wind' DK_solar_df['energy_source_level_2'] = 'Solar' DK_solar_df['technology'] = 'Photovoltaics' """ Explanation: Add data source and missing information End of explanation """ mask=DK_solar_df['commissioning_date'] == '1970-01-01' DK_solar_df.loc[mask, 'commissioning_date'] = np.nan """ Explanation: Correct the dates Some dates in the Energinet dataset are equal to 1970-01-01, which should be NaN instead End of explanation """ # Choose the translation terms for Denmark, create dictionary and show dictionary idx_DK = valuenames[valuenames['country'] == 'DK'].index value_dict_DK = valuenames.loc[idx_DK].set_index('original_name')['opsd_name'].to_dict() # Replace all original value names by the OPSD value names DK_wind_df.replace(value_dict_DK, inplace=True) DK_solar_df.replace(value_dict_DK, inplace=True) """ Explanation: Translate values and harmonize energy source level 2 End of explanation """ # Index for all values with utm information idx_notnull = DK_wind_df['utm_east'].notnull() # Convert from UTM values to latitude and longitude coordinates DK_wind_df['lonlat'] = DK_wind_df.loc[idx_notnull, ['utm_east', 'utm_north'] ].apply(lambda x: utm.to_latlon(x[0], x[1], 32, 'U'), axis=1).astype(str) # Split latitude and longitude in two columns lat = [] lon = [] for row in DK_wind_df['lonlat']: try: # Split tuple format # into the column lat and lon row = row.lstrip('(').rstrip(')') lat.append(row.split(',')[0]) lon.append(row.split(',')[1]) except: # set NAN lat.append(np.NaN) lon.append(np.NaN) DK_wind_df['lat'] = pd.to_numeric(lat) DK_wind_df['lon'] = pd.to_numeric(lon) # drop lonlat column that contains both, latitute and longitude DK_wind_df.drop('lonlat', axis=1, inplace=True) """ Explanation: Georeferencing UTM32 to latitude and longitude (Data from Energistyrelsen) The Energistyrelsen data set offers UTM Geoinformation with the columns utm_east and utm_north belonging to the UTM zone 32. In this section the existing geoinformation (in UTM-format) will be transformed into latidude and longitude coordiates as a uniform standard for geoinformation. 
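For a single coordinate pair, the conversion used above boils down to one call of the utm package; the numbers below are an arbitrary point in zone 32U, chosen purely for illustration:

```python
import utm

# Arbitrary UTM zone 32U coordinates (illustrative values only)
lat_example, lon_example = utm.to_latlon(500000.0, 6200000.0, 32, 'U')
print(round(lat_example, 4), round(lon_example, 4))
```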
End of explanation """ # Get geo-information zip_DK_geo = zipfile.ZipFile(filepaths['Geonames']) # Read generated postcode/location file DK_geo = pd.read_csv(zip_DK_geo.open('DK.txt'), sep='\t', header=None) # add column names as defined in associated readme file DK_geo.columns = ['country_code', 'postcode', 'place_name', 'admin_name1', 'admin_code1', 'admin_name2', 'admin_code2', 'admin_name3', 'admin_code3', 'lat', 'lon', 'accuracy'] # Drop rows of possible duplicate postal_code DK_geo.drop_duplicates('postcode', keep='last', inplace=True) DK_geo['postcode'] = DK_geo['postcode'].astype(str) # Add longitude/latitude infomation assigned by postcode (for Energinet.dk data) DK_solar_df = DK_solar_df.merge(DK_geo[['postcode', 'lon', 'lat']], on=['postcode'], how='left') # Show number of units with missing coordinates separated by wind and solar print('Missing Coordinates DK_wind', DK_wind_df.lat.isnull().sum(), 'out of', len(DK_wind_df.index)) print('Missing Coordinates DK_solar', DK_solar_df.lat.isnull().sum(), 'out of', len(DK_solar_df.index)) zip_DK_geo.close() """ Explanation: Postcode to lat/lon (WGS84) (for data from Energinet.dk) The available post code in the original data provides an approximation for the geocoordinates of the solar power plants.<br> The postcode will be assigned to latitude and longitude coordinates with the help of the postcode table. End of explanation """ # Merge DataFrames for wind and solar into DK_renewables dataframes = [DK_wind_df, DK_solar_df] DK_renewables = pd.concat(dataframes, sort=False) DK_renewables = DK_renewables.reset_index() # Assign energy source level 1 to the dataframe DK_renewables['energy_source_level_1'] = 'Renewable energy' # Merge the address and address-number columns into one to_string = lambda x: str(x) if not pd.isnull(x) else "" DK_renewables['address'] = DK_renewables['address'].map(to_string) + " " + DK_renewables['address_number'].map(to_string) # Make sure that the column has no whitespaces at the beginning or the end DK_renewables['address'] = DK_renewables['address'].str.strip() # Assign NUTS codes DK_postcode2nuts = filepaths['Eurostat'] DK_renewables = nuts_converter.add_nuts_information(DK_renewables, 'DK', DK_postcode2nuts, how=['latlon', 'postcode', 'municipality_code', 'municipality_name']) # Report the number of facilites whose NUTS codes were successfully sudetermined determined = DK_renewables['nuts_1_region'].notnull().sum() print('NUTS successfully determined for', determined, 'out of', DK_renewables.shape[0], 'facilities in DK.') # Report the number of facilites whose NUTS codes could not be determined not_determined = DK_renewables['nuts_1_region'].isnull().sum() print('NUTS could not be determined for', not_determined, 'out of', DK_renewables.shape[0], 'facilities in DK.') """ Explanation: Merge DataFrames, add NUTS information and choose columns End of explanation """ DK_renewables[DK_renewables['nuts_1_region'].isnull()][['municipality', 'municipality_code', 'lat', 'lon']] """ Explanation: Let us check geoinformation on the facilities for which NUTS codes could not be determined. 
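A breakdown by data source makes the same check easier to read; this is an optional sketch on top of the merged frame:

```python
# Optional: count the facilities without a NUTS code per data source
missing_nuts = DK_renewables[DK_renewables['nuts_1_region'].isnull()]
print(missing_nuts.groupby('data_source').size())
```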
End of explanation """ # Select those columns of the orignal data which are utilised further columns_of_interest = ['commissioning_date', 'energy_source_level_1', 'energy_source_level_2', 'technology', 'electrical_capacity_kW', 'dso', 'gsrn_id', 'postcode', 'municipality_code', 'municipality', 'address', 'utm_east', 'utm_north', 'lon', 'lat', 'nuts_1_region', 'nuts_2_region', 'nuts_3_region', 'hub_height', 'rotor_diameter', 'manufacturer', 'model', 'data_source'] # Clean DataFrame from columns other than specified above DK_renewables = DK_renewables.loc[:, columns_of_interest] DK_renewables.reset_index(drop=True, inplace=True) """ Explanation: As we see, no information on municipality and latitude/longitude coordinates are present for those power plants, so there was no possibility to assign them their NUTS codes. Select columns End of explanation """ # Remove duplicates DK_renewables.drop_duplicates(inplace=True) DK_renewables.reset_index(drop=True, inplace=True) """ Explanation: Remove duplicate rows End of explanation """ # kW to MW DK_renewables['electrical_capacity_kW'] /= 1000 # adapt column name DK_renewables.rename(columns={'electrical_capacity_kW': 'electrical_capacity'}, inplace=True) """ Explanation: Transform electrical_capacity from kW to MW End of explanation """ visualize_points(DK_renewables['lat'], DK_renewables['lon'], 'Denmark', categories=DK_renewables['energy_source_level_2'] ) """ Explanation: Visualize End of explanation """ DK_renewables.to_pickle('intermediate/DK_renewables.pickle') del DK_renewables """ Explanation: Save End of explanation """ # Download the data filepaths = downloader.download_data_for_country('FR') # Show the local paths filepaths """ Explanation: France FR The data which will be processed below is provided by the following data sources: Ministry for Ecological and Inclusive Transition - Number of installations and installed capacity of the different renewable source for every municipality in France. Data until 31/12/2017. As of 2020, this dataset is no longer maintained by the ministry and we refer to it as the old dataset. ODRÉ - The Open Data Réseaux Énergies (ODRÉ, Open Data Networks for Energy) platform provides stakeholders with data around the themes of Production, Multi-energy Consumption, Storage, Mobility, Territories and Regions, Infrastructure, Markets and Meteorology. As of 2020, we refer to this dataset as the new dataset. It contains the data up to 31/12/2018. OpenDataSoft - a list of French INSEE codes and corresponding coordinates, published under the Licence Ouverte (Etalab). 
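Once the files have been downloaded in the next cell, a quick way to peek at the ODRE extract before the full parse is to read only a handful of rows; this is an optional sketch:

```python
# Optional peek at the ODRE extract (assumes the download in the next cell has run)
preview = pd.read_csv(filepaths['ODRE'], sep=';', nrows=5)
print(preview.columns.tolist())
```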
End of explanation """ # Load the data FR_re_filepath = filepaths['ODRE'] FR_re_df = pd.read_csv(FR_re_filepath, sep=';', parse_dates=['dateRaccordement', 'dateDeraccordement', 'dateMiseEnService', 'dateDebutVersion'], infer_datetime_format=True) # Make sure that the column dateDeraccordement is datetime FR_re_df['dateDeraccordement'] = pd.to_datetime(FR_re_df['dateDeraccordement'], errors='coerce') """ Explanation: ODRE data Load the data End of explanation """ # Choose the translation terms for France, create dictionary and show it columnnames = pd.read_csv(os.path.join('input', 'column_translation_list.csv')) idx_FR = columnnames[(columnnames['country'] == 'FR') & (columnnames['data_source'] == 'ODRE')].index column_dict_FR = columnnames.loc[idx_FR].set_index('original_name')['opsd_name'].to_dict() column_dict_FR # Translate column names FR_re_df.rename(columns=column_dict_FR, inplace=True) # Keep only the columns specified in the translation dictionary as we'll need only them columns_to_keep = list(column_dict_FR.values()) FR_re_df = FR_re_df.loc[:, columns_to_keep] FR_re_df.reset_index(drop=True, inplace=True) # Show a pair of rows FR_re_df.head(2) """ Explanation: Translate column names End of explanation """ FR_re_df['data_source'] = 'Open Data Réseaux Énergies' FR_re_df['as_of_year'] = 2018 # Year for which the dataset has been compiled by the data source """ Explanation: Add data source End of explanation """ # Choose the translation terms for France, create a dictionary and show it valuenames = pd.read_csv(os.path.join('input', 'value_translation_list.csv')) idx_FR = valuenames[(valuenames['country'] == 'FR') & (valuenames['data_source'] == 'ODRE')].index value_dict_FR = valuenames.loc[idx_FR].set_index('original_name')['opsd_name'].to_dict() value_dict_FR # Replace all original value names by the OPSD value names FR_re_df.replace(value_dict_FR, inplace=True) """ Explanation: Translate values End of explanation """ no_name_aliases = ['Agrégation des installations de moins de 36KW', 'Confidentiel', 'confidentiel'] no_name_mask = FR_re_df['site_name'].isin(no_name_aliases) FR_re_df.loc[no_name_mask, 'site_name'] = np.nan """ Explanation: Correct site names Some facilites do not come with their names. Instead, strings such as Agrégation des installations de moins de 36KW, Confidentiel and confidentiel are used. Here, we correct this by setting all such names to np.nan. End of explanation """ mask = (FR_re_df['commissioning_date'].dt.year <= 1900) &\ ((FR_re_df['technology'].isin(['Photovoltaics', 'Onshore']) |\ (FR_re_df['energy_source_level_2'] == 'Solar'))) FR_re_df.loc[mask, 'commissioning_date'] = np.nan #for x in FR_re_df[FR_re_df['commissioning_date'].dt.year <= 1980]['technology']: # print(x) """ Explanation: Replace suspicious dates with N/A The commissioning dates of some solar and wind plants are set in the early 20th and late 19th centuries. We replace those dates with N/A since they do not make sense. End of explanation """ # Check the columns FR_re_df.isnull().all() """ Explanation: Check missing values Now, we will drop all the columns and all the rows which contain only null values. End of explanation """ # Check the rows print('There is a row containing all the null values?') FR_re_df.isnull().all(axis=1).any() """ Explanation: As we see above, no column contains only the null value, so we do not need to drop any. 
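For completeness, had any all-null columns or rows been present, they could have been dropped as follows; this sketch is not needed for the current data:

```python
# Drop columns / rows that consist only of null values (not required here)
FR_re_df = FR_re_df.dropna(axis='columns', how='all')
FR_re_df = FR_re_df.dropna(axis='index', how='all')
```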
End of explanation """ FR_re_df[['energy_source_level_2', 'technology']].drop_duplicates() """ Explanation: No row contains only the null values, so no need to for filtering on that basis. Standardize the energy types and technologies Now, we proceed with standardizing the energy types and technologies present in the data according to the OPSD energy hierarchy. End of explanation """ # Define the mask for selecting rows with unusable info on electrical capacity ec_mask = (FR_re_df['electrical_capacity'] == 0) | (FR_re_df['electrical_capacity'].isna()) # Define the mask for selecting the rows with non-renewable energy_source_level_2 non_renewable_esl2 = ['Non-renewable thermal', 'Non-hydraulic storage', 'Nuclear'] non_renewable_esl2_mask = FR_re_df['energy_source_level_2'].isin(non_renewable_esl2) # Define the mask to select the rows with non-renewable technology non_renewable_technologies = ['Steam turbine', 'Combustion cogeneration', 'Combustion engine', 'Combined cycle', 'Pumped storage', 'Piston motor', 'Nuclear fission'] non_renewable_technology_mask = FR_re_df['technology'].isin(non_renewable_technologies) # Define the mask to select the rows without specified energy type and technology other_mask = (FR_re_df['energy_source_level_2'] == 'Other') & \ ((FR_re_df['technology'] == 'Other') | (pd.isnull(FR_re_df['technology']))) # Combine the masks drop_mask = ec_mask | non_renewable_esl2_mask | non_renewable_technology_mask | other_mask # Show how many rows are going to be dropped print('Dropping', drop_mask.sum(), 'rows out of', FR_re_df.shape[0]) # Keep all the rows not selected by the drop mask keep_mask = ~drop_mask FR_re_df = FR_re_df[keep_mask].reindex() # Show some rows print("A sample of the kept data:") FR_re_df.sample(5) """ Explanation: In order to facilitate further processing, we can remove the rows that we know for sure we won't need. Those are the rows satisfying either of the following conditions: * electrical_capacity is 0 or NaN, * energy_source_level_2 corresponds to a non-renewable energy type (Non-renewable thermal, Non-hydraulic storage, Nuclear), * technology indicates that a non-renewable technology is used at the facility (Steam turbine, Combustion cogeneration, Combustion engine, Combined cycle, Pumped storage, Piston motor, Nuclear fission). * energy_source_level_2 is Other and technology is Other or NaN. End of explanation """ FR_re_df[['energy_source_level_2', 'technology']].drop_duplicates() """ Explanation: Standardize source levels 1-3 and technology Let us see the energy types and technologies present in the filtered data. 
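Besides the distinct pairs, it can help to see how many rows fall into each combination, since that indicates which of the corrections described below matter most; the 'N/A' label is only a display placeholder for missing technology values:

```python
# Optional: row counts per (energy source level 2, technology) combination
pair_counts = (FR_re_df
               .fillna({'technology': 'N/A'})
               .groupby(['energy_source_level_2', 'technology'])
               .size()
               .sort_values(ascending=False))
print(pair_counts)
```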
End of explanation """ # Make sure that the proper string is used to indicate other or unspecified technology FR_re_df['technology'].replace('Other', 'Other or unspecified technology', inplace=True) # Define a function that will deal with other cases def standardize(row): level_2 = row['energy_source_level_2'] technology = row['technology'] if level_2 in ['Marine', 'Geothermal', 'Bioenergy']: technology = np.nan elif level_2 in ['Solar', 'Hydro', 'Other'] and pd.isna(technology): technology = 'Other or unspecified technology' elif level_2 == 'Wind' and (pd.isna(technology) or technology == 'Other or unspecified technology'): technology = 'Onshore' if level_2 == 'Hydro' and technology in ['Lake', 'Closed']: technology = 'Other or unspecified technology' elif level_2 == 'Solar' and technology == 'Thermodynamic': technology = 'Other or unspecified technology' elif level_2 == 'Other' and technology == 'Photovoltaics': level_2 = 'Solar' return [level_2, technology] # Apply the rules coded in function standardize FR_re_df[['energy_source_level_2', 'technology']] = FR_re_df.apply(standardize, axis=1, result_type='expand') # Show the existing level 2 types and technologies FR_re_df[['energy_source_level_2', 'technology']].drop_duplicates() """ Explanation: First, let us standardize the values for energy source level 2 and technology. 1. We will use np.nan to indicate that technology should not be specified for the respective kind of sources according to the OPSD hierarchy. 2. 'Other or unspecified technology' will mean that technology should be specified but it was unclear or missing in the original dataset. That means that we need to apply the following correction rules to the current data: - All occurences of Other in the column technology should be replaced with Other or unspecified technology. - If energy_source_level_2 is Marine, Geothermal, or Bioenergy, then technology should be set to np.nan regardless of what is specified in the data set. - If energy_source_level_2 is Solar or Hydro, and technology is NaN, then technology should be set to Other or unspecified technology. - If energy_source_level_2 is Wind and technology is NaN, then technology should be set to Onshore since France has no offshore wind farms. - If energy_source_level_2 is Hydro and technology is Lake or Closed, then technology should be set to Other or unspecified technology. - If energy_source_level_2 is Solar and technology is Thermodynamic, then technology should be set to Other or unspecified technology. - If energy_source_level_2 is Other and technology is Photovoltaics, then energy_source_level_2 should be set to Solar. End of explanation """ FR_re_df[['energy_source_level_2', 'energy_source_level_3']].drop_duplicates() """ Explanation: Let us now deal with the third level of the energy hierarchy. Only Bioenergy has the third level. Information on it can be found in the column energy_source_level_3 (whose original name was combustible). End of explanation """ index = (pd.isna(FR_re_df['energy_source_level_3']) & \ (FR_re_df['energy_source_level_2'] == 'Bioenergy')) FR_re_df.loc[index, 'energy_source_level_3'] = 'Other or unspecified' index = FR_re_df['energy_source_level_3'] == 'Wood' FR_re_df.loc[index, 'energy_source_level_3'] = 'Biomass and biogas' """ Explanation: We see that only the following two corrections are needed: - If energy_source_level_3 is Wood, set energy_source_level_3 to Biomass and biogas. 
- If energy_source_level_3 is NaN, and energy_source_level_2 is Bioenergy, set energy_source_level_3 to Other or unspecified. End of explanation """ # Assign energy_source_level_1 to the dataframe FR_re_df['energy_source_level_1'] = 'Renewable energy' # Show the hierarchy energy_columns = ['energy_source_level_1', 'energy_source_level_2', 'energy_source_level_3', 'technology'] FR_re_df[energy_columns].drop_duplicates() """ Explanation: Finally, we declare all the plants as renewable and show the final hierarchy. End of explanation """ # Get the local path of the downloaded georeferencing data FR_geo_filepath = filepaths['Opendatasoft'] # Read INSEE Code Data FR_geo = pd.read_csv(FR_geo_filepath, sep=';', header=0, converters={'Code_postal': str}) # Drop possible duplicates of the same INSEE code FR_geo.drop_duplicates('INSEE_COM', keep='last', inplace=True) # create columns for latitude/longitude lat = [] lon = [] # split in latitude/longitude for row in FR_geo['Geo Point']: try: # Split tuple format # into the column lat and lon row = row.lstrip('(').rstrip(')') lat.append(row.split(',')[0]) lon.append(row.split(',')[1]) except: # set NAN lat.append(np.NaN) lon.append(np.NaN) # add these columns to the INSEE DataFrame FR_geo['lat'] = pd.to_numeric(lat) FR_geo['lon'] = pd.to_numeric(lon) # Column names of merge key have to be named identically FR_re_df.rename(columns={'municipality_code': 'INSEE_COM'}, inplace=True) # Merge longitude and latitude columns by the Code INSEE FR_re_df = FR_re_df.merge(FR_geo[['INSEE_COM', 'lat', 'lon']], on=['INSEE_COM'], how='left') # Translate Code INSEE column back to municipality_code FR_re_df.rename(columns={'INSEE_COM': 'municipality_code'}, inplace=True) """ Explanation: Georeferencing First, we will determine the plants' longitude and latitude coordinates, and then assign them their NUTS codes. Municipality (INSEE) code to lon/lat End of explanation """ #import importlib #importlib.reload(util.nuts_converter) #from util.nuts_converter import NUTSConverter #nuts_converter = NUTSConverter(downloader, eurostat_eu_directory_path) FR_postcode2nuts_path = filepaths['Eurostat'] FR_re_df = nuts_converter.add_nuts_information(FR_re_df, 'FR', FR_postcode2nuts_path, lau_name_type='NATIONAL', closest_approximation=True, how=['municipality_code', 'latlon']) # Report the number of facilites whose NUTS codes were successfully determined determined = FR_re_df['nuts_1_region'].notnull().sum() print('NUTS successfully determined for', determined, 'out of', FR_re_df.shape[0], 'facilities in FR.') # Report the number of facilites whose NUTS codes could not be determined not_determined = FR_re_df['nuts_1_region'].isnull().sum() print('NUTS could not be determined for', not_determined, 'out of', FR_re_df.shape[0], 'facilities in FR.') """ Explanation: Determine NUTS codes End of explanation """ # Check the facilities without NUTS classification no_nuts = FR_re_df['nuts_1_region'].isnull() # Find the masks where some information for finding the proper NUTS code is present lat_or_lon_present = ~(FR_re_df['lat'].isna() & FR_re_df['lon'].isna()) municipality_code_present = ~(FR_re_df['municipality_code'].isnull()) municipality_name_present = ~(FR_re_df['municipality'].isnull()) # Show the cases where NUTS classification failed even though it shouldn't have print('1. No NUTS code but latitude/longitude info present') problematic_lat_lon = FR_re_df[no_nuts & lat_or_lon_present][['lat', 'lon']] display(problematic_lat_lon) print('2. 
No NUTS code but municipality code info present') problematic_municipality_codes = FR_re_df[no_nuts & municipality_code_present]['municipality_code'].unique() display(problematic_municipality_codes) print('3. No NUTS code but municipality name info present') problematic_municipality_names = FR_re_df[no_nuts & municipality_name_present]['municipality'].unique() display(problematic_municipality_names) """ Explanation: Let us now check the facilities without NUTS classification. End of explanation """ # Check if the any problematic code is actually present in the translation table present_any = False for code in problematic_municipality_codes: mask = nuts_converter.municipality2nuts_df['municipality_code'].str.match(code) present_any = present_any or mask.any() print(present_any) """ Explanation: We see that no row with known longitude and latitude was left unclassified. What we also see is that some municipality codes did not translate to the corresponding NUTS codes. Further inspection shows that those codes are not present in the official NUTS translation tables. End of explanation """ # Print only the names of those problematic municipalities, which appear in the translation table only once. for name in problematic_municipality_names: mask = nuts_converter.municipality2nuts_df['municipality'].str.match(name) if mask.sum() == 1: print(name) """ Explanation: We also see that problematic municipality names are either not present in the official translation tables or more than one municipality in the tables bears them. End of explanation """ FR_re_df['electrical_capacity'] = FR_re_df['electrical_capacity'] / 1000 """ Explanation: Therefore, we can confirm that NUTS classification codes were determined with the highest precision possible. Convert electrical capacity to MW End of explanation """ # Load the data FR_re_filepath = filepaths['gouv.fr'] FR_re_df_old = pd.read_excel(FR_re_filepath, sheet_name='Commune', encoding='UTF8', thousands='.', decimals=',', header=[3, 4], skipfooter=9, # skip the summary rows index_col=[0, 1], # required for MultiIndex converters={'Code officiel géographique': str}) FR_re_df_old.tail() """ Explanation: Old data End of explanation """ # Rearrange data FR_re_df_old.index.rename(['insee_com', 'municipality'], inplace=True) FR_re_df_old.columns.rename(['energy_source_level_2', None], inplace=True) FR_re_df_old = (FR_re_df_old .stack(level='energy_source_level_2', dropna=False) .reset_index(drop=False)) # Choose the translation terms for France, create dictionary and show dictionary idx_FR = columnnames[(columnnames['country'] == 'FR') & (columnnames['data_source'] == 'gouv.fr')].index column_dict_FR = columnnames.loc[idx_FR].set_index('original_name')['opsd_name'].to_dict() column_dict_FR # Translate columnnames FR_re_df_old.rename(columns=column_dict_FR, inplace=True) # Drop all rows that contain NA FR_re_df_old = FR_re_df_old.dropna() FR_re_df_old.head(10) """ Explanation: This French data source contains number of installations and sum of installed capacity per energy source per municipality. The list is limited to the plants which are covered by article 10 of february 2000 by an agreement to a purchase commitment. 
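Before translating the values, a quick look at how the municipality-level rows are distributed across the reported energy sources can help spot obviously missing categories; a minimal check:

```python
# Optional: number of municipality-level rows per reported energy source
print(FR_re_df_old['energy_source_level_2'].value_counts())
```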
End of explanation """ FR_re_df_old['data_source'] = 'Ministry for the Ecological and Inclusive Transition' FR_re_df_old['as_of_year'] = 2017 # Year for which the dataset has been compiled by the data source """ Explanation: Add data source End of explanation """ # Choose the translation terms for France, create dictionary and show dictionary idx_FR = valuenames[(valuenames['country'] == 'FR') & (valuenames['data_source'] == 'gouv.fr')].index value_dict_FR = valuenames.loc[idx_FR].set_index('original_name')['opsd_name'].to_dict() value_dict_FR # Replace all original value names by the OPSD value names FR_re_df_old.replace(value_dict_FR, inplace=True) """ Explanation: Translate values and harmonize energy source level 2 Kept secret if number of installations < 3 If the number of installations is less than 3, it is marked with an s instead of the number 1 or 2 due to statistical confidentiality (as explained by the data provider). Here, the s is changed to < 3. This is done in the same step as the other value translations of the energy sources. End of explanation """ energy_source_dict_FR = valuenames.loc[idx_FR].set_index( 'opsd_name')['energy_source_level_2'].to_dict() display(energy_source_dict_FR) display(FR_re_df_old[['energy_source_level_2']].drop_duplicates()) (FR_re_df_old['energy_source_level_2'].replace(energy_source_dict_FR).unique()) # Create dictionnary in order to assign energy_source to its subtype energy_source_dict_FR = valuenames.loc[idx_FR].set_index( 'opsd_name')['energy_source_level_2'].to_dict() # Column energy_source partly contains subtype information, thus this column is copied # to new column for energy_source_subtype. FR_re_df_old['technology'] = FR_re_df_old['energy_source_level_2'] # Only Photovoltaics should be kept as technology. Hydro should be changed to 'Other or unspecified technology', # Geothermal to NaN, and Wind to Onshore. # 1. np.nan means that technology should not be specified for the respective kind of sources # according to the hierarchy (http://open-power-system-data.org/2016-10-25-opsd_tree.svg) # 2. 'Other or unspecified technology' means that technology should be specified # but it was unclear or missing in the original dataset. 
technology_translation_dictionary = {
    'Solar' : 'Photovoltaics',
    'Wind': 'Onshore',
    'Hydro': 'Other or unspecified technology',
    'Geothermal': np.nan
}
FR_re_df_old['technology'].replace(technology_translation_dictionary, inplace=True)

# The energy source subtype values in the energy_source column are replaced by
# the higher level classification
FR_re_df_old['energy_source_level_2'].replace(energy_source_dict_FR, inplace=True)

# Assign energy_source_level_1 to the dataframe
FR_re_df_old['energy_source_level_1'] = 'Renewable energy'

FR_re_df_old.reset_index(drop=True, inplace=True)

# Choose energy source level 2 entries where energy source level 2 is Bioenergy in order to
# separate Bioenergy subtypes to energy source level 3 and subtypes for the rest to technology
idx_FR_Bioenergy = FR_re_df_old[FR_re_df_old['energy_source_level_2'] == 'Bioenergy'].index

# Assign technology to energy source level 3 for all entries where energy source level 2 is
# Bioenergy and delete those entries from technology
FR_re_df_old[['energy_source_level_3']] = FR_re_df_old.iloc[idx_FR_Bioenergy][['technology']]
FR_re_df_old.loc[idx_FR_Bioenergy, 'technology'] = np.nan

"""
Explanation: Separate and assign energy source level 1-3 and technology
End of explanation
"""

FR_re_df_old[['energy_source_level_1', 'energy_source_level_2', 'energy_source_level_3',
              'technology']].drop_duplicates()

"""
Explanation: Show the hierarchy of the energy types present in the data.
End of explanation
"""

# Column names of merge key have to be named identically
FR_re_df_old.rename(columns={'municipality_code': 'INSEE_COM'}, inplace=True)

# Merge longitude and latitude columns by the Code INSEE
FR_re_df_old = FR_re_df_old.merge(FR_geo[['INSEE_COM', 'lat', 'lon']],
                                  on=['INSEE_COM'],
                                  how='left')

# Translate Code INSEE column back to municipality_code
FR_re_df_old.rename(columns={'INSEE_COM': 'municipality_code'}, inplace=True)

"""
Explanation: Georeferencing
Municipality (INSEE) code to lat/lon
End of explanation
"""

FR_postcode2nuts_path = filepaths['Eurostat']

FR_re_df_old = nuts_converter.add_nuts_information(FR_re_df_old, 'FR', FR_postcode2nuts_path,
                                                   how=['municipality_code', 'latlon'])
# how=['municipality', 'municipality_code', 'latlon']

# Report the number of facilities whose NUTS codes were successfully determined
determined = FR_re_df_old['nuts_1_region'].notnull().sum()
print('NUTS successfully determined for', determined, 'out of', FR_re_df_old.shape[0], 'facilities in FR.')

# Report the number of facilities whose NUTS codes could not be determined
not_determined = FR_re_df_old['nuts_1_region'].isnull().sum()
print('NUTS could not be determined for', not_determined, 'out of', FR_re_df_old.shape[0], 'facilities in FR.')

# Show the facilities without NUTS classification
FR_re_df_old[FR_re_df_old['nuts_1_region'].isnull()]

"""
Explanation: Determine NUTS codes
End of explanation
"""

# For each column present in the new data's column space, but not in the old,
# add an empty column to the old data.
for new_column in FR_re_df.columns:
    if new_column not in FR_re_df_old.columns:
        FR_re_df_old[new_column] = np.nan

# Define the mask to select the municipalities from the old data that are not covered
# by the new data.
not_included = ~(FR_re_df_old['municipality_code'].isin(FR_re_df['municipality_code']))
FR_re_df_old[not_included]

# Add a dummy column to the new data frame
# representing the number of power plants (always 1)
FR_re_df['number_of_installations'] = 1

# Mark the old data rows as aggregations on municipality level.
FR_re_df_old['site_name'] = 'Aggregated data for ' + FR_re_df_old['municipality']

# Concatenate the new data with the old rows referring to the municipalities
# not covered by the new.
FR_re_df = pd.concat([FR_re_df, FR_re_df_old[not_included]], ignore_index=True, axis='index', sort=True)

"""
Explanation: As we can see, the NUTS codes were determined successfully for all the facilities in the dataset.
Integrate old and new data
Some municipalities are not covered by the new data set, provided by ODRE. Now, we find those municipalities and integrate them with the new data. The only column present in the old data, but not in the new, is number_of_installations. Since the old data were aggregated on the municipality level, the column in question refers to the number of power plants in the municipalities. Since the new data covers individual plants, if we set the column number_of_installations to 1 for all the plants in the new data, we will make the two sets consistent with one another and be able to concatenate them. We will set site_name to 'Aggregated data for municipality' for all the rows from the old data, where municipality refers to the name of the municipality for which the row has been compiled.
Note: the electrical capacity in the old data is already in MW, so conversion is not needed.
End of explanation
"""

columns_to_keep = ['EIC_code', 'municipality_group_code', 'IRIS_code', 'as_of_year', 'commissioning_date',
                   'connection_date', 'data_source', 'departement', 'departement_code', 'disconnection_date',
                   'electrical_capacity', 'energy_source_level_1', 'energy_source_level_2',
                   'energy_source_level_3', 'lat', 'lon', 'municipality', 'municipality_code',
                   'municipality_group', 'number_of_installations', 'nuts_1_region', 'nuts_2_region',
                   'nuts_3_region', 'region', 'region_code', 'site_name', 'source_station_code', 'technology']
FR_re_df = FR_re_df[columns_to_keep]
FR_re_df.reset_index(drop=True, inplace=True)

"""
Explanation: Select the columns
Now, we select the columns we want to keep.
End of explanation
"""

visualize_points(FR_re_df['lat'],
                 FR_re_df['lon'],
                 'France',
                 categories=FR_re_df['energy_source_level_2']
                )

"""
Explanation: Visualize
End of explanation
"""

FR_re_df.to_pickle('intermediate/FR_renewables.pickle')
del FR_re_df

"""
Explanation: Save
End of explanation
"""

# Download the data
filepaths = downloader.download_data_for_country('PL')

# Get the local paths to the data files
PL_re_filepath = filepaths['Urzad Regulacji Energetyki']
PL_postcode2nuts_filepath = filepaths['Eurostat']
PL_geo_filepath = filepaths['Geonames']

"""
Explanation: Poland PL
Download
The data which will be processed below is provided by the following data source:
Urzad Regulacji Energetyki (URE) / Energy Regulatory Office - Installed capacities of renewable-energy power plants in Poland. The plants are anonymized in the sense that no names, post codes or geographical coordinates are present. They are described by: the energy type they use, installed capacity, and the województwo (province) and powiat (district) in which they are located.
End of explanation
"""

# Read the data into a pandas dataframe
PL_re_df = pd.read_excel(PL_re_filepath,
                         encoding='latin',
                         header=2,
                         skipfooter=14
                        )

# Show 5 random rows
PL_re_df.sample(n=5)

"""
Explanation: Load and explore the data
The dataset comes as a spreadsheet, which we read with pandas. Let us open it, inspect its columns and clean it a bit before processing it further.
End of explanation """ # Get the mask for selecting the WS plants ws_mask = PL_re_df['Rodzaj_OZE'] == 'WS' # Drop them print('Dropping', ws_mask.sum(), 'out of', PL_re_df.shape[0], 'power plants.') PL_re_df.drop(PL_re_df.index[ws_mask], axis=0, inplace=True) PL_re_df.reset_index(drop=True, inplace=True) """ Explanation: There are only five columns: - Lp.: the ordinal number of the entry (power plant), effectively serving as its identification number. - Województwo: the province (voivodeship) where the plant is located - Powiat: the district where the plant is located - Rodzaj_OZE: the code of the energy the plants uses. According to the legend in the .xlsx file, the codes are as follows: - BG: biogas - BM: biomass - PVA: solar energy - WIL: wind energy - WO: hydroenergy - WS: using the technology of co-firing biomass, biogas or bioliquids with other fuels (fossil fuels and biomass / biogas / bioliquids) - Moc zainstalowana [MW]: installed capacity (in MWs). The type corresponding to WS does not fit into the OPSD energy hiearchy, so we can drop such plants. End of explanation """ # Choose the translation terms for Poland, create and show the dictionary columnnames = pd.read_csv(os.path.join('input', 'column_translation_list.csv')) idx_PL = columnnames[(columnnames['country'] == 'PL') & (columnnames['data_source'] == 'Urzad Regulacji Energetyki')].index column_dict_PL = columnnames.loc[idx_PL].set_index('original_name')['opsd_name'].to_dict() column_dict_PL # Translate column names PL_re_df.rename(columns=column_dict_PL, inplace=True) # Show a couple of rows PL_re_df.head(2) """ Explanation: To ease the work, we can translate the columns' names to English using the OPSD translation tables. End of explanation """ print('The number of missing values in the data:', PL_re_df.isna().sum().sum()) print('Are all capacities proper numbers?', PL_re_df['electrical_capacity'].dtype == 'float64') print('What about the energy codes?', PL_re_df['energy_type'].unique()) # Check the voivodeships print('Show the names of the voivodeships.') PL_re_df['region'].unique() """ Explanation: Inspect the data Let us do few quick checks to see state of the data: - Are there any NA values? - Are all the values in the column electrical_capacity proper numbers? - Are all the values in the column energy_type (codes of energy types) consistent strings? Here we check if all the codes appear in one and only one form. For example, PVA is the code for solar energy and we want to make sure that only PVA appears in the column, not other variations such as pva, Pva etc. - What is the form of the geographical data? Are some districts represented by different strings in different rows? What about the regions (provinces, województwa, voivodeships)? We will need the answers to those questions to know how to proceed with processing. End of explanation """ PL_re_df['region'] = PL_re_df['region'].str.strip().str.capitalize() PL_re_df['region'].unique() """ Explanation: We can see that each name comes in two forms: (1) with the first letter capital and (2) with the first letter lowercase. One province is referred to by three different strings: 'Śląskie', 'śląskie', and 'śląskie ' (the last with a trailing white space). In order to standardize this column, we trim and capitalize all the strings appearing in it. End of explanation """ districts = PL_re_df['district'].unique() districts.sort() districts """ Explanation: Now, let us check the strings for districts (powiats). 
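Before moving on, here is a minimal illustration of the normalisation applied to the region column above, using a toy series rather than the real data:

```python
import pandas as pd

# The three spellings of the same voivodeship mentioned above collapse to one
# after trimming whitespace and capitalizing the first letter.
regions = pd.Series(['Śląskie', 'śląskie', 'śląskie '])
print(regions.str.strip().str.capitalize().unique())
```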
End of explanation """ # Correct the typos PL_re_df.loc[PL_re_df['district'] == 'lipowski', 'district'] = 'lipnowski' PL_re_df.loc[PL_re_df['district'] == 'hojnowski', 'district'] = 'hajnowski' """ Explanation: As we see in the list, the same district can be referred to by more than one string. We identify the following ways a district is referred to in the dataset: 1. by using the noun in the nominative case, capitalized (e.g. Kraków), 2. by prepending m. or m. st. to the form 1 (e.g. m. Kraków or m. st. Warszawy) and 3. by the possesive adjective, lowercase (e.g. krakowski). Some districts, such as Krakow, appear in all the three forms, but there are those which appear in two (e.g. Bytom and m. Bytom). This will pose a problem when we later try to assign the plants their NUTS codes. Furthermore, the NUTS translation tables do not map districts to the codes, but lower administrative units (municipalities) and postcodes to NUTS. We solve this issue at a later point in the notebook, Section Georeferencing (NUTS classification), and not here as it requires heavier processing than warranted during initial explorative analysis and lightweight cleaning of the data. We note that the districts lipowski and hojnowski are misspelled, as they should actually be lipnowski and hajnowski, so we can correct the typos now. End of explanation """ # Choose the translation terms for Poland, create dictionary idx_PL = valuenames[valuenames['country'] == 'PL'].index value_dict_PL = valuenames.loc[idx_PL].set_index('original_name')['opsd_name'].to_dict() # Set energy source level 3 PL_re_df['energy_source_level_3'] = PL_re_df['energy_type'].replace(value_dict_PL) # Create dictionnary in order to assign energy_source_level_2 to its subtype idx_PL = valuenames[valuenames['country'] == 'PL'].index energy_source_dict_PL = valuenames.loc[idx_PL].set_index('original_name')['energy_source_level_2'].to_dict() # Add energy_source_level_2 PL_re_df['energy_source_level_2'] = PL_re_df['energy_type'].replace(energy_source_dict_PL) # Standardize the values for technology # 1. np.nan means that technology should not be specified for the respective kind of sources # according to the hierarchy (http://open-power-system-data.org/2016-10-25-opsd_tree.svg) # 2. 'Other or unspecified technology' means that technology should be specified # but it was unclear or missing in the original dataset. technology_translation_dictionary = { 'BG': np.nan, 'BM': np.nan, 'PVA': 'Other or unspecified technology', # Photovoltaics? 'WIL': 'Other or unspecified technology', # Onshore? 'WO': 'Other or unspecified technology', # Run-of-river } PL_re_df['technology'] = PL_re_df['energy_type'].replace(technology_translation_dictionary) # Add energy_source_level_1 PL_re_df['energy_source_level_1'] = 'Renewable energy' # Show the hierarchy of sources present in the dataset PL_re_df[['energy_source_level_1', 'energy_source_level_2', 'energy_source_level_3', 'technology']].drop_duplicates().sort_values(by='energy_source_level_2') """ Explanation: Harmonising energy levels End of explanation """ # Define the function to standardize district names from the original data def standardize_districts(original_string): if original_string[-1] == ',': # there is one district whose name ends with ','; that's a typo in the data original_string = original_string[:-1] if original_string.startswith('m. st. '): return original_string[7:] elif original_string.startswith('m. 
'): return original_string[3:] elif any([original_string.endswith(suffix) for suffix in ['ski', 'cki', 'zki']]): return 'Powiat ' + original_string else: return original_string # Get geo-information zip_PL_geo = zipfile.ZipFile(PL_geo_filepath) # Read generated postcode/location file PL_geo = pd.read_csv(zip_PL_geo.open('PL.txt'), sep='\t', header=None) # add column names as defined in associated readme file PL_geo.columns = ['country_code', 'postcode', 'place_name', 'admin_name1', 'admin_code1', 'admin_name2', 'admin_code2', 'admin_name3', 'admin_code3', 'lat', 'lon', 'accuracy'] # Drop rows of possible duplicate postal_code PL_geo.drop_duplicates('postcode', keep='last', inplace=True) PL_geo['postcode'] = PL_geo['postcode'].astype(str) # Get the names geonames_districts = PL_geo['admin_name2'].unique() # Show them geonames_districts # Standardize the district names from the original data PL_re_df['standardized_district'] = PL_re_df['district'].apply(standardize_districts) standardized_districts = PL_re_df['standardized_district'].unique() # Check which districts could not be found in the GeoNames data #print(len([x for x in semi if x in geopowiats]), len([x for x in semi if x not in geopowiats])) not_found = set(standardized_districts).difference(set(geonames_districts)) number_of_not_found = len(not_found) total = len(standardized_districts) print('{}/{} names could not be found. Those are:'.format(number_of_not_found, total)) print(not_found) """ Explanation: Georeferencing (NUTS classification) We have already seen that the district names are not standardized and observed that we cannot use them directly to get the corresponding NUTS codes. There is a way to get around this issue. We can do it as folows: 1. First, we find a postcode in the GeoNames zip for Poland that corresponds to each district in the URE data. To do so, we must standardize all the district names to the forms that appear in the GeoNames zip file. 2. Then, we can easily map a postcode to the appropriate NUTS codes using nuts_converter. By inspection, we observe that all the district names in the zip have one of the following two forms: - Noun in the nominative case, capitalized. - Powiat * where * is a possessive adjective. So, we standardize all the strings in the district column as follows: - Remove all the trailing whitespaces and characters other than letters. - If the string starts with m. or m. st., remove m. (or m. st.) from the beginning of the string. - If the string ends with a possessive suffix ski, cki or zki, prepend the string Powiat (note the ending whitespace) to it. End of explanation """ # We define the similarity between two strings, string1 and string2, # as the length of the longest prefix of string1 that appears in string2. # Note 1: this measure of similarity is not necessarily symmetrical. # Note 2: a prefix of a string is its substring that starts from the beginning of the string. def calculate_similarity(string1, string2): for n in range(len(string1), 1, -1): prefix = string1[0:(n-1)] if prefix in string2: return len(prefix) return 0 # Define a function to find, among a group of candidate strings, # the most similar string to the one given as the reference string. 
def find_the_most_similar(reference_string, candidate_strings): the_most_similar = None maximal_similarity = 0 for candidate_string in candidate_strings: similarity = calculate_similarity(reference_string, candidate_string) if similarity > maximal_similarity: maximal_similarity = similarity the_most_similar = candidate_string return the_most_similar, maximal_similarity already_mapped = PL_re_df[['district', 'standardized_district']].drop_duplicates().to_dict(orient='records') already_mapped = {mapping['district'] : mapping['standardized_district'] for mapping in already_mapped if mapping['standardized_district'] in geonames_districts} # Make a dictionary to map each district from the original data to its GeoNames equivalent. # The districts whose standardized versions have been found in the GeoNames data to their standardizations. # The mappings for other districts will be found using the previously defined similarity measures. districts_map = PL_re_df[['district', 'standardized_district']].drop_duplicates().to_dict(orient='records') districts_map = {mapping['district'] : mapping['standardized_district'] for mapping in districts_map} # Override the mappings for the 49 districts whose standardized names have not been found in the GeoNames data. for district, standardized_district in districts_map.items(): #standardized_district = ['standardized_district'] if standardized_district not in geonames_districts: #print('---------') if standardized_district.startswith('Powiat'): standardized_district = standardized_district[7:] #print(district) capitalized = standardized_district.capitalize() lowercase = standardized_district.lower() candidate1, similarity1 = find_the_most_similar(capitalized, geonames_districts) candidate2, similarity2 = find_the_most_similar(lowercase, geonames_districts) if similarity1 > similarity2: districts_map[district] = candidate1 #print('\t', candidate1, similarity1) elif similarity2 > similarity1: districts_map[district] = candidate2 #print('\t', candidate2, similarity2) else: # Break the ties by mapping to the shorter string if len(candidate1) < len(candidate2): districts_map[district] = candidate1 #print('\t', candidate1, '|', candidate2, similarity1) else: districts_map[district] = candidate2 #print('\t', candidate2, '|', candidate1, similarity2) # Apply the override to PL_re_df PL_re_df['standardized_district'] = PL_re_df['district'].apply(lambda district: districts_map[district]) # Show the results PL_re_df[['district', 'standardized_district']].drop_duplicates() """ Explanation: We can now apply a heuristic method for finding the corresponding name in the GeoNames data. It is based on similarity between strings. It turns out that it works fine, except for a couple of cases, which we deal with manually. End of explanation """ # Clear the mappings for wołowski, Nowy Sącz, rzeszowski, hojnowski. for district in ['wołowski', 'm. Nowy Sącz', 'rzeszowski', 'hojnowski']: districts_map[district] = '' PL_re_df.loc[PL_re_df['district'] == district, 'standardized_district'] = '' # For each mapping, select a postcode from the GeoNames data df_dict = {'original' : [], 'geonames' : []} for original_name in districts_map: geonames_name = districts_map[original_name] df_dict['original'].append(original_name) df_dict['geonames'].append(geonames_name) mapping_df = pd.DataFrame.from_dict(df_dict) # To make sure that the selected postcodes do appear in the NUTS table, # we drop, from PL_geo, all rows with the postcodes not in the postcode-to-NUTS table for Poland. 
PL_table = nuts_converter.open_postcode2nuts(filepaths['Eurostat'])['CODE'] PL_geo = pd.merge(PL_geo, PL_table, how='inner', left_on='postcode', right_on='CODE') PL_geo.drop(['CODE'], axis='columns', inplace=True) # merged = pd.merge(mapping_df, PL_geo[['admin_name2', 'postcode']], how='left', left_on='geonames', right_on='admin_name2') # Rename the column postcode to make its meaning straightforward merged.rename(columns={'postcode' : 'random_postcode'}, inplace=True) merged = merged.drop_duplicates(['geonames']) print(PL_re_df.shape) PL_re_df = pd.merge(PL_re_df, merged[['geonames', 'random_postcode']], how='left', left_on='standardized_district', right_on='geonames') # Show results PL_re_df.head(2) """ Explanation: The following districts have not been mapped correctly: wołowski, m. Nowy Sącz and rzeszowski. Let us clear their mappings so that we can assign them their NUTS codes manually later. End of explanation """ display(PL_re_df[PL_re_df['random_postcode'].isnull()]) PL_re_df['random_postcode'].isnull().sum() """ Explanation: Show the rows for which we could not find postcodes. End of explanation """ PL_postcode2nuts_path = filepaths['Eurostat'] PL_re_df = nuts_converter.add_nuts_information(PL_re_df, 'PL', PL_postcode2nuts_path, postcode_column='random_postcode', how=['postcode']) # Report the number of facilites whose NUTS codes were successfully sudetermined determined = PL_re_df['nuts_1_region'].notnull().sum() print('NUTS successfully determined for', determined, 'out of', PL_re_df.shape[0], 'facilities in PL.') # Manual assignments manual_nuts3_map = { 'wołowski' : 'PL518', 'm. Nowy Sącz' : 'PL218', 'rzeszowski' : 'PL325' } for district in manual_nuts3_map: nuts3 = manual_nuts3_map[district] nuts2 = nuts3[:-1] nuts1 = nuts3[:-2] mask = (PL_re_df['district'] == district) PL_re_df.loc[mask, ['nuts_1_region', 'nuts_2_region', 'nuts_3_region']] = [nuts1, nuts2, nuts3] # Report the number of facilites whose NUTS codes could not be determined not_determined = PL_re_df['nuts_1_region'].isnull().sum() print('NUTS could not be determined for', not_determined, 'out of', PL_re_df.shape[0], 'facilities in PL.') """ Explanation: There are only 17 such power plants and all of them are placed in the districts which we deliberately left out for manual classification. Add NUTS information We add the NUTS information as usual, using the converter. After that, we manually add the codes for the left-out districts as follows: | District | NUTS_1 | NUTS_2 | NUTS_3 | |----------|--------|--------|--------| | wołowski | PL5 | PL51 | PL518 | | m. 
Nowy Sącz | PL2 | PL21 | PL218 | | rzeszowski | PL3 | PL32 | PL325 | End of explanation """ PL_re_df['data_source'] = 'Urzad Regulacji Energetyki' PL_re_df['as_of_year'] = 2019 # The year for which the dataset has been compiled by the data source """ Explanation: Add data source and year End of explanation """ # Choose which column to keep PL_re_df = PL_re_df.loc[:, [ 'URE_id', 'region', 'district', 'nuts_1_region', 'nuts_2_region', 'nuts_3_region', 'electrical_capacity', 'energy_source_level_1', 'energy_source_level_2', 'energy_source_level_3', 'technology', 'data_source', 'as_of_year']] """ Explanation: Select columns End of explanation """ PL_re_df.to_pickle('intermediate/PL_renewables.pickle') del PL_re_df """ Explanation: Save End of explanation """ # Download the data and get the local paths of the downloaded files filepaths = downloader.download_data_for_country('CH') CH_re_filepath = filepaths['BFE'] CH_geo_filepath = filepaths['Geonames'] CH_postcode2nuts_filepath = filepaths['Eurostat'] # Get data of renewables per municipality CH_re_df = pd.read_excel(CH_re_filepath, sheet_name='KEV BezÃŒger 2018', encoding='UTF8', thousands='.', decimals=',' #header=[0] #skipfooter=9, # contains summarized values #index_col=[0, 1], # required for MultiIndex #converters={'Code officiel géographique':str} ) """ Explanation: Switzerland CH Download and read The data which will be processed below is provided by the following data sources: Swiss Federal Office of Energy - Data of all renewable power plants receiving "Kostendeckende EinspeisevergÃŒtung" (KEV) which is the Swiss feed in tarif for renewable power plants. Geodata is based on municipality codes. The available municipality code in the original data provides an approximation for the geocoordinates of the renewable power plants. The postcode will be assigned to latitude and longitude coordinates with the help of the postcode table. geonames.org - The postcode data from Switzerland is provided by Geonames and licensed under a Creative Commons Attribution 3.0 license. 
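One practical note on the read_excel call above: the sheet name is hard-coded, so if the BFE workbook layout changes between releases, it can help to list the available sheets first. A small sketch, reusing the CH_re_filepath variable defined above:

```python
import pandas as pd

# Inspect the workbook before reading a specific sheet;
# CH_re_filepath comes from the download step above.
workbook = pd.ExcelFile(CH_re_filepath)
print(workbook.sheet_names)
```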
End of explanation """ # Choose the translation terms for Switzerland, create dictionary and show dictionary idx_CH = columnnames[columnnames['country'] == 'CH'].index column_dict_CH = columnnames.loc[idx_CH].set_index('original_name')['opsd_name'].to_dict() column_dict_CH # Translate columnnames CH_re_df.columns = [column_name.replace("\n", "") for column_name in CH_re_df.columns] CH_re_df.rename(columns=column_dict_CH, inplace=True) """ Explanation: Translate column names End of explanation """ CH_re_df['data_source'] = 'BFE' """ Explanation: Add data source End of explanation """ # Choose the translation terms for Switzerland, create dictionary idx_CH = valuenames[valuenames['country'] == 'CH'].index value_dict_CH = valuenames.loc[idx_CH].set_index('original_name')['opsd_name'].to_dict() """ Explanation: Harmonize energy source hierarchy and translate values End of explanation """ # Assign energy_source_level_1 to the dataframe CH_re_df['energy_source_level_1'] = 'Renewable energy' # Create dictionnary in order to assign energy_source to its subtype #energy_source_dict_CH = valuenames.loc[idx_CH].set_index('opsd_name')['energy_source_level_2'].to_dict() # # ...and the energy source subtype values in the energy_source column are replaced by # the higher level classification #CH_re_df['energy_source_level_2'].replace(energy_source_dict_CH, inplace=True) CH_re_df['energy_source_level_3'] = CH_re_df['technology'] # Create dictionnary in order to assign energy_source_level_2 to its subtype idx_CH = valuenames[valuenames['country'] == 'CH'].index energy_source_dict_CH = valuenames.loc[idx_CH].set_index('original_name')['energy_source_level_2'].to_dict() # Add energy_source_level_2 CH_re_df['energy_source_level_2'] = CH_re_df['energy_source_level_2'].replace(energy_source_dict_CH) # Translate values in order to standardize energy_source_level_3 value_dict_CH = valuenames.loc[idx_CH].set_index('original_name')['opsd_name'].to_dict() CH_re_df['energy_source_level_3'].replace(value_dict_CH, inplace=True) # Standardize the values for technology # 1. np.nan means that technology should not be specified for the respective kind of sources # according to the hierarchy (http://open-power-system-data.org/2016-10-25-opsd_tree.svg) # 2. 'Other or unspecified technology' means that technology should be specified # but it was unclear or missing in the original dataset. 
technology_translation_dictionary = { 'KlÀrgasanlage': np.nan, 'Dampfprozess': 'Steam turbine', 'ÃŒbrige Biomasse - WKK-Anlage': 'Other or unspecified technology', 'ÃŒbrige Biomasse - Dampfprozess': 'Steam turbine', 'Schlammverbrennungsanlage': 'Combustion engine', 'WKK-Prozess': 'Other or unspecified technology', 'Kehrrichtverbrennungsanlage': 'Combustion engine', 'Integrierte Anlage': 'Photovoltaics', 'Angebaute Anlage': 'Photovoltaics', 'Freistehende Anlage': 'Photovoltaics', 'Trinkwasserkraftwerk': 'Other or unspecified technology', 'Durchlaufkraftwerk': 'Run-of-river', 'Dotierwasserkraftwerk': 'Other or unspecified technology', 'Ausleitkraftwerk': 'Other or unspecified technology', 'Wind Offshore': 'Other or unspecified technology', 'Abwasserkraftwerk': 'Other or unspecified technology', 'Unbekannt': 'Other or unspecified technology', np.nan: 'Onshore', None: 'Onshore' } CH_re_df['technology'].replace(technology_translation_dictionary, inplace=True) # Add energy_source_level_1 CH_re_df['energy_source_level_1'] = 'Renewable energy' # Show the hierarchy of sources present in the dataset energy_columns = ['energy_source_level_1', 'energy_source_level_2', 'energy_source_level_3', 'technology'] CH_re_df[energy_columns].drop_duplicates().sort_values(by='energy_source_level_2') """ Explanation: Separate and assign energy source level 1-3 and technology End of explanation """ drop_mask = (CH_re_df['energy_source_level_3'] == 'Biomass and biogas') & \ (CH_re_df['technology'] == 'Steam turbine') drop_indices = drop_mask[drop_mask].index CH_re_df.drop(drop_indices, axis='index', inplace=True) CH_re_df.reset_index(drop=True, inplace=True) """ Explanation: The power plants with energy_source_level_3=Biomass and biogas and technology=Steam turbine do not belong to the renewable energy power plants, so we can remove them. 
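A detail of the technology mapping applied above that is easy to miss: Series.replace also accepts np.nan as a dictionary key, which is how wind turbines without a reported technology end up classified as Onshore. A toy illustration with made-up values taken from the same dictionary:

```python
import numpy as np
import pandas as pd

# Missing entries can be remapped together with regular values in one replace call.
tech = pd.Series(['Durchlaufkraftwerk', np.nan, 'Angebaute Anlage'])
mapping = {'Durchlaufkraftwerk': 'Run-of-river',
           'Angebaute Anlage': 'Photovoltaics',
           np.nan: 'Onshore'}
print(tech.replace(mapping).tolist())
# ['Run-of-river', 'Onshore', 'Photovoltaics']
```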
End of explanation """ CH_re_df.replace(value_dict_CH, inplace=True) """ Explanation: Replace the rest of the original terms with their OPSD equivalents End of explanation """ # Get geo-information zip_CH_geo = zipfile.ZipFile(CH_geo_filepath) # Read generated postcode/location file CH_geo = pd.read_csv(zip_CH_geo.open('CH.txt'), sep='\t', header=None) # add column names as defined in associated readme file CH_geo.columns = ['country_code', 'postcode', 'place_name', 'admin_name1', 'admin_code1', 'admin_name2', 'admin_code2', 'admin_name3', 'admin_code3', 'lat', 'lon', 'accuracy'] # Drop rows of possible duplicate postal_code CH_geo.drop_duplicates('postcode', keep='last', inplace=True) CH_geo['postcode'] = CH_geo['postcode'].astype(str) # harmonise data class CH_geo.postcode = CH_geo.postcode.astype(int) # Add longitude/latitude infomation assigned by municipality code CH_re_df = pd.merge(CH_re_df, CH_geo[['lat', 'lon', 'postcode']], left_on='municipality_code', right_on='postcode', how='left' ) zip_CH_geo.close() """ Explanation: Georeferencing Postcode to lat/lon (WGS84) End of explanation """ CH_postcode2nuts_path = filepaths['Eurostat'] # Use the string versions of postcode and municipality code columns CH_re_df['postcode_str'] = CH_re_df['postcode'].astype(str).str[:-2] CH_re_df['municipality_code_str'] = CH_re_df['municipality_code'].astype(str) CH_re_df = nuts_converter.add_nuts_information(CH_re_df, 'CH', CH_postcode2nuts_path, postcode_column='postcode_str', municipality_code_column='municipality_code_str', lau_name_type='NATIONAL', how=['postcode', 'municipality']) # Report the number of facilites whose NUTS codes were successfully sudetermined determined = CH_re_df['nuts_1_region'].notnull().sum() print('NUTS successfully determined for', determined, 'out of', CH_re_df.shape[0], 'facilities in CH.') # Report the number of facilites whose NUTS codes could not be determined not_determined = CH_re_df['nuts_1_region'].isnull().sum() print('NUTS could not be determined for', not_determined, 'out of', CH_re_df.shape[0], 'facilities in CH.') """ Explanation: Add NUTS information End of explanation """ CH_re_df[CH_re_df['nuts_1_region'].isnull()][['postcode', 'municipality']] # Check the facilities without NUTS classification no_nuts = CH_re_df['nuts_1_region'].isnull() # Find the masks where some information for finding the proper NUTS code is present municipality_name_present = ~(CH_re_df['municipality'].isnull()) # Show the cases where NUTS classification failed even though it shouldn't have problematic_municipality_names = CH_re_df[no_nuts & municipality_name_present]['municipality'].unique() print('Problematic municipalities:', ', '.join(list(problematic_municipality_names)) + '.') print('Are those names present in the official NUTS tables for CH?') if nuts_converter.municipality2nuts_df['municipality'].isin(problematic_municipality_names).any(): print('At least one is.') else: print('No, none is.') """ Explanation: Let us check the stations for which NUTS codes could not be determined. End of explanation """ # kW to MW CH_re_df['electrical_capacity'] /= 1000 # kWh to MWh CH_re_df['production'] /= 1000 """ Explanation: We see that the municipalities of only plants for which we could not determine the NUTS codes cannot be found in the official translation tables, so there was no possibility to assign them their NUTS classification codes. 
Transform electrical_capacity from kW to MW End of explanation """ columns_to_keep = ['project_name', 'energy_source_level_2','energy_source_level_3', 'technology', 'electrical_capacity', 'production', 'tariff', 'commissioning_date', 'contract_period_end', 'address', 'municipality_code', 'municipality', 'nuts_1_region', 'nuts_2_region', 'nuts_3_region', 'canton', 'company', 'title', 'surname', 'first_name', 'data_source', 'energy_source_level_1', 'lat', 'lon', 'postcode'] CH_re_df = CH_re_df.loc[:, columns_to_keep] CH_re_df.reset_index(drop=True, inplace=True) """ Explanation: Select columns to keep End of explanation """ visualize_points(CH_re_df['lat'], CH_re_df['lon'], 'Switzerland', categories=CH_re_df['energy_source_level_2'] ) """ Explanation: Visualize End of explanation """ CH_re_df.to_pickle('intermediate/CH_renewables.pickle') del CH_re_df """ Explanation: Save End of explanation """ # Download the data and get the local paths to the corresponding files filepaths = downloader.download_data_for_country('UK') UK_re_filepath = filepaths['BEIS'] UK_geo_filepath = filepaths['Geonames'] UK_postcode2nuts_filepath = filepaths['Eurostat'] # Read the renewable powerplants data into a dataframe UK_re_df = pd.read_csv(UK_re_filepath, header=2, encoding='latin1', parse_dates=['Record Last Updated (dd/mm/yyyy)','Operational'], infer_datetime_format=True, thousands=',' ) # Drop empty columns and rows UK_re_df.dropna(axis='index', how='all', inplace=True) UK_re_df.dropna(axis='columns', how='all', inplace=True) """ Explanation: Check and validation of the renewable power plants list as well as the creation of CSV/XLSX/SQLite files can be found in Part 2 of this script. It also generates a daily time series of cumulated installed capacities by energy source. United Kingdom UK The data for the UK are provided by the following sources: UK Government Department of Business, Energy & Industrial Strategy (BEIS) - the data contain information on the UK renewable energy sources and are updated at the end of each quarter. geonames.org - the data about latitued and longitudes of the UK postcodes. Download and Read End of explanation """ # Keep only operational facilities in the dataset UK_re_df = UK_re_df.loc[UK_re_df["Development Status"] == "Operational"] UK_re_df.reset_index(inplace=True, drop=True) # Standardize string columns strip_and_lower = ['CHP Enabled'] strip_only = ['Country', 'County', 'Operator (or Applicant)', 'Mounting Type for Solar'] for column in strip_and_lower: util.helper.standardize_column(UK_re_df, column, lower=True) for column in strip_only: util.helper.standardize_column(UK_re_df, column, lower=False) # Drop Flywheels, Battery and Liquid Air Energy Storage UK_re_df = UK_re_df[~UK_re_df['Technology Type'].isin(['Flywheels', 'Battery', 'Liquid Air Energy Storage'])] UK_re_df.reset_index(drop=True, inplace=True) # Copy the column "Technology Type" to a new column named "technology" UK_re_df['technology'] = UK_re_df['Technology Type'] """ Explanation: Clean the data The downloaded dataset has to be cleaned: - Both operational and nonoperational facilities are present in the set. However, only operational facilities are of the interest, so the dataset has to be filtered on this condition. - Some columns don't have standardized values. For example, CHP Enabled contains five different strings: "No", "Yes", "no", "yes", and "No " with a trailing white space, even though they represent only two distinct values. 
So, we have to ensure a 1-to-1 mapping between the true values of a feature and their representations for all the features present in the set. - The technologies Battery, Flywheels and Liquid Air Energy Storage are of no interest, so the facilities using them should be omitted. End of explanation """ # Choose the translation terms for the UK and create the translation dictionary idx_UK = columnnames[columnnames['country'] == 'UK'].index column_dict_UK = columnnames.loc[idx_UK].set_index('original_name')['opsd_name'].to_dict() # Show the dictionary column_dict_UK # Translate column names UK_re_df.rename(columns=column_dict_UK, inplace=True) """ Explanation: Translate column names End of explanation """ UK_re_df['data_source'] = 'BEIS' """ Explanation: Add data source End of explanation """ # Create dictionnary in order to assign energy_source_level_2 to its subtype idx_UK = valuenames[valuenames['country'] == 'UK'].index energy_source_dict_UK = valuenames.loc[idx_UK].set_index('original_name')['energy_source_level_2'].to_dict() # Add energy_source_level_2 UK_re_df['energy_source_level_2'] = UK_re_df['energy_source_level_3'].replace(energy_source_dict_UK) # Translate values in order to standardize energy_source_level_3 value_dict_UK = valuenames.loc[idx_UK].set_index('original_name')['opsd_name'].to_dict() UK_re_df['energy_source_level_3'].replace(value_dict_UK, inplace=True) # Standardize the values for technology # 1. np.nan means that technology should not be specified for the respective kind of sources # according to the hierarchy (http://open-power-system-data.org/2016-10-25-opsd_tree.svg) # 2. 'Other or unspecified technology' means that technology should be specified # but it was unclear or missing in the original dataset. technology_translation_dictionary = { 'Biomass (co-firing)': 'Other or unspecified technology', 'Biomass (dedicated)': 'Other or unspecified technology', 'Advanced Conversion Technologies': 'Other or unspecified technology', 'Anaerobic Digestion': 'Other or unspecified technology', 'EfW Incineration': np.nan, 'Large Hydro': 'Other or unspecified technology', 'Small Hydro': 'Other or unspecified technology', 'Landfill Gas': np.nan, 'Solar Photovoltaics': 'Photovoltaics', 'Sewage Sludge Digestion': np.nan, 'Tidal Barrage and Tidal Stream': np.nan, 'Shoreline Wave': np.nan, 'Wind Offshore': 'Offshore', 'Wind Onshore': 'Onshore', 'Pumped Storage Hydroelectricity': 'Pumped storage' } UK_re_df['technology'].replace(technology_translation_dictionary, inplace=True) # Add energy_source_level_1 UK_re_df['energy_source_level_1'] = 'Renewable energy' # Show the hierarchy of sources present in the dataset UK_re_df[['energy_source_level_1', 'energy_source_level_2', 'energy_source_level_3', 'technology']].drop_duplicates() """ Explanation: Translate values and harmonise energy source levels 1-3 and technology End of explanation """ # Define a wrapper for bng_to_latlon for handling None values def to_lat_lon(easting, northing): if pd.isnull(easting) or pd.isnull(northing): return (None, None) else: return bng_to_latlon.OSGB36toWGS84(easting, northing) # Convert easting and northing columns to numbers UK_re_df['X-coordinate'] = pd.to_numeric( UK_re_df['X-coordinate'].astype(str).str.replace(',', ''), errors='coerce' ) UK_re_df['Y-coordinate'] = pd.to_numeric( UK_re_df['Y-coordinate'].astype(str).str.replace(',', ''), errors='coerce' ) # Convert easting and northing coordinates to standard latitude and longitude latlon = UK_re_df.apply(lambda row: to_lat_lon(row["X-coordinate"], 
row["Y-coordinate"]),
                          axis=1
                         )

# Split a column of (latitude, longitude) pairs into two separate coordinate columns
latitude = latlon.apply(lambda x: x[0])
longitude = latlon.apply(lambda x: x[1])

# Add them to the dataframe
UK_re_df['latitude'] = latitude
UK_re_df['longitude'] = longitude

"""
Explanation: Georeferencing
The facilities' location details comprise the information on the address, county, region, country (England, Scotland, Wales, Northern Ireland), post code, and Easting (X) and Northing (Y) coordinates of each facility in the OSGB georeferencing system.
To convert the easting and northing coordinates to standard WGS84 latitude and longitude, we use the package bng_latlon.
End of explanation
"""

# Get geo-information
zip_UK_geo = zipfile.ZipFile(UK_geo_filepath)

# Read generated postcode/location file
UK_geo = pd.read_csv(zip_UK_geo.open('GB_full.txt'), sep='\t', header=None)

# add column names as defined in associated readme file
UK_geo.columns = ['country_code', 'postcode', 'place_name', 'admin_name1', 'admin_code1',
                  'admin_name2', 'admin_code2', 'admin_name3', 'admin_code3', 'lat', 'lon', 'accuracy']

# Drop rows of possible duplicate postal_code
UK_geo.drop_duplicates('postcode', keep='last', inplace=True)
UK_geo['postcode'] = UK_geo['postcode'].astype(str)

# Find the rows where latitude and longitude are unknown
missing_latlon_mask = UK_re_df['latitude'].isna() | UK_re_df['longitude'].isna()
missing_latlon = UK_re_df[missing_latlon_mask]

# Add longitude/latitude information assigned by post code
updated_latlon = pd.merge(missing_latlon,
                          UK_geo[['lat', 'lon', 'postcode']],
                          left_on='postcode',
                          right_on='postcode',
                          how='left'
                         )

# Return the updated rows to the original frame
UK_re_df = pd.merge(UK_re_df,
                    updated_latlon[['uk_beis_id', 'lat', 'lon']],
                    on='uk_beis_id',
                    how='left'
                   )

# Use the bng_to_latlon coordinates (columns: 'latitude' and 'longitude') if present,
# otherwise, use those obtained with UK_geo (columns: 'lat' and 'lon').
UK_re_df['longitude'] = UK_re_df.apply(lambda row: row['longitude'] if not pd.isnull(row['longitude']) else row['lon'],
                                       axis=1
                                      )
UK_re_df['latitude'] = UK_re_df.apply(lambda row: row['latitude'] if not pd.isnull(row['latitude']) else row['lat'],
                                      axis=1
                                     )

# Drop the UK_geo columns (lat/lon)
# as the information was moved to the 'latitude' and 'longitude' columns.
UK_re_df.drop(['lat', 'lon'], axis='columns', inplace=True)

zip_UK_geo.close()

"""
Explanation: Cases with unknown Easting and Northing coordinates
If the Easting and Northing coordinates of a facility are not provided, its latitude and longitude cannot be determined. For such sources, we look up the WGS84 coordinates in the geodataset provided by geonames.org, where the UK postcodes are paired with their latitudes and longitudes.
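As a side note on the easting/northing conversion performed earlier in this section: the same OSGB-to-WGS84 transformation can be cross-checked with pyproj (not otherwise used in this notebook, so treat this as an optional sketch), since the British National Grid is EPSG:27700 and WGS84 is EPSG:4326:

```python
from pyproj import Transformer

# EPSG:27700 = British National Grid (OSGB eastings/northings),
# EPSG:4326  = WGS84 longitude/latitude.
transformer = Transformer.from_crs('EPSG:27700', 'EPSG:4326', always_xy=True)

# Example point (roughly central London); with always_xy=True the call takes
# (easting, northing) and returns (longitude, latitude).
lon, lat = transformer.transform(530000, 180000)
print(round(lat, 4), round(lon, 4))
```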
End of explanation """ # Find the rows where latitude and longitude are unknown missing_latlon_mask = UK_re_df['latitude'].isna() | UK_re_df['longitude'].isna() missing_latlon = UK_re_df[missing_latlon_mask].copy() missing_latlon = missing_latlon.reset_index() # Determine their post code prefixes prefixes = missing_latlon.apply(lambda row: str(row['postcode']).split(' ')[0], axis=1 ) missing_latlon['Prefix'] = prefixes # Determine the centroids of the areas covered by the prefixes grouped_UK_geo=UK_geo.groupby(by=lambda i: str(UK_geo['postcode'].loc[i]).split(' ')[0]) # Assing the centroid coordinates to the facilities with unknown coordinates updated_latlon = pd.merge(missing_latlon, grouped_UK_geo.mean(), left_on="Prefix", right_index=True, how="left" ) # Return the updated rows to the original frame UK_re_df = pd.merge(UK_re_df, updated_latlon[['uk_beis_id', 'lat', 'lon']], on='uk_beis_id', how='left' ) # Keep the already known coordinates (columns: 'latitude' and 'longitude') if present, # otherwise, use those obtained by approximation (columns: 'lat' and 'lon'). UK_re_df['longitude'] = UK_re_df.apply(lambda row: row['longitude'] if not pd.isnull(row['longitude']) else row['lon'], axis=1 ) UK_re_df['latitude'] = UK_re_df.apply(lambda row: row['latitude'] if not pd.isnull(row['latitude']) else row['lat'], axis=1 ) # Drop the UK_geo columns (lat/lon) # as the information was moved to the 'latitude' and 'longitude' columns. UK_re_df.drop(['lat', 'lon'], axis='columns', inplace=True) """ Explanation: Cases for approximation In the cases where the full post code was not present in geonames.org, use its prefix to find the latitude / longitude pairs of locations covered by that prefix. Then, approximate those facilities' locations by the centroids of their prefix areas. End of explanation """ UK_postcode2nuts_filepath = filepaths['Eurostat'] UK_re_df = nuts_converter.add_nuts_information(UK_re_df, 'UK', UK_postcode2nuts_filepath, latitude_column='latitude', longitude_column='longitude', closest_approximation=True, lau_name_type='NATIONAL', how=['latlon', 'municipality']) # Report the number of facilites whose NUTS codes were successfully sudetermined determined = UK_re_df['nuts_1_region'].notnull().sum() print('NUTS successfully determined for', determined, 'out of', UK_re_df.shape[0], 'facilities in UK.') # Report the number of facilites whose NUTS codes could not be determined not_determined = UK_re_df['nuts_1_region'].isnull().sum() print('NUTS could not be determined for', not_determined, 'out of', UK_re_df.shape[0], 'facilities in UK.') """ Explanation: Add NUTS information End of explanation """ UK_re_df[UK_re_df['nuts_1_region'].isnull()] """ Explanation: Let us see the facilities for which the NUTS codes could not be determined. End of explanation """ visualize_points(UK_re_df['latitude'], UK_re_df['longitude'], 'United Kingdom', categories=UK_re_df['energy_source_level_2'] ) """ Explanation: There are two such rows only. The langitude and longitude coordinates, as well as municipality codes, are missing from the data set, so NUTS codes could not have been determined. 
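The prefix-based fallback used above can be pictured with a toy geonames-style table (made-up postcodes and coordinates): grouping by the outward part of the postcode and averaging the coordinates yields the centroid that stands in for the unknown location:

```python
import pandas as pd

# Made-up postcode/coordinate pairs in the geonames layout.
geo = pd.DataFrame({'postcode': ['AB1 1AA', 'AB1 2BB', 'AB1 3CC', 'CD2 9ZZ'],
                    'lat': [57.10, 57.12, 57.14, 53.50],
                    'lon': [-2.10, -2.12, -2.14, -1.10]})

# Outward code (prefix) = everything before the space.
geo['prefix'] = geo['postcode'].str.split(' ').str[0]

# One centroid per prefix: the mean of all coordinates sharing that prefix.
print(geo.groupby('prefix')[['lat', 'lon']].mean())
```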
Visualize the data End of explanation """ max_X = UK_re_df['X-coordinate'].max() min_X = UK_re_df['X-coordinate'].min() max_Y = UK_re_df['Y-coordinate'].max() min_Y = UK_re_df['Y-coordinate'].min() figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k') ax = plt.axes(projection=ccrs.OSGB()) ax.coastlines('10m') ax.scatter(UK_re_df['X-coordinate'], UK_re_df['Y-coordinate'],s=0.5) plt.show() """ Explanation: We see that some facilities appear to be located in the sea. Let us plot the original OSGB coordinates to see if translation to the standard longitude and latitude coordinates failed for some locations. End of explanation """ # Rename 'longitude' and 'latitude' to 'lon' and 'lat' to conform to the naming convention # used for other countries. UK_re_df.rename(columns={'longitude': 'lon', 'latitude': 'lat'}, inplace=True) # Define the columns to keep columns_of_interest = ['commissioning_date', 'uk_beis_id', 'operator', 'site_name', 'energy_source_level_1', 'energy_source_level_2', 'energy_source_level_3', 'technology', 'electrical_capacity', 'chp', 'support_robranding', 'support_fit', 'support_cfd', 'capacity_individual_turbine', 'number_of_turbines', 'solar_mounting_type', 'status', 'address', 'municipality', 'nuts_1_region', 'nuts_2_region', 'nuts_3_region', 'region', 'country', 'postcode', 'lon', 'lat', 'data_source' ] for col in columns_of_interest: if col not in UK_re_df.columns: print(col) # Clean the dataframe from columns other than those specified above UK_re_df = UK_re_df.loc[:, columns_of_interest] UK_re_df.reset_index(drop=True, inplace=True) UK_re_df.columns """ Explanation: As we can see, the maps are basically the same, which confirms that translation to the longitude and latitude coordinates is done correctly and that they reflect the positions specified by the original X and Y OSGB coordinates. Keep only the columns of interest End of explanation """ UK_re_df.to_pickle('intermediate/UK_renewables.pickle') """ Explanation: Save End of explanation """ # Download the data and get the local paths to the corresponding files filepaths = downloader.download_data_for_country('SE') print(filepaths) SE_re_filepath = filepaths['Vindbrukskollen'] SE_geo_filepath = filepaths['Geonames'] SE_postcode2nuts_filepath = filepaths['Eurostat'] """ Explanation: Sweden The data for Sweden are provided by the following sources: Vindbrukskollen - Wind farms in Sweden. 
End of explanation
"""

# Define the function for converting the column "Senast sparad" to date type
#def from_int_to_date(int_date):
#    print(int_date)
#    str_date = str(int_date)
#    year = str_date[:4]
#    month = str_date[4:6]
#    day = str_date[6:8]
#    str_date = '{}/{}/{}'.format(year, month, day)
#    return pd.to_datetime(str_date, format='%Y/%m/%d')

# Read the data
SE_re_df = pd.read_excel(SE_re_filepath,
                         sheet_name='Vindkraftverk',
                         na_values='-',
                         parse_dates=['Uppfört', 'Senast sparad'],
                         infer_datetime_format=True,
                         #converters={'Senast sparad' : from_int_to_date}
                        )

# Show 5 rows from the beginning
SE_re_df.head(5)

"""
Explanation: Load the data
End of explanation
"""

# Drop empty rows and columns
SE_re_df.dropna(axis='index', how='all', inplace=True)
SE_re_df.dropna(axis='columns', how='all', inplace=True)

# Make sure that the column Uppfört is of the date type and correctly formatted
SE_re_df['Uppfört'] = pd.to_datetime(SE_re_df['Uppfört'], format='%Y-%m-%d')

# Keep only operational wind farms
subset_mask = SE_re_df['Status'].isin(['Beviljat', 'Uppfört'])
SE_re_df.drop(SE_re_df[~subset_mask].index, axis='index', inplace=True)

# Remove the farms whose capacity is not known.
subset_mask = SE_re_df['Maxeffekt (MW)'].isna()
SE_re_df.drop(SE_re_df[subset_mask].index, axis='index', inplace=True)

# Standardize string columns
string_columns = ['Modell', 'Fabrikat', 'ElomrÃ¥de', 'Kommun', 'LÀn', 'Handlingstyp', 'Placering']
for col in string_columns:
    util.helper.standardize_column(SE_re_df, col, lower=False)

"""
Explanation: Clean the data
Drop empty rows and columns.
Make sure that the column Uppfört is of the date type.
Keep only operational wind farms (Status is Beviljat (permission granted) or Uppfört (the farm exists)).
Remove the farms whose capacity is not known.
Standardize string columns.
End of explanation
"""

# Choose the translation terms for Sweden and create the translation dictionary
idx_SE = columnnames[columnnames['country'] == 'SE'].index
column_dict_SE = columnnames.loc[idx_SE].set_index('original_name')['opsd_name'].to_dict()

# Show the dictionary
display(column_dict_SE)

# Translate column names
SE_re_df.rename(columns=column_dict_SE, inplace=True)

"""
Explanation: Translate column names
End of explanation
"""

SE_re_df.loc[(SE_re_df['commissioning_date'].dt.year == 1900), 'commissioning_date'] = np.nan

"""
Explanation: Correct the dates
Some wind farms are declared to be commissioned in the year 1900. We set those dates to np.nan.
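A side note on the cleaning cell above: it calls util.helper.standardize_column, a project-internal helper whose implementation is not shown in this notebook. The following stand-in is only an assumption about what such a helper plausibly does (trim whitespace, optionally lower-case), written to make the call understandable; the real function may differ:

```python
import pandas as pd

def standardize_column_sketch(df: pd.DataFrame, column: str, lower: bool = False) -> None:
    # Hypothetical stand-in, not the project's actual helper:
    # strip surrounding whitespace and optionally lower-case the column in place.
    cleaned = df[column].astype('string').str.strip()
    if lower:
        cleaned = cleaned.str.lower()
    df[column] = cleaned

# Throwaway example:
demo = pd.DataFrame({'Status': ['  Uppfört', 'Beviljat  ']})
standardize_column_sketch(demo, 'Status')
print(demo['Status'].tolist())
```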
End of explanation """ SE_re_df['data_source'] = 'Vindbrukskollen' """ Explanation: Add source End of explanation """ # Choose the translation terms for Sweden idx_SE = valuenames[valuenames['country'] == 'SE'].index value_dict_SE = valuenames.loc[idx_SE].set_index('original_name')['opsd_name'].to_dict() value_dict_SE # Replace all original value names by the OPSD value names SE_re_df.replace(value_dict_SE, inplace=True) # Set nans in the technology column to 'Unknown or unspecified technology' SE_re_df['technology'].fillna('Unknown or unspecified technology', inplace=True) # Add energy level 2 SE_re_df['energy_source_level_2'] = 'Wind' # Add energy_source_level_1 SE_re_df['energy_source_level_1'] = 'Renewable energy' # Show the hierarchy of sources present in the dataset SE_re_df[['energy_source_level_1', 'energy_source_level_2', 'technology']].drop_duplicates() """ Explanation: Translate values and harmonize energy source levels End of explanation """ # Get latitude and longitude columns lat, lon = util.helper.sweref99tm_latlon_transform(SE_re_df['sweref99tm_north'], SE_re_df['sweref99tm_east']) # Include them in the dataframe SE_re_df['lat'] = lat SE_re_df['lon'] = lon """ Explanation: Georeferencing The coordinates in the columns sweref99tm_north and sweref99tm_east are specified in the SWEREF 99 TM coordinate system, used in Sweden. To convert those coordinates to the usual WGS84 latitudes and longitudes, we use the function sweref99tm_latlon_transform from the module util.helper, provided by Jon Olauson. End of explanation """ SE_postcode2nuts_filepath = filepaths['Eurostat'] SE_re_df = nuts_converter.add_nuts_information(SE_re_df, 'SE', SE_postcode2nuts_filepath, lau_name_type='NATIONAL', how=['municipality', 'latlon']) # Report the number of facilites whose NUTS codes were successfully sudetermined determined = SE_re_df['nuts_1_region'].notnull().sum() print('NUTS successfully determined for', determined, 'out of', SE_re_df.shape[0], 'facilities in SE.') # Report the number of facilites whose NUTS codes could not be determined not_determined = SE_re_df['nuts_1_region'].isnull().sum() print('NUTS could not be determined for', not_determined, 'out of', SE_re_df.shape[0], 'facilities in SE.') """ Explanation: Assigning NUTS codes End of explanation """ # Define which columns should be kept columns_to_keep = ['municipality', 'county', 'nuts_1_region', 'nuts_2_region', 'nuts_3_region', 'lat', 'lon', 'energy_source_level_1', 'energy_source_level_2', 'technology', 'se_vindbrukskollen_id', 'site_name', 'manufacturer', 'electrical_capacity', 'commissioning_date', 'data_source'] # Keep only the selected columns SE_re_df = SE_re_df.loc[:, columns_to_keep] """ Explanation: Select the columns to keep End of explanation """ visualize_points(SE_re_df['lat'], SE_re_df['lon'], 'Sweden', categories=SE_re_df['technology'] ) """ Explanation: Visualize End of explanation """ SE_re_df.reset_index(inplace=True, drop=True) SE_re_df.to_pickle('intermediate/SE_renewables.pickle') del SE_re_df """ Explanation: Save End of explanation """ # Download the data and get the local paths to the corresponding files print('Start:', datetime.datetime.now()) downloader = Downloader(version, input_directory_path, source_list_filepath, download_from) filepaths = downloader.download_data_for_country('CZ') print('End:', datetime.datetime.now()) CZ_re_filepath = filepaths['ERU'] CZ_geo_filepath = filepaths['Geonames'] CZ_postcode2nuts_filepath = filepaths['Eurostat'] # Define a converter for CZ postcode strings def 
to_cz_postcode_format(postcode_str): return postcode_str[:3] + ' ' + postcode_str[3:] # Read the data from the csv file CZ_re_df = pd.read_csv(CZ_re_filepath, escapechar='\\', dtype = { 'number_of_sources' : int, }, parse_dates=['licence_approval_date'], infer_datetime_format=True, converters = { 'site_postcode' : to_cz_postcode_format, 'holder_postcode' : to_cz_postcode_format } ) # Show a few rows CZ_re_df.head(5) """ Explanation: Czech Republic The data for Czech Republic are provided by the following source: - ERU (EnergetickÃœ regulační úřad, Energy Regulatory Office) - Administrative authority responsible for regulation in the energy sector. Provides the data on renewable energy plants in Czech Republic. Download and read the data Downloading the data from the original source may take 1-2 hours because it's done by scraping the information from HTML pages. If downloading fails because of the ERU's server refusing connections: - pause and wait for some time; - delete the file eru.csv in the CZ input directory; - try downloading again. Alternatively, you can download the data from the OPSD server. End of explanation """ CZ_re_df.dtypes """ Explanation: Let's inspect the dataframe's columns: End of explanation """ mwe_columns = [col for col in CZ_re_df.columns if 'megawatts_electric' in col and col != 'megawatts_electric_total'] mwt_columns = [col for col in CZ_re_df.columns if 'megawatts_thermal' in col and col != 'megawatts_thermal_total'] def count_types(row): global mwe_columns different_types = sum([row[col] > 0 for col in mwe_columns]) return different_types CZ_re_df.apply(count_types, axis=1).value_counts() """ Explanation: It contains 30 columns: - site_name, site_region, site_postcode, site_locality, site_district give us basic information on the site; - megawatts_electric_total shows us the total electric capacity of the site; - Since each site can use different types of energy, megawatts_electric_hydro, megawatts_electric_solar, megawatts_electric_biogas_and_biomass, megawatts_electric_wind, megawatts_electric_unspecified show us how total capacity breaks down to those renewable types from the OPSD energy hierarchy; - The columns beginning with megawatts_thermal_ represent the amiunt of input energy required (and will be equal to zero in most cases); - watercourse and watercourse_length_km represent the name and length of the watercourse used by the site (if any); - holder_name, holder_region, holder_address, holder_postcode, holder_locality, holder_district, holder_representative give us basic information on the site's owner; - licence_number and licence_approval_date show us the licence number given to the holder and its approval date. - link points to the ERU page with the site's data in HTML. Since some sites use conventional types of energy, it is possible that megawatts_electric_total &gt; megawatts_electric_hydro + megawatts_electric_solar + megawatts_electric_biogas_and_biomass + megawatts_electric_wind + megawatts_electric_unspecified. If the sum of renewable-energy capacities is equal to zero, that means that the correspoding row actually represents a conventional powerplant, so it should be excluded. Let us now check how many sites use how many types of renewable energy sources. 
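The apply-based count above can also be written without a Python-level loop. A small equivalent, reusing the CZ_re_df and mwe_columns objects defined earlier in this section:

```python
# Compare all megawatts_electric_* columns with zero at once, count the
# positive ones per row, and tabulate how many sites use 0, 1, 2, ... types.
renewable_type_counts = (CZ_re_df[mwe_columns] > 0).sum(axis=1).value_counts()
print(renewable_type_counts)
```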
End of explanation """ # Drop empty columns and rows CZ_re_df.dropna(axis='index', how='all', inplace=True) CZ_re_df.dropna(axis='columns', how='all', inplace=True) # Drop rows with no data on electrical capacity and the rows where total electrical capacity is 0 empty_mask = (CZ_re_df['megawatts_electric_total'] == 0) | (CZ_re_df['megawatts_electric_total'].isnull()) CZ_re_df = CZ_re_df.loc[~empty_mask] CZ_re_df.reset_index(inplace=True, drop=True) # Replace NANs with zeroes in mwe and mwt columns replacement_dict = {col : 0 for col in mwe_columns + mwt_columns} CZ_re_df.fillna(replacement_dict, inplace=True) # Drop the rows where renewable-energy share of the total capacity is equal to zero conventional_mask = (CZ_re_df['megawatts_electric_hydro'] + CZ_re_df['megawatts_electric_solar'] + CZ_re_df['megawatts_electric_biogas_and_biomass'] + CZ_re_df['megawatts_electric_wind'] + CZ_re_df['megawatts_electric_unspecified']) == 0 CZ_re_df = CZ_re_df.loc[~conventional_mask] CZ_re_df.reset_index(inplace=True, drop=True) """ Explanation: As of April 2020, as we can see in the output above, there are only 4 sites which use more than one type of renewable energy, and there are 193 sites which do not use renewable energy at all. Clean the data End of explanation """ # Define the function which will extract the data about the type of energy specified by the given column # and return it as a dataframe in the "long format" def select_and_reformat(df, column): # Use the mwe and mwt columns defined above global mwe_columns global mwt_columns # Declare the given column and its mwt counterpart as exceptions mwt_exception = column.replace('electric', 'thermal') exceptions = [column, mwt_exception] # Exclude all the mwe and mwt columns which do not correspond to the given energy type columns_to_skip = [col for col in mwe_columns + mwt_columns if col not in exceptions] # Keep all the other columns columns_to_keep = [col for col in df.columns if col not in columns_to_skip] # Find the stations which use the given type of energy selection_mask = (df[column] > 0) # Keep them and select the columns we decided to keep selection_df = df[selection_mask][columns_to_keep] # Create a new column which will indicate the energy type selection_df['energy_type'] = " ".join(column.split('_')[2:]) # Remove the energy type name from the columns representing electrical capacity # and megawatts thermal selection_df.rename(columns = {column : 'electrical_capacity', mwt_exception : 'megawatts_thermal'}, inplace=True) selection_df.drop(columns=['megawatts_electric_total', 'megawatts_thermal_total'], inplace=True) # Ensure the rows are properly indexed as 0,1,2,... selection_df.reset_index(inplace=True, drop=True) return selection_df # Create a dataframe for each energy type dataframes = [] for column in mwe_columns: selection = select_and_reformat(CZ_re_df, column) energy_type = selection['energy_type'].unique()[0] dataframes.append(selection) # Concatenate the dataframes CZ_re_df = pd.concat(dataframes, ignore_index=False) CZ_re_df.reset_index(inplace=True, drop=True) """ Explanation: Reformat the data There are sites which use different types of renewable source to produce electric energy. Those are the sites where at least two of the following columns are not equal to zero: megawatts_electric_hydro, megawatts_electric_solar, megawatts_electric_biogas_and_biomass, megawatts_electric_wind, megawatts_electric_unspecified. The data that come in this shape are said to be in the so called wide format. 
For the purpose of our later processing, it would be more convenient to have the data where each row is associated to one and only one type of energy (the so called long format). Therefore, we must first restructure our data from the wide to long format. End of explanation """ CZ_re_df """ Explanation: Let us see what is this restructured dataframe like. End of explanation """ # Choose the translation terms for CZ and create the translation dictionary idx_CZ = columnnames[columnnames['country'] == 'CZ'].index column_dict_CZ = columnnames.loc[idx_CZ].set_index('original_name')['opsd_name'].to_dict() # Show the dictionary column_dict_CZ # Translate column names CZ_re_df.rename(columns=column_dict_CZ, inplace=True) """ Explanation: The number of columns has been reduced as we have transformed the data to the long format. The rows representning conventional power plants have been excluded. Since only few sites use multiple types of energy, the total number of rows has not increased. Translate column names End of explanation """ # Choose the translation terms for Czech Republic idx_CZ = valuenames[valuenames['country'] == 'CZ'].index # Choose the translation terms for energy source level 3 energy3_dict_CZ = valuenames.loc[idx_CZ].set_index('original_name')['opsd_name'].to_dict() energy3_dict_CZ # Add energy source level 3 CZ_re_df['energy_source_level_3'] = CZ_re_df['technology'].replace(energy3_dict_CZ) # Choose the terms for energy source level 2 energy2_dict_CZ = valuenames.loc[idx_CZ].set_index('original_name')['energy_source_level_2'].to_dict() CZ_re_df['energy_source_level_2'] = CZ_re_df['technology'].replace(energy2_dict_CZ) # Standardize the values for technology # 1. np.nan means that technology should not be specified for the respective kind of sources # according to the hierarchy (http://open-power-system-data.org/2016-10-25-opsd_tree.svg) # 2. 'Other or unspecified technology' means that technology should be specified # but it was unclear or missing in the original dataset. 
technology_dict = { 'biogas and biomass' : np.nan, 'wind' : 'Onshore', 'solar' : 'Other or unspecified technology', 'hydro' : 'Run-of-river', 'unspecified' : np.nan } CZ_re_df['technology'] = CZ_re_df['technology'].replace(technology_dict) # Add energy_source_level_1 CZ_re_df['energy_source_level_1'] = 'Renewable energy' # Show the hierarchy of sources present in the dataset CZ_re_df[['energy_source_level_1', 'energy_source_level_2', 'energy_source_level_3', 'technology']].drop_duplicates() """ Explanation: Translate values and harmonize energy levels End of explanation """ CZ_re_df['data_source'] = 'ERU' """ Explanation: Add data source End of explanation """ # Get geo-information zip_CZ_geo = zipfile.ZipFile(CZ_geo_filepath) # Read generated postcode/location file CZ_geo = pd.read_csv(zip_CZ_geo.open('CZ.txt'), sep='\t', header=None) # add column names as defined in associated readme file CZ_geo.columns = ['country_code', 'postcode', 'place_name', 'admin_name1', 'admin_code1', 'admin_name2', 'admin_code2', 'admin_name3', 'admin_code3', 'lat', 'lon', 'accuracy'] # Drop rows of possible duplicate postal_code CZ_geo.drop_duplicates('postcode', keep='last', inplace=True) # Add longitude/latitude infomation assigned by postcode CZ_re_df = pd.merge(CZ_re_df, CZ_geo[['lat', 'lon', 'postcode']], left_on='postcode', right_on='postcode', how='left' ) """ Explanation: Georeferencing End of explanation """ CZ_postcode2nuts_filepath = filepaths['Eurostat'] CZ_re_df = nuts_converter.add_nuts_information(CZ_re_df, 'CZ', CZ_postcode2nuts_filepath, how=['postcode']) # Report the number of facilites whose NUTS codes were successfully determined determined = CZ_re_df['nuts_1_region'].notnull().sum() print('NUTS successfully determined for', determined, 'out of', CZ_re_df.shape[0], 'facilities in CZ.') # Report the number of facilites whose NUTS codes could not be determined not_determined = CZ_re_df['nuts_1_region'].isnull().sum() print('NUTS could not be determined for', not_determined, 'out of', CZ_re_df.shape[0], 'facilities in CZ.') """ Explanation: Assign NUTS codes End of explanation """ # Define which columns should be kept columns_to_keep = ['site_name', 'region', 'municipality', 'locality', 'postcode', 'nuts_1_region', 'nuts_2_region', 'nuts_3_region', 'lat', 'lon', 'energy_source_level_1', 'energy_source_level_2', 'energy_source_level_3', 'technology', 'owner', 'electrical_capacity', 'data_source'] # Keep only the selected columns CZ_re_df = CZ_re_df.loc[:, columns_to_keep] """ Explanation: Select the columns to keep End of explanation """ CZ_re_df.drop_duplicates(inplace=True) CZ_re_df.reset_index(drop=True, inplace=True) """ Explanation: Drop duplicates End of explanation """ visualize_points(CZ_re_df['lat'], CZ_re_df['lon'], 'Czechia', categories=CZ_re_df['energy_source_level_2'] ) """ Explanation: Visualuze End of explanation """ CZ_re_df.reset_index(inplace=True, drop=True) CZ_re_df.to_pickle('intermediate/CZ_renewables.pickle') del CZ_re_df """ Explanation: Save End of explanation """ zip_archive = zipfile.ZipFile(input_directory_path + '.zip', 'w', zipfile.ZIP_DEFLATED) print("Zipping the raw files...") for filename in os.listdir(input_directory_path): print("Adding", filename, "to the zip.") filepath = os.path.join(input_directory_path, filename) zip_archive.write(filepath) zip_archive.close() print("Done!") #shutil.rmtree(input_directory_path) """ Explanation: Zip the raw data End of explanation """
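# Added note (not part of the original notebook): a minimal sketch of how the per-country
# pickles written above could later be read back and stacked into a single frame. The two
# file names are the ones created in this notebook; the combination step itself is only
# illustrative and not taken from the original pipeline.
import pandas as pd

SE_df = pd.read_pickle('intermediate/SE_renewables.pickle')
CZ_df = pd.read_pickle('intermediate/CZ_renewables.pickle')

# Stack the harmonized country frames; columns present in only one country become NaN.
combined = pd.concat([SE_df, CZ_df], ignore_index=True, sort=False)
print(combined.groupby(['energy_source_level_2'])['electrical_capacity'].sum())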
dh7/ML-Tutorial-Notebooks
Fizz Buzz.ipynb
bsd-2-clause
import numpy as np
import tensorflow as tf
"""
Explanation: Fizz Buzz with Tensor Flow.
This notebook explains the code from the Fizz Buzz in Tensor Flow blog post written by Joel Grus
You should read his post first, it is super funny!
His code tries to play the Fizz Buzz game by using machine learning.
This notebook is for real beginners who want to understand the basics of TensorFlow by reading code.
Feedback welcome @dh7net
Let's start!
The code contains several parts:
* Create the training set
  * Encode the input (a number)
  * Encode the result (fizz or buzz, none, or both?)
  * Build the training set itself
* Build a model
* Train the model
  * Create a cost function
  * Iterate
* Make predictions
End of explanation
"""
NUM_DIGITS = 10

def binary_encode(i, num_digits):
    return np.array([i >> d & 1 for d in range(num_digits)])

# Let's check if it works
for i in range(10):
    print i, binary_encode(i, NUM_DIGITS)
"""
Explanation: Create the training set
Encode the input (a number)
This example converts the number to a binary representation
End of explanation
"""
def fizz_buzz_encode(i):
    if   i % 15 == 0: return np.array([0, 0, 0, 1])
    elif i % 5  == 0: return np.array([0, 0, 1, 0])
    elif i % 3  == 0: return np.array([0, 1, 0, 0])
    else:             return np.array([1, 0, 0, 0])

def fizz_buzz(i, prediction):
    return [str(i), "fizz", "buzz", "fizzbuzz"][prediction]

# let's see how the encoding works
for i in range(1, 16):
    print i, fizz_buzz_encode(i)

# and the decoding
for i in range(1, 16):
    fizz_or_buzz_number = np.argmax(fizz_buzz_encode(i))
    print i, fizz_or_buzz_number, fizz_buzz(i, fizz_or_buzz_number)
"""
Explanation: Encode the result (fizz or buzz, none or both?)
The fizz_buzz_encode function calculates what the output should be and encodes it as a 4-dimensional one-hot vector.
The fizz_buzz function takes a number and a prediction and returns the corresponding string.
End of explanation
"""
training_size = 2 ** NUM_DIGITS
print "Size of the set:", training_size

trX = np.array([binary_encode(i, NUM_DIGITS) for i in range(101, training_size)])
trY = np.array([fizz_buzz_encode(i) for i in range(101, training_size)])

print "First 15 values:"
# trX[0] corresponds to the number 101, so shift the index when printing
for i in range(15):
    print i + 101, trX[i], trY[i]
"""
Explanation: Create the training set
End of explanation
"""
def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))
"""
Explanation: Creation of the model
The model is made of:
* one hidden layer that contains 100 neurons
* one output layer
The input is fully connected to the hidden layer, and a ReLU function is applied.
The ReLU function is a rectifier that simply outputs zero when its input is negative.
First we'll define a helper function to initialise the parameters with random values.
End of explanation
"""
NUM_HIDDEN = 100  # Number of neurons in the hidden layer

X = tf.placeholder("float", [None, NUM_DIGITS])
Y = tf.placeholder("float", [None, 4])

w_h = init_weights([NUM_DIGITS, NUM_HIDDEN])
w_o = init_weights([NUM_HIDDEN, 4])
"""
Explanation: X is the input
Y is the output
w_h are the parameters between the input and the hidden layer
w_o are the parameters between the hidden layer and the output
End of explanation
"""
def model(X, w_h, w_o):
    h = tf.nn.relu(tf.matmul(X, w_h))
    return tf.matmul(h, w_o)

py_x = model(X, w_h, w_o)
"""
Explanation: To create the model we apply the w_h parameters to the input and then apply the ReLU function to calculate the value of the hidden layer.
The w_o coefficients are used to calculate the output layer.
No rectification is applied. py_x is the predicted value for a given input, represented as a vector (dimension 4).
End of explanation
"""
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(py_x, Y))
"""
Explanation: Training
Create the cost function
The cost function measures how bad the model is. It is the distance between the prediction (py_x) and the reality (Y).
End of explanation
"""
train_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost)
"""
Explanation: softmax_cross_entropy_with_logits(py_x, Y) measures the distance between py_x and Y.
Softmax is the classical way to measure the distance between a predicted result and the actual result in a cost function.
reduce_mean calculates the mean of a tensor, in this case the mean of the distances over the whole training set.
Train the model
Training a model in TensorFlow is extremely simple: you just define a training operator!
End of explanation
"""
predict_op = tf.argmax(py_x, 1)
"""
Explanation: This operator will minimize the cost using gradient descent, which is the most common optimizer for finding parameters that minimise the cost.
We'll also define a prediction operator that will be able to output a prediction.
* 0 means no fizz, no buzz
* 1 means fizz
* 2 means buzz
* 3 means fizzbuzz
End of explanation
"""
BATCH_SIZE = 128
"""
Explanation: Iterate until the model is good enough
One epoch consists of one full training cycle on the training set. Once every sample in the set is seen, you start again, marking the beginning of the 2nd epoch (source).
The training set is randomly permuted between each epoch.
The learning is not done on the full set at once. Instead the training set is divided into small batches and the learning is done on each of them.
End of explanation
"""
# random permutation of the index will be used during the training for each epoch
permutation_index = np.random.permutation(range(len(trX)))
for start in range(0, len(trX), BATCH_SIZE):
    end = start + BATCH_SIZE
    print "Batch starting at", start
    print permutation_index[start:end]

# Launch the graph in a session
sess = tf.Session()
tf.initialize_all_variables().run(session=sess)

for epoch in range(5000):
    # Shuffle the data before each training iteration.
    p = np.random.permutation(range(len(trX)))
    trX, trY = trX[p], trY[p]

    # Train in batches of 128 inputs.
    for start in range(0, len(trX), BATCH_SIZE):
        end = start + BATCH_SIZE
        sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end]})

    # And print the current accuracy on the training data.
    if (epoch%100==0): # every 100 epochs, to not overflow the jupyter log
        # np.mean(A==B) returns a number between 0 and 1. (true_count/total_count)
        print(epoch, np.mean(np.argmax(trY, axis=1) ==
                             sess.run(predict_op, feed_dict={X: trX, Y: trY})))

# And now for some fizz buzz
numbers = np.arange(1, 101)
teX = np.transpose(binary_encode(numbers, NUM_DIGITS))
teY = sess.run(predict_op, feed_dict={X: teX})
output = np.vectorize(fizz_buzz)(numbers, teY)
print output

sess.close() # don't forget to close the session if you don't use it anymore. Or use the *with* statement.

# Let's check the quality
Y = np.array([fizz_buzz_encode(i) for i in range(1,101)])
print "accuracy", np.mean(np.argmax(Y, axis=1) == teY)

# compare the expected answer with the prediction for every number from 1 to 100
for i in range(1,101):
    actual = fizz_buzz(i, np.argmax(fizz_buzz_encode(i)))
    predicted = output[i-1]
    ok = True
    if actual != predicted:
        ok = False
    print i, "{:>8}".format(actual), "{:>8}".format(predicted), ok
"""
Explanation: Here is an example of the indexes used for one epoch:
End of explanation
"""
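# Added note (not part of the original post or notebook): the code above targets the old
# low-level TensorFlow API and Python 2. As a rough equivalent for readers on TensorFlow 2,
# the same two-layer network (NUM_DIGITS binary inputs -> 100 ReLU units -> 4 outputs) could
# be sketched with tf.keras as below; only the layer sizes and the 0.05 learning rate are
# taken from the code above, everything else is an assumption of this sketch.
from tensorflow import keras

keras_model = keras.Sequential([
    keras.layers.Dense(100, activation='relu', input_shape=(NUM_DIGITS,)),
    keras.layers.Dense(4)  # raw logits; softmax is applied inside the loss
])
keras_model.compile(optimizer=keras.optimizers.SGD(learning_rate=0.05),
                    loss=keras.losses.CategoricalCrossentropy(from_logits=True),
                    metrics=['accuracy'])
# keras_model.fit(trX, trY, batch_size=BATCH_SIZE, epochs=5000, verbose=0) would then replace
# the manual session/batching loop above.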
stsouko/CGRtools
doc/tutorial/2_signatures.ipynb
lgpl-3.0
import pkg_resources if pkg_resources.get_distribution('CGRtools').version.split('.')[:2] != ['4', '0']: print('WARNING. Tutorial was tested on 4.0 version of CGRtools') else: print('Welcome!') # load data for tutorial from pickle import load from traceback import format_exc with open('molecules.dat', 'rb') as f: molecules = load(f) # list of MoleculeContainer objects with open('reactions.dat', 'rb') as f: reactions = load(f) # list of ReactionContainer objects m1, m2, m3 = molecules[:3] # molecule m7 = m3.copy() m11 = m3.copy() m11.standardize() m7.standardize() r1 = reactions[0] # reaction m1.delete_atom(3) cgr2 = ~r1 benzene = m3.substructure([4,5,6,7,8,9]) m3.delete_bond(4, 5) """ Explanation: 2. Signatures and duplicates selection (c) 2019, 2020 Dr. Ramil Nugmanov; (c) 2019 Dr. Timur Madzhidov; Ravil Mukhametgaleev Installation instructions of CGRtools package information and tutorial's files see on https://github.com/stsouko/CGRtools NOTE: Tutorial should be performed sequentially from the start. Random cell running will lead to unexpected results. End of explanation """ ms2 = str(m2) # get and print signature print(ms2) # or print(m2) hms2 = bytes(m2) # get sha512 hash of signature as bytes-string m2 """ Explanation: 2.1. Molecule Signatures MoleculeContainer has methods for unique molecule signature generation. Signature is SMILES string with canonical atoms ordering. For signature generation one need to call str function on MoleculeContainer object. Fixed length hash of signature could be retrieved by calling bytes function on molecule (correspond to SHA 512 bitstring). Order of atoms calculated by Morgan-like algorithm. On initial state for each atoms it's integer code calculated based on its type. All bonds incident to atoms also coded as integers and stored in sorted tuple. Atom code and tuple of it's bonds used for ordering and similar atoms detecting. Ordered atoms rank is replaced with new integer code. Atoms of the same type with the same bonds types incident to it have equal numbers. Numbers codes found are used in Morgan algorithm cycle. Loop is repeated until all atoms will be unique or number of unique atoms will not change in 3 subsequent loops. End of explanation """ print(f'f string {m2}') # use signature in string formatting print('C-style string %s' % m2) print('format method {}'.format(m2)) """ Explanation: String formatting is supported that is useful for reporting End of explanation """ mq = m2.substructure(m2, as_query=True) print(f'{mq}') # get signatures with hybridization and neighbors data print('{:!n}'.format(mq)) # get signature with hybridization only data # h - hybridization marks, n- neighbors marks print(format(mq, '!h')) # include only number of neighbors in signature print(f'{mq:!n!h}') # hide all data mq """ Explanation: For Queries number of neighbors and hybridization will be added to signature. Note that in this case they are not readable as SMILES. But possible to hide this data. End of explanation """ m1 m2 m1 != m2 # different molecules m7 m7 == m11 # copy of the same molecule m7 is m11 # this is not same objects! # Simplest way to exclude duplicated structures len({m1, m2, m7, m11}) == 3 # create set of unique molecules. Only 3 of them were different. """ Explanation: Atoms in the QueryContainer and QueryCGRContainer signature are represented in the following way: [isotope;element_symbol;hn;charge;radical state]. h mean hybridization, n - number of neighbors. 
Notation for hybridization is the following: s - all bonds of atom are single d - atom has one double bond and others are single t - atom has one triple or two double bonds and other are single a - atom is in aromatic ring Examples: s1 - atom has s hybridization and one neighbor d3 - atom has d hybridization and 3 neighbors Signatures for CGRContainer include only radical state marks additionally to common SMILES notation. Molecules comparable and hashable Comparison of MoleculeContainer is based on its signatures. Moreover, since strings in Python are hashable, MoleculeContaier also hashable. NOTE: MoleculeContainer can be changed. This can lead to unobvious behavior of the sets and dictionaries in which these molecules were placed before the change. Avoid changing molecules (standardize, aromatize, hydrogens and atoms/bonds changes) placed inside sets and dictionaries. End of explanation """ str(r1) """ Explanation: 2.2. Reaction signatures ReactionContainer have its signature. Signature is SMIRKS string in which molecules of reactants, reagents, products presented in canonical order. API is the same as for molecules End of explanation """ str(cgr2) cgr2.clean2d() cgr2 """ Explanation: 2.3. CGR signature CGRContainer have its signature. Signatures is SMIRKS-like strings where dynamic bond labels and dynamic atoms are also specified within squared brackets, so not only atoms but bonds could be written in brackets if a bond has complex parameters. Dynamic bonds in CGR have special label representing changes in bond orders. Dynamic atom corresponds to a change of formal charge or radical state of atom in reaction. Their labels are also given in brackets, including the atom symbol and text keys for atomic property in reactant and product, separated by symbol >. For a neutral atom A gaining a positive charge +n in reaction dynamic atom will be encoded as [A0>+n]. In case of charges +1 and -1, the number 1 is omitted. Properties for charges and radicals may be combined consecutively within one pair of brackets, e.g. [A0>-^>*] stands for an atom which becomes an anion-radical. End of explanation """
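# Added note (not part of the original tutorial): since str(molecule) returns the canonical
# signature and bytes(molecule) its SHA-512 hash, duplicates in any iterable of molecules can
# also be filtered explicitly with a dict keyed by the signature, keeping the first occurrence.
# Only the `molecules` list loaded at the start of this tutorial is assumed here.
unique_by_signature = {}
for m in molecules:
    unique_by_signature.setdefault(str(m), m)
unique_molecules = list(unique_by_signature.values())
print(len(molecules), 'molecules loaded,', len(unique_molecules), 'unique signatures')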
vbsteja/code
Python/ML_DL/DL/Neural-Networks-Demystified-master/.ipynb_checkpoints/Part 4 Backpropagation-checkpoint.ipynb
apache-2.0
from IPython.display import YouTubeVideo YouTubeVideo('GlcnxUlrtek') """ Explanation: <h1 align = 'center'> Neural Networks Demystified </h1> <h2 align = 'center'> Part 4: Backpropagation </h2> <h4 align = 'center' > @stephencwelch </h4> End of explanation """ %pylab inline #Import code from last time from partTwo import * def sigmoid(z): #Apply sigmoid activation function to scalar, vector, or matrix return 1/(1+np.exp(-z)) def sigmoidPrime(z): #Derivative of sigmoid function return np.exp(-z)/((1+np.exp(-z))**2) testValues = np.arange(-5,5,0.01) plot(testValues, sigmoid(testValues), linewidth=2) plot(testValues, sigmoidPrime(testValues), linewidth=2) grid(1) legend(['sigmoid', 'sigmoidPrime']) """ Explanation: <h3 align = 'center'> Variables </h3> |Code Symbol | Math Symbol | Definition | Dimensions | :-: | :-: | :-: | :-: | |X|$$X$$|Input Data, each row in an example| (numExamples, inputLayerSize)| |y |$$y$$|target data|(numExamples, outputLayerSize)| |W1 | $$W^{(1)}$$ | Layer 1 weights | (inputLayerSize, hiddenLayerSize) | |W2 | $$W^{(2)}$$ | Layer 2 weights | (hiddenLayerSize, outputLayerSize) | |z2 | $$z^{(2)}$$ | Layer 2 activation | (numExamples, hiddenLayerSize) | |a2 | $$a^{(2)}$$ | Layer 2 activity | (numExamples, hiddenLayerSize) | |z3 | $$z^{(3)}$$ | Layer 3 activation | (numExamples, outputLayerSize) | |J | $$J$$ | Cost | (1, outputLayerSize) | |dJdz3 | $$\frac{\partial J}{\partial z^{(3)} } = \delta^{(3)}$$ | Partial derivative of cost with respect to $z^{(3)}$ | (numExamples,outputLayerSize)| |dJdW2|$$\frac{\partial J}{\partial W^{(2)}}$$|Partial derivative of cost with respect to $W^{(2)}$|(hiddenLayerSize, outputLayerSize)| |dz3dz2|$$\frac{\partial z^{(3)}}{\partial z^{(2)}}$$|Partial derivative of $z^{(3)}$ with respect to $z^{(2)}$|(numExamples, hiddenLayerSize)| |dJdW1|$$\frac{\partial J}{\partial W^{(1)}}$$|Partial derivative of cost with respect to $W^{(1)}$|(inputLayerSize, hiddenLayerSize)| |delta2|$$\delta^{(2)}$$|Backpropagating Error 2|(numExamples,hiddenLayerSize)| |delta3|$$\delta^{(3)}$$|Backpropagating Error 1|(numExamples,outputLayerSize)| Last time, we decided to use gradient descent to train our Neural Network, so it could make better predictions of your score on a test based on how many hours you slept, and how many hours you studied the night before. To perform gradient descent, we need an equation and some code for our gradient, dJ/dW. Our weights, W, are spread across two matrices, W1 and W2. We’ll separate our dJ/dW computation in the same way, by computing dJdW1 and dJdW2 independently. We should have just as many gradient values as weight values, so when we’re done, our matrices dJdW1 and dJdW2 will be the same size as W1 and W2. $$ \frac{\partial J}{\partial W^{(2)}} = \frac{\partial \sum \frac{1}{2}(y-\hat{y})^2}{\partial W^{(2)}} $$ Let’s work on dJdW2 first. The sum in our cost function adds the error from each example to create our overall cost. We’ll take advantage of the sum rule in differentiation, which says that the derivative of the sums equals the sum of the derivatives. We can move our sigma outside and just worry about the derivative of the inside expression first. $$ \frac{\partial J}{\partial W^{(2)}} = \sum \frac{\partial \frac{1}{2}(y-\hat{y})^2}{\partial W^{(2)}} $$ To keep things simple, we’ll temporarily forget about our summation. Once we’ve computed dJdW for a single example, we’ll add all our individual derivative terms together. We can now evaluate our derivative. 
The power rule tells us to bring down our exponent, 2, and multiply. To finish our derivative, we’ll need to apply the chain rule. The chain rule tells us how to take the derivative of a function inside of a function, and generally says we take the derivative of the outside function and then multiply it by the derivative of the inside function. One way to express the chain rule is as the product of derivatives, this will come in very handy as we progress through backpropagation. In fact, a better name for backpropagation might be: don’t stop doing the chain rule. ever. We’ve taken the derivative of the outside of our cost function - now we need to multiply it by the derivative of the inside. Y is just our test scores, which won’t change, so the derivative of y, a constant, with respect to W two is 0! yHat, on the other hand, does change with respect to W two, so we’ll apply the chain rule and multiply our results by minus dYhat/dW2. $$ \frac{\partial J}{\partial W^{(2)}} = -(y-\hat{y}) \frac{\partial \hat{y}}{\partial W^{(2)}} $$ We now need to think about the derivative of yHat with respect to W2. Equation 4 tells us that yHat is our activation function of z3, so we it will be helpful to apply the chain rule again to break dyHat/dW2 into dyHat/dz3 times dz3/dW2. $$ \frac{\partial J}{\partial W^{(2)}} = -(y-\hat{y}) \frac{\partial \hat{y}}{\partial z^{(3)}} \frac{\partial z^{(3)}}{\partial W^{(2)}} $$ To find the rate of change of yHat with respect to z3, we need to differentiate our sigmoid activation function with respect to z. $$ f(z) = \frac{1}{1+e^{-z}} $$ $$ f^\prime(z) = \frac{e^{-z}}{(1+e^{-z})^2} $$ Now is a good time to add a new python method for the derivative of our sigmoid function, sigmoid Prime. Our derivative should be the largest where our sigmoid function is the steepest, at the value z equals zero. End of explanation """ # Part of NN Class (won't work alone, needs to be included in class as # shown in below and in partFour.py): def costFunctionPrime(self, X, y): #Compute derivative with respect to W and W2 for a given X and y: self.yHat = self.forward(X) delta3 = np.multiply(-(y-self.yHat), self.sigmoidPrime(self.z3)) dJdW2 = np.dot(self.a2.T, delta3) """ Explanation: We can now replace dyHat/dz3 with f prime of z 3. $$ \frac{\partial z^{(3)}}{\partial W^{(2)}}= -(y-\hat{y}) f^\prime(z^{(3)}) \frac{\partial z^{(3)}}{\partial W^{(2)}} $$ Our final piece of the puzzle is dz3dW2, this term represents the change of z, our third layer activity, with respect to the weights in the second layer. Z three is the matrix product of our activities, a two, and our weights, w two. The activities from layer two are multiplied by their correspond weights and added together to yield z3. If we focus on a single synapse for a moment, we see a simple linear relationship between W and z, where a is the slope. So for each synapse, dz/dW(2) is just the activation, a on that synapse! $$ z^{(3)} = a^{(2)}W^{(2)} \tag{3}\ $$ Another way to think about what the calculus is doing here is that it is “backpropagating” the error to each weight, by multiplying by the activity on each synapses, the weights that contribute more to the error will have larger activations, and yield larger dJ/dW2 values, and those weights will be changed more when we perform gradient descent. We need to be careful with our dimensionality here, and if we’re clever, we can take care of that summation we got rid of earlier. The first part of our equation, y minus yHat is of the same dimension as our output data, 3 by 1. 
F prime of z three is of the same size, 3 by 1, and our first operation is scalar multiplication. Our resulting 3 by 1 matrix is referred to as the backpropagating error, delta 3. We determined that dz3/dW2 is equal to the activity of each synapse. Each value in delta 3 needs to be multiplied by each activity. We can achieve this by transposing a2 and matrix multiplying by delta3. $$ \frac{\partial J}{\partial W^{(2)}} = (a^{(2)})^T\delta^{(3)}\tag{6} $$ $$ \delta^{(3)} = -(y-\hat{y}) f^\prime(z^{(3)}) $$ What’s cool here is that the matrix multiplication also takes care of our earlier omission – it adds up the dJ/dW terms across all our examples. Another way to think about what’s happening here is that is that each example our algorithm sees has a certain cost and a certain gradient. The gradient with respect to each example pulls our gradient descent algorithm in a certain direction. It's like every example gets a vote on which way is downhill, and when we perform batch gradient descent we just add together everyone’s vote, call it downhill, and move in that direction. We’ll code up our gradients in python in a new method, cost function prime. Numpy’s multiply method performs element-wise multiplication, and the dot method performs matrix multiplication. End of explanation """ # Whole Class with additions: class Neural_Network(object): def __init__(self): #Define Hyperparameters self.inputLayerSize = 2 self.outputLayerSize = 1 self.hiddenLayerSize = 3 #Weights (parameters) self.W1 = np.random.randn(self.inputLayerSize,self.hiddenLayerSize) self.W2 = np.random.randn(self.hiddenLayerSize,self.outputLayerSize) def forward(self, X): #Propogate inputs though network self.z2 = np.dot(X, self.W1) self.a2 = self.sigmoid(self.z2) self.z3 = np.dot(self.a2, self.W2) yHat = self.sigmoid(self.z3) return yHat def sigmoid(self, z): #Apply sigmoid activation function to scalar, vector, or matrix return 1/(1+np.exp(-z)) def sigmoidPrime(self,z): #Gradient of sigmoid return np.exp(-z)/((1+np.exp(-z))**2) def costFunction(self, X, y): #Compute cost for given X,y, use weights already stored in class. self.yHat = self.forward(X) J = 0.5*sum((y-self.yHat)**2) return J def costFunctionPrime(self, X, y): #Compute derivative with respect to W and W2 for a given X and y: self.yHat = self.forward(X) delta3 = np.multiply(-(y-self.yHat), self.sigmoidPrime(self.z3)) dJdW2 = np.dot(self.a2.T, delta3) delta2 = np.dot(delta3, self.W2.T)*self.sigmoidPrime(self.z2) dJdW1 = np.dot(X.T, delta2) return dJdW1, dJdW2 """ Explanation: We have one final term to compute: dJ/dW1. The derivation begins the same way, computing the derivative through our final layer: first dJ/dyHat, then dyHat/dz3, and we called these two taken together form our backpropagating error, delta3. We now take the derivative “across” our synapses, this is a little different from out job last time, computing the derivative with respect to the weights on our synapses. 
$$ \frac{\partial J}{\partial W^{(1)}} = (y-\hat{y}) \frac{\partial \hat{y}}{\partial W^{(1)}} $$ $$ \frac{\partial J}{\partial W^{(1)}} = (y-\hat{y}) \frac{\partial \hat{y}}{\partial z^{(3)}} \frac{\partial z^{(3)}}{\partial W^{(1)}} $$ $$ \frac{\partial J}{\partial W^{(1)}} = -(y-\hat{y}) f^\prime(z^{(3)}) \frac{\partial z^{(3)}}{\partial W^{(1)}} $$ $$ \frac{\partial z^{(3)}}{\partial W^{(1)}} = \frac{\partial z^{(3)}}{\partial a^{(2)}}\frac{\partial a^{(2)}}{\partial W^{(1)}} $$ There’s still a nice linear relationship along each synapse, but now we’re interested in the rate of change of z(3) with respect to a(2). Now the slope is just equal to the weight value for that synapse. We can achieve this mathematically by multiplying by W(2) transpose. $$ \frac{\partial J}{\partial W^{(1)}} = \delta^{(3)} (W^{(2)})^{T} \frac{\partial a^{(2)}}{\partial W^{(1)}} $$ $$ \frac{\partial J}{\partial W^{(1)}} = \delta^{(3)} (W^{(2)})^{T} \frac{\partial a^{(2)}}{\partial z^{(2)}} \frac{\partial z^{(2)}}{\partial W^{(1)}} $$ Our next term to work on is da(2)/dz(2) – this step is just like the derivative across our layer 3 neurons, so we can just multiply by f prime(z2). $$ \frac{\partial J}{\partial W^{(1)}} = \delta^{(3)} (W^{(2)})^{T} f^\prime(z^{(2)}) \frac{\partial z^{(2)}}{\partial W^{(1)}} $$ Our final computation here is dz2/dW1. This is very similar to our dz3/dW2 computation, there is a simple linear relationship on the synapses between z2 and w1, in this case though, the slope is the input value, X. We can use the same technique as last time by multiplying by X transpose, effectively applying the derivative and adding our dJ/dW1’s together across all our examples. $$ \frac{\partial J}{\partial W^{(1)}} = X^{T} \delta^{(3)} (W^{(2)})^{T} f^\prime(z^{(2)}) $$ Or: $$ \frac{\partial J}{\partial W^{(1)}} = X^{T}\delta^{(2)} \tag{7} $$ Where: $$ \delta^{(2)} = \delta^{(3)} (W^{(2)})^{T} f^\prime(z^{(2)}) $$ All that’s left is to code this equation up in python. What’s cool here is that if we want to make a deeper neural network, we could just stack a bunch of these operations together. End of explanation """ NN = Neural_Network() cost1 = NN.costFunction(X,y) dJdW1, dJdW2 = NN.costFunctionPrime(X,y) dJdW1 dJdW2 """ Explanation: So how should we change our W’s to decrease our cost? We can now compute dJ/dW, which tells us which way is uphill in our 9 dimensional optimization space. End of explanation """ scalar = 3 NN.W1 = NN.W1 + scalar*dJdW1 NN.W2 = NN.W2 + scalar*dJdW2 cost2 = NN.costFunction(X,y) print cost1, cost2 dJdW1, dJdW2 = NN.costFunctionPrime(X,y) NN.W1 = NN.W1 - scalar*dJdW1 NN.W2 = NN.W2 - scalar*dJdW2 cost3 = NN.costFunction(X, y) print cost2, cost3 """ Explanation: If we move this way by adding a scalar times our derivative to our weights, our cost will increase, and if we do the opposite, subtract our gradient from our weights, we will move downhill and reduce our cost. This simple step downhill is the core of gradient descent and a key part of how even very sophisticated learning algorithms are trained. End of explanation """
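# Added note (not part of the original notebook): a common sanity check for the backpropagation
# code above is to compare costFunctionPrime against a numerical (finite-difference) gradient.
# This sketch only uses the Neural_Network class, X and y defined above; the helper function,
# its name and the epsilon value are illustrative assumptions.
import numpy as np

def numerical_gradient(N, X, y, weight_name, epsilon=1e-4):
    W = getattr(N, weight_name)             # the actual weight matrix of the network
    num_grad = np.zeros(W.shape)
    for idx in np.ndindex(*W.shape):
        original_value = W[idx]
        W[idx] = original_value + epsilon
        loss_plus = float(N.costFunction(X, y))
        W[idx] = original_value - epsilon
        loss_minus = float(N.costFunction(X, y))
        num_grad[idx] = (loss_plus - loss_minus) / (2 * epsilon)
        W[idx] = original_value              # restore the weight
    return num_grad

NN_check = Neural_Network()
dJdW1_a, dJdW2_a = NN_check.costFunctionPrime(X, y)     # analytic gradients from backprop
dJdW1_n = numerical_gradient(NN_check, X, y, 'W1')
dJdW2_n = numerical_gradient(NN_check, X, y, 'W2')
# The largest absolute differences should be tiny (roughly 1e-8) if backprop is implemented correctly.
print(np.max(np.abs(dJdW1_a - dJdW1_n)))
print(np.max(np.abs(dJdW2_a - dJdW2_n)))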
shaivaldalal/CS6053_DataScience
HW3_sd3462.ipynb
mit
#Importing basic libraries import pandas as pd import numpy as np from sklearn.tree import DecisionTreeClassifier #Decision Tree import matplotlib.pyplot as plt # To plot graphs from sklearn.metrics import accuracy_score # To test accuracy from sklearn import tree churn=pd.read_csv("../Datasets/Cell2Cell_data.csv") #We set the training data size to 80% and the remaining to the test set. (trainsize) trainsize=0.8 #Setting seed to reproduce results if needed np.random.seed(3462) #Using numpy's random number generator to generate numbers between 0 and 1. We select values less than the training size which is set to 80% indx=np.random.rand(len(churn))<trainsize train_churn=churn[indx] test_churn=churn[~indx] """ Explanation: Introduction to Data Science Homework 3 Solutions Student Name: Shaival Dalal Student Netid: sd3462 Part 1 - Preparing a Training Set and Training a Decision Tree (Total 10 Points) This is a hands-on task where we build a predictive model using Decision Trees discussed in class. For this part, we will be using the data in cell2cell_data.csv (you can find this on NYU Classes). These historical data consist of 39,859 customers: 19,901 customers that churned (i.e., left the company) and 19,958 that did not churn (see the "churndep" variable). Here are the data set's 11 possible predictor variables for churning behavior: ``` Pos. Var. Name Var. Description 1 revenue Mean monthly revenue in dollars 2 outcalls Mean number of outbound voice calls 3 incalls Mean number of inbound voice calls 4 months Months in Service 5 eqpdays Number of days the customer has had his/her current equipment 6 webcap Handset is web capable 7 marryyes Married (1=Yes; 0=No) 8 travel Has traveled to non-US country (1=Yes; 0=No) 9 pcown Owns a personal computer (1=Yes; 0=No) 10 creditcd Possesses a credit card (1=Yes; 0=No) 11 retcalls Number of calls previously made to retention team ``` The 12th column, the dependent variable "churndep", equals 1 if the customer churned, and 0 otherwise. 1. Load the data and prepare it for modeling. Note that the features are already processed for you, so the only thing needed here is split the data into training and testing. Use pandas to create two data frames: train_df and test_df, where train_df has 80% of the data chosen uniformly at random without replacement (test_df should have the other 20%). Also, make sure to write your own code to do the splits. You may use any random() function numpy but do not use the data splitting functions from Sklearn.<br><br> (2 Points) End of explanation """ features_train=train_churn.loc[:,'revenue':'retcalls'] target_train=train_churn.loc[:,'churndep'] dtree=DecisionTreeClassifier(criterion="entropy") trained=dtree.fit(features_train,target_train) """ Explanation: 2. If we had to, how would we prove to ourselves or a colleague that our data was indeed randomly sampled on X? And by prove, I mean empirically, not just showing this person our code. Don't actually do the work, just describe in your own words a test you could here. Hint: think about this in terms of selection bias and use notes from our 2nd lecture. (1 Point) <span style="color:blue">Answer</span> We use the random number generator provided by an external library called NumPy. By using this library we generate unique random numbers which are free from selection bias. We can safely assume that selection bias is non-existent 3. Now build and train a decision tree classifier using DecisionTreeClassifier() (manual page) on train_df to predict the "churndep" target variable. 
Make sure to use criterion='entropy' when instantiating an instance of DecisionTreeClassifier(). For all other settings you should use all of the default options. (1 Point) End of explanation """ featurelength=np.arange(len(list(features_train))) names=list(features_train) importances=pd.DataFrame({"Features":list(features_train),"Importance":trained.feature_importances_}) importances.sort_values(by='Importance',ascending=False,inplace=True) plt.figure(figsize=(10,5)) plt.title("Feature Importance") plt.bar(featurelength,importances["Importance"],align="center",color="blue") plt.xticks(featurelength,importances["Features"],rotation="60") plt.xlabel('Features') plt.ylabel('Importance') ## We can alternatively use a vertical bar plot to represent information ## ''' plt.barh(featurelength,importances["Importance"],align="center",color="blue") plt.yticks(featurelength,importances["Features"]) plt.ylabel('Features') plt.xlabel('Importance') ''' plt.show() """ Explanation: 4. Using the resulting model from 2.3, show a bar plot of feature names and their feature importance (hint: check the attributes of the DecisionTreeClassifier() object directly in IPython or check the manual!). (3 Points) End of explanation """ column_names=list(churn[importances[:3]["Features"]]) column_names.extend(["churndep"]) churn[column_names].corr() """ Explanation: 5. Is the relationship between the top 3 most important features (as measured here) negative or positive? If your marketing director asked you to explain the top 3 drivers of churn, how would you interpret the relationship between these 3 features and the churn outcome? What "real-life" connection can you draw between each variable and churn? (2 Points) End of explanation """ # Splitting test dataset into target and features features_test=test_churn.loc[:,'revenue':'retcalls'] target_test=test_churn.loc[:,'churndep'] # Predicting target for train and test dataset results_test=trained.predict(features_test) results_train=trained.predict(features_train) test_accuracy=accuracy_score(target_test,results_test) train_accuracy=accuracy_score(target_train,results_train) print("Accuracy for the test dataset is %.3f%% and accuracy for the training dataset is %.3f%%" %(test_accuracy*100,train_accuracy*100)) """ Explanation: The top 3 features are revenue, eqpdays and outcalls. Revenue and Eqpdays are negatively correlated. This means that as revenue slightly increases, eqpdays decrease. We can infer that once the customer stop using the current equipment and starts using the new, company provided equipment, the company's revenues will slightly increase. The relationship is weak in nature. Revenue and outcalls are positively correlated with moderate strength. This means that as the number of outgoing calls of the customer increase, the revenue of the company increases. Eqpdays and outcalls are negatively correlated and the relationship is weak in nature. We can infer that as outgoing calls of customers increase, there is a slight decrease in the eqpdays i.e. the number of days the customer owns their current equipment. This means that customers who make more outgoing calls are somewhat likely to buy new equipment from the company than use their current equipment. The top 3 drivers of churn are monthly revenue (revenue), the number of days the customer has had his/her current equipment (eqpdays) and the mean number of outbound voice calls (outcalls). As our revenue per customer increases i.e. the more they pay, the less likely it is that they will churn. 
This can be attributed to customers finding satisfaction with services provided by us and voluntarily using more services provided by us. Although, the impact of revenue on churn is weak and almost insignificant. The longer our customers use their current equipment, the more likely it is that they will churn. We need to ensure that the customer purchases equipment(s) from us in order to lock-in the customer leading to greater customer stickiness. The more our customers make outgoing calls, the less likely it is that they will churn. More outgoing calls indicate successful and willing usage of our services. Satisfied customers are less likely to churn. Like revenue, this relation is also weak and almost insignificant. 6. Using the classifier built in 2.3, try predicting "churndep" on both the train_df and test_df data sets. What is the accuracy on each? (1 Point) End of explanation """ # We can use graphviz to visualise the decision tree which may help us # tree.export_graphviz(trained,out_file="DecisionTree") splits=np.arange(10,1000,100) leafnodes=np.arange(10,1000,100) """ Explanation: Part 2 - Finding a Good Decision Tree (Total 10 Points) The default options for your decision tree may not be optimal. We need to analyze whether tuning the parameters can improve the accuracy of the classifier. For the following options min_samples_split and min_samples_leaf: 1. Generate a list of 10 values of each for the parameters min_samples_split and min_samples_leaf. (1 Point) End of explanation """ def DtreeIter(train_features,train_target,test_features,test_target,samplesplit,sampleleaf): treeOpt=DecisionTreeClassifier(criterion="entropy",min_samples_split=samplesplit,min_samples_leaf=sampleleaf) treeOpt=treeOpt.fit(train_features,train_target) result_Opt=treeOpt.predict(test_features) return accuracy_score(test_target,result_Opt) result_optimise=dict() for values in splits: result_optimise[values]=list() for values in splits: for nodes in leafnodes: result_optimise[values].append([DtreeIter(features_train,target_train,features_test,target_test,values,nodes)]) #To find out best parameters optimal_split=max(result_optimise, key=lambda x: result_optimise[x][1]) optimal_accuracy=max(result_optimise[optimal_split]) optimal_leaf=leafnodes[list(result_optimise[optimal_split]).index(optimal_accuracy)] print("Optimal 'Sample Split Size' is %d and 'Optimal Leaf Samples' are %d. Best accuracy is %.2f%%" %(optimal_split,optimal_leaf,optimal_accuracy[0]*100)) plt.figure(figsize=(10,5)) plt.plot(splits,result_optimise[leafnodes[0]],'b',label='Leaf={}'.format(leafnodes[0])) plt.plot(splits,result_optimise[leafnodes[1]],'r',label='Leaf={}'.format(leafnodes[1])) plt.plot(splits,result_optimise[leafnodes[2]],'y',label='Leaf={}'.format(leafnodes[2])) plt.plot(splits,result_optimise[leafnodes[3]],'g',label='Leaf={}'.format(leafnodes[3])) plt.plot(splits,result_optimise[leafnodes[4]],'c',label='Leaf={}'.format(leafnodes[4])) plt.plot(splits,result_optimise[leafnodes[5]],'m',label='Leaf={}'.format(leafnodes[5])) plt.plot(splits,result_optimise[leafnodes[6]],'k',label='Leaf={}'.format(leafnodes[6])) plt.plot(splits,result_optimise[leafnodes[7]],'b',label='Leaf={}'.format(leafnodes[7])) plt.plot(splits,result_optimise[leafnodes[8]],'r',label='Leaf={}'.format(leafnodes[8])) plt.plot(splits,result_optimise[leafnodes[9]],'y',label='Leaf={}'.format(leafnodes[9])) plt.legend(loc=4) plt.xlabel('Min Sample Splits') plt.ylabel('Accuracy') plt.title('Classifier Accuracy') plt.show() """ Explanation: 2. 
Explain in words your reasoning for choosing the above ranges. <span style="color:Blue">Answer </span> The model we developed suffers from overfitting as demonstrated by the radical difference in accuracy when run on train and test data set. 1. We choose a higher min_samples_split as the default value of 2 is too low and clearly causes our model to overfit on the training data. Since the size of the data is too large, we select larger values. 2. We choose a higher min_samples_leaf as the default value of 1 is too low. However, we don't want the value to be too high as it can result in the model to demonstrate low variance and high bias. 3. For each combination of values in 3.1 (there should be 100), build a new classifier and check the classifier's accuracy on the test data. Plot the test set accuracy for these options. Use the values of min_samples_split as the x-axis and generate a new series (line) for each of min_samples_leaf. (5 Points) End of explanation """ from scipy import stats from statsmodels.graphics.api import qqplot fever=pd.read_csv("../Datasets/cases.csv") # We can directly read and convert using the read_csv function by using the below command: # fever=pd.read_csv("../Datasets/cases.csv",parse_dates=[0]) fever["YEAR"]=pd.to_datetime(fever["YEAR"],format="%Y") """ Explanation: 4. Which configuration returns the best accuracy? What is this accuracy? (Note, if you don't see much variation in the test set accuracy across values of min_samples_split or min_samples_leaf, try redoing the above steps with a different range of values). (1 Point) <span style="color:Blue">Answer </span> When we set the Sample Split size to 710 and the Optimal Leaf Samples to 110, we get the best accuracy of 60.31% This accuracy represents the percentage of times our model predicts the correct output. Values predicted by the model are compared with actual value in the test data set to determine this metric. 5. If you were working for a marketing department, how would you use your churn production model in a real business environment? Explain why churn prediction might be good for the business and how one might improve churn by using this model. (2 Points) <span style="color:Blue">Answer </span> Churn prediction is an extremely important activity for any company. In the marketing department, churn can be of both high performing salesmen as well as customers. * By analysing customer churn, a company can take reasonable steps to retain the existing customer. Losing a customer will not only cost the company in lost revenue, but the company will also incur additional cost to bring in a new customer which often results in higher costs due to extra advertising or onboarding benefits. Customer acquisition is 5-40 times more expensive than customer retention and it makes economic sense for the company to keep existing customers happy. Doing so may also result in an increase in profits due to increased engagement with customers. * By analysing employee churn, a company can retain its high performing employees by rewarding them with perks and benefits as the salesperson is often the face of the company during negotiations. Loss of a high performing employee will cause a drop in sales and might also cause existing customers associated with that salesperson to shift loyalties. By referring to the churn prediction model, the company can take decisive steps to pursue its employees and customers. 
* E.g.: A customer identified by the model may be rewarded with a free discount voucher or a free exclusive service for a trial period or more industry relevant and personally rewarding offers. By doing so, the customer may feel more attached to the company resulting in an enhanced utilisation of related products and services. * E.g.: A high performing salesperson identified by the model can be rewarded with a free holiday package or a free mobile phone or more personally rewarding products and services in order to increase the employee's association with the company. Part 3 - Working with Time Series (Total 10 Points) Here we will analyze a timeseries of number of Yellow Fever cases from a specific Asian country by year. The data is from 1700 – 2008. The file cases.csv is available on NYU Classes. 1. Load the timeseries data set, and prepare the dataset by converting the variables to date-time format (hint: use date tools). (1 point) End of explanation """ from pandas.plotting import autocorrelation_plot from statsmodels.tsa.stattools import pacf,acf plt.figure(1) plt.figure(figsize=(10,5)) plt.title("Autocorrelation Plot") autocorrelation_plot(fever["YFCASES"]) plt.figure(2) plt.figure(figsize=(10,5)) plt.title("Partial Autocorrelation Plot") plt.plot(pacf(fever["YFCASES"])) plt.show() """ Explanation: 2. Plot the autocorrelation function (ACF) and partial autocorrelation function (PCF) of the cases timeseries. (1 point) End of explanation """ # We run Durbin Watson test on the residuls that we obtain from the OLS. from statsmodels.regression.linear_model import OLS from statsmodels.stats.stattools import durbin_watson ols_residuals=OLS(fever["YFCASES"],np.ones(len(fever["YFCASES"]))).fit() durbin_watson(ols_residuals.resid) """ Explanation: 3. Describe what the plots indicate (in terms of autocorrelation and autoregressive parameter (p) and moving average (q)). 2 points. Some rules of thumb to recall:<br> Rule 1: If the ACF shows exponential decay, the PACF has a spike at lag 1, and no correlation for other lags, then use one autoregressive (p)parameter <br> Rule 2: If the ACF shows a sine-wave shape pattern or a set of exponential decays, the PACF has spikes at lags 1 and 2, and no correlation for other lags, the use two autoregressive (p) parameters. <br> Rule 3: If the ACF has a spike at lag 1, no correlation for other lags, and the PACF damps out exponentially, then use one moving average (q) parameter. <br> Rule 4: If the ACF has spikes at lags 1 and 2, no correlation for other lags, and the PACF has a sine-wave shape pattern or a set of exponential decays, then use two moving average (q) parameter.<br> Rule 5: If the ACF shows exponential decay starting at lag 1, and the PACF shows exponential decay starting at lag 1, then use one autoregressive (p) and one moving average (q) parameter.<br> <span style="color:Blue">Answer </span> 1. The autocorrelation plot shows a sine-wave pattern meaning that the data exhibits strong seasonality. 2. The partial autocorrelation plot shows sharp spikes at lag of 1 and 2. We use "Rule 2" and select the autocorrelation parameter as 2 i.e. p=2 and q=0 4. Another approach to assessing the presence of autocorrelation is by using the Durbin-Waton (DW) statistic. The value of the DW statistic is close to 2 if the errors are uncorrelated. What is DW for our data, and does this match what you observed from the ACF and PCF plots? 
(1 point) End of explanation """ from statsmodels import tsa import statsmodels.api as sm indexedFever=fever.set_index("YEAR") cases=indexedFever.astype(float) arma_result = sm.tsa.ARMA(cases,(2,0)).fit() cases['forecast'] = arma_result.predict(start = 260 , end= 309, dynamic= True) cases[['YFCASES', 'forecast']].plot(figsize=(10, 5)) plt.show() """ Explanation: <span style="color:Blue">Answer </span> * We observe that the Durbin-Watson statistic is approximately 0.35 which means that there is strong positive relation between the previous and the current value of reported cases of yellow fever. The DW statistic matches the output observed from the autocorrelation plot which indicated strong correlation with lagged values. * E.g.: If the number of yellow fever cases increase in the current year, they will increase further in the next year and vice versa. * We can use the ARMA model to remove this autocorrelation between lagged values. 5. Removing serial dependency by modeling a simple ARMA process with p and q as derived above. Take a look at what the resulting process looks like (plot) (1 point) End of explanation """ print(stats.normaltest(arma_result.resid)) figureP3 = plt.figure(figsize=(10,5)) ax = figureP3.add_subplot(1,1,1) figP3 = qqplot(arma_result.resid, line='q', ax=ax, fit=True) plt.show() """ Explanation: 6. Calculate the residuals, and test the null hypothesis that the residuals come from a normal distribution, and construct a qq-plot. Do the results of the hypothesis test and qq-plot align? (1 point) End of explanation """ plt.figure(figsize=(15,5)) plt.title("Autocorrelation plot") autocorrelation_plot(arma_result.resid) plt.show() acfValue=acf(arma_result.resid,qstat=True) autocorrelation_value=acfValue[0] qstat_value=acfValue[1] p_value=acfValue[2] acfValue """ Explanation: <span style="color:Blue">Answer </span> We performed the test of normality on our data and also plotted a qqplot. The results of our normality test and our qqplot do not match. Our null hypothesis is that the distribution is normal. However, the p-value is less than 0.05 giving us enough evidence to reject the null hypothesis. This means that the data is not from a normal distribution. Our qqplot indicates that our data follows the theoretical normal distribution line. 7. Now investigate the autocorrelation of your ARMA(p,q) model. Did it improve? These can be examined graphically, but a statistic will help. Next, we calculate the lag, autocorrelation (AC), Q statistic and Prob>Q. The Ljung–Box Q test is a type of statistical test of whether any of a group of autocorrelations of a time series are different from zero. The null hypothesis is, H0: The data are independently distributed (i.e. the correlations in the population from which the sample is taken are 0, so that any observed correlations in the data result from randomness of the sampling process). (Hint: use qstat in tsa.acf). End of explanation """ from pandas import datetime begin_year = datetime(2009,1,1) end_year = datetime(2012,1,1) forecasted = arma_result.predict(start=begin_year, end=end_year) forecasted """ Explanation: <span style="color:Blue">Answer </span> The autocorrelation plot has improved as we see an absence of sinusoidal pattern which was earlier evident. We have reduced the impact of seasonality using the ARMA model. The Ljung–Box Q test is used to indicate whether our data is independently distributed or not. Based on the analysis of p values, can reject the null hypothesis that the data is independently distributed. 
We conclude that our data exhibits serial correlation.
8. Compute predictions for years 2009-2012 and analyze their fit against actual values. (1 point)
End of explanation
"""
from pandas import datetime

begin_year = datetime(2009,1,1)
end_year = datetime(2012,1,1)
forecasted = arma_result.predict(start=begin_year, end=end_year)
forecasted
"""
Explanation: 9. Calculate the forecast error via MAE and MFE. (2 points)
Reminders:
Mean absolute error: The mean absolute error (MAE) value is computed as the average absolute error value. If MAE is zero the forecast is perfect. As compared to the mean squared error (MSE), this measure of fit “de-emphasizes” outliers (unique or rare large error values will affect the MAE less than the MSE).
Mean Forecast Error (MFE, also known as Bias). The MFE is the average error in the observations. A large positive MFE means that the forecast is undershooting the actual observations. A large negative MFE means the forecast is overshooting the actual observations. A value near zero is ideal, and generally a small value means a pretty good fit.
The MAE is a better indicator of fit than the MFE.
End of explanation
"""
from sklearn.metrics import mean_absolute_error

ferror_begin=datetime(1700,1,1)
ferror_end=datetime(2008,1,1)
predictionARMA=arma_result.predict(start=ferror_begin,end=ferror_end)

MAE=mean_absolute_error(fever["YFCASES"],predictionARMA)
# MFE is the mean forecast error (bias) as defined above: the signed average of the errors,
# so it is computed directly rather than with a squared-error metric.
MFE=np.mean(fever["YFCASES"].values - predictionARMA.values)

print("MAE is %f and MFE is %f" %(MAE,MFE))
"""
Explanation: End of explanation
"""
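# Added illustration (not part of the original homework): a tiny made-up example showing why the
# MFE alone can be misleading -- errors of +10 and -10 cancel out in the MFE but not in the MAE.
toy_actual    = np.array([100.0, 120.0, 140.0, 160.0])
toy_predicted = np.array([110.0, 110.0, 150.0, 150.0])
toy_errors = toy_actual - toy_predicted
print("toy MAE:", np.mean(np.abs(toy_errors)))  # 10.0 -> on average the forecast misses by 10 cases
print("toy MFE:", np.mean(toy_errors))          # 0.0  -> no bias, even though every forecast is wrong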
bharat-b7/NN_glimpse
2.2.1 CNN HandsOn - MNIST & FC Nets.ipynb
unlicense
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = ""
#os.environ['THEANO_FLAGS'] = "device=gpu2"
from keras.models import load_model
from keras.models import Sequential
from keras.layers.core import Dense, Dropout
from keras.optimizers import SGD

nb_classes = 10

# FC@512+relu -> DropOut(0.2) -> FC@512+relu -> DropOut(0.2) -> FC@nb_classes+softmax
# ... your Code Here

# %load solutions/sol_221_1.py
from keras.models import Sequential
from keras.layers.core import Dense, Dropout
from keras.optimizers import SGD

model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy'])
"""
Explanation: Fully Connected Feed-Forward Network In this notebook we will play with Feed-Forward FC-NN (Fully Connected Neural Network) for a classification task: Image Classification on MNIST Dataset RECALL In the FC-NN, the output of each layer is computed using the activations from the previous one, as follows: $$h_{i} = \sigma(W_i h_{i-1} + b_i)$$ where ${h}_i$ is the activation vector from the $i$-th layer (or the input data for $i=0$), ${W}_i$ and ${b}_i$ are the weight matrix and the bias vector for the $i$-th layer, respectively. <br><br> $\sigma(\cdot)$ is the activation function. In our example, we will use the ReLU activation function for the hidden layers and softmax for the last layer. To regularize the model, we will also insert a Dropout layer between consecutive hidden layers. Dropout works by “dropping out” some unit activations in a given layer, that is setting them to zero with a given probability. Our loss function will be the categorical crossentropy. Model definition Keras supports two different kinds of models: the Sequential model and the Graph model. The former is used to build linear stacks of layers (so each layer has one input and one output), and the latter supports any kind of connection graph. In our case we build a Sequential model with three Dense (aka fully connected) layers, with some Dropout. Notice that the output layer has the softmax activation function. The resulting model is actually a function of its own inputs implemented using the Keras backend. We apply the categorical crossentropy loss and choose SGD as the optimizer. Please note that Keras supports a variety of different optimizers and loss functions, which you may want to check out. End of explanation
"""

from keras.datasets import mnist
from keras.utils import np_utils

(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
X_train /= 255
X_test /= 255

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
"""
Explanation: Data preparation (keras.dataset) We will train our model on the MNIST dataset, which consists of 60,000 28x28 grayscale images of the 10 digits, along with a test set of 10,000 images. Since this dataset is provided with Keras, we just ask the keras.dataset model for training and test data. We will: download the data reshape data to be in vectorial form (original data are images) normalize between 0 and 1. 
The categorical_crossentropy loss expects a one-hot-vector as input, therefore we apply the to_categorical function from keras.utils to convert integer labels to one-hot-vectors. End of explanation
"""

# You can train the network yourself or simply load a saved model :P, for now!!
#network_history = model.fit(X_train, Y_train, batch_size=1000,
#                             nb_epoch=100, verbose=1, validation_data=(X_test, Y_test))
#model.save('example_MNIST_FC.h5')
model = load_model('example_MNIST_FC.h5')
model.summary()
"""
Explanation: Training Having defined and compiled the model, it can be trained using the fit function. We also specify a validation dataset to monitor validation loss and accuracy. End of explanation
"""

import matplotlib.pyplot as plt
%matplotlib inline

# NOTE: the plots below assume `network_history` exists, i.e. that you ran the
# (commented-out) model.fit(...) call above instead of only loading the saved model.
plt.figure()
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.plot(network_history.history['loss'])
plt.plot(network_history.history['val_loss'])
plt.legend(['Training', 'Validation'])

plt.figure()
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.plot(network_history.history['acc'])
plt.plot(network_history.history['val_acc'])
plt.legend(['Training', 'Validation'], loc='lower right')

import numpy as np
print(np.argmax(model.predict(X_test[5:10]), 1))
print(y_test[5:10])

# Can you write a snippet that finds a misclassified sample in X_train and
# displays the image, correct classification and your prediction
"""
Explanation: Plotting Network Performance Trend The return value of the fit function is a keras.callbacks.History object which contains the entire history of training/validation loss and accuracy, for each epoch. We can therefore plot the behaviour of loss and accuracy during the training phase. End of explanation
"""
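"""
Explanation: One possible answer to the exercise in the comment above, as a sketch: it assumes model, X_train and y_train from the earlier cells and simply scans the training set for the first sample whose predicted class disagrees with its label. The names preds, mismatches and idx are illustrative. End of explanation
"""

import numpy as np
import matplotlib.pyplot as plt

# Predict classes for the training set and look for disagreements with y_train.
preds = np.argmax(model.predict(X_train), axis=1)
mismatches = np.where(preds != y_train)[0]

if len(mismatches) > 0:
    idx = mismatches[0]
    # X_train was flattened to 784 values; reshape back to 28x28 for display.
    plt.imshow(X_train[idx].reshape(28, 28), cmap='gray')
    plt.title("true: %d - predicted: %d" % (y_train[idx], preds[idx]))
    plt.show()
else:
    print("No misclassified training samples found.")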
lewisamarshall/ionize
interaction_constants.ipynb
gpl-2.0
# imports
from ionize import Aqueous
from math import sqrt, pi
import pint
ur = pint.UnitRegistry()
Q = ur.Quantity

# define values
temperature = Q(25, 'degC')
e = ur.elementary_charge
kb = ur.boltzmann_constant
dielectric = Aqueous.dielectric(temperature.magnitude)
viscosity = Aqueous.viscosity(temperature.magnitude) * ur.pascal * ur.second
Na = ur.avogadro_number
eps = ur.vacuum_permittivity
"""
Explanation: Ion Interaction Ion interaction effects are important for correcting the mobility of ions based on the ionic strength of the solution. These effects are laid out in Irreversible Processes in Electrophoresis, by Onsager and Fuoss. These effects have prefactors that need to be converted to appropriate units for use. Here, I'm going to resolve the units on these prefactors, and resolve the differences between the paper and the STEEP implementation. End of explanation
"""

# STEEP: F*0.2297*z.*omega+31.410e-9
coefficient_1 = 0.2297 * ((temperature.to('degK'))**.5*dielectric)**3.
coefficient_2 = 31.410e-9 * viscosity * (temperature.to('degK')**.5*dielectric)
print(coefficient_1, coefficient_2)
"""
Explanation: Robinson-Stokes End of explanation
"""

# Bhaga Paper
A = e**3 / 12 / pi
A *= (Na / (dielectric * eps * kb * temperature.to('degK'))**3)**.5
print('[A] =', A.dimensionality)
print('A =', A.to('liter^.5/mol^.5'))

B = e**2 / 6 / pi / viscosity
B *= (Na / (dielectric * eps * kb * temperature.to('degK')))**.5
print('[B] =', B.dimensionality)
print('B =', B.to('m^2/V/s* liter^.5/mol^.5'))
"""
Explanation: STEEP O-F
mob_new= F * omega - (F*0.78420*z.*factor.*omega* / dielectric^1.5 / temperature^1.5 + 31.410e-9 * viscosity / dielectric^.5 / temperature^.5 .* sqrt(IonicStr/2000)./(1+1.5*sqrt((yy(5)*yy(1))/(yy(6)*yy(2))) *sqrt(IonicStr/2000));
STEEP RS
%Robinson-Stokes, just checking dont use this.
%mob_new=F*omega-(F*0.2297*z.*omega+31.410e-9).*sqrt(IonicStr/2000)./(1+1.5*sqrt(IonicStr/2000));
%Assemble matrix back
Onsager-Fuoss
The Onsager-Fuoss constants A and B. End of explanation
"""

Ap = e**3 / 12 / pi
Ap *= (Na / (eps * kb)**3)**.5
print(Ap.dimensionality)
print(Ap.to('liter^.5*degK^1.5/mol^.5'))

Bp = e**2 / 6 / pi
Bp *= (Na / (eps * kb))**.5
print(Bp.dimensionality)
print(Bp.to('m^2/V/s* liter^.5/mol^.5 * Pa * s * degK**.5'))

# TODO: check steep and spresso implementation
# also check onsager-fuoss paper
"""
Explanation: Temperature-dependent values Below are versions of the prefactors that only include the temperature-independent parts. A = Ap / dielectric^1.5 / temperature^1.5 B = Bp / viscosity / dielectric^.5 / temperature^.5 End of explanation
"""

D = (2 * e ** 2 * Na / dielectric / eps / kb / temperature.to('degK'))**.5
print(D.dimensionality)

(D).to('m^.5/mol^.5')

(1/D / Q('(1e-7 mol/L)^.5')).to('um')

Dp = (2 * e ** 2 * Na / eps / kb)**.5
print(Dp.dimensionality)

Dp.to(())
"""
Explanation: Pitts End of explanation
"""
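"""
Explanation: A small consistency check, as a sketch: assuming the cells above have run, it rebuilds the temperature-dependent Onsager-Fuoss constants from the temperature-independent prefactors, using A = Ap / (dielectric x T)^1.5 and B = Bp / (viscosity x sqrt(dielectric x T)), and compares them with the directly computed A and B. The names A_check and B_check are illustrative. End of explanation
"""

# Rebuild the temperature-dependent constants from the prefactors above and
# compare with the directly computed values.
T = temperature.to('degK')
A_check = Ap / (dielectric * T)**1.5
B_check = Bp / (viscosity * (dielectric * T)**.5)

print('A       =', A.to('liter^.5/mol^.5'))
print('A_check =', A_check.to('liter^.5/mol^.5'))
print('B       =', B.to('m^2/V/s* liter^.5/mol^.5'))
print('B_check =', B_check.to('m^2/V/s* liter^.5/mol^.5'))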
yttty/python3-scraper-tutorial
Python_Spider_Tutorial_01.ipynb
gpl-3.0
#encoding:UTF-8
import urllib.request

url = "http://www.pku.edu.cn"
data = urllib.request.urlopen(url).read()
data = data.decode('UTF-8')
print(data)
"""
Explanation: Developing a Web Crawler with Python 3 By Terrill Yang (Github: https://github.com/yttty). Compiled from the Zhihu column "The Python 3.x crawler learning materials you need". Developing a Web Crawler with Python 3 - Chapter 01 1. A simple piece of pseudocode The simple pseudocode below uses two classic data structures, a set and a queue. The set records the pages that have already been visited; the queue drives the breadth-first search.
queue Q
set S
StartPoint = "http://jecvay.com"
Q.push(StartPoint)  # the classic BFS opening
S.insert(StartPoint)  # mark a page as visited before visiting it
while (Q.empty() == false)  # BFS loop body
    T = Q.top()  # and pop it
    for point in PageUrl(T)  # PageUrl(T) is the set of all urls in page T; point is one element of that set
        if (point not in S)
            Q.push(point)
            S.insert(point)
This pseudocode cannot be executed, but it is easy to follow; it is the simplest BFS structure. After reading the pseudocode in that Zhihu answer, I rewrote it in my own style, and you should rewrite it in yours as well. The Set used here is implemented internally with a hash table; for a crawler, a traditional hash table takes up too much space, so a data structure called a Bloom Filter is better suited to replace the hash-based set here. I plan to look at how to use that data structure later and will skip it for now, because for a complete beginner like me it is not the main point. 2. Fetching a specified page with Python End of explanation
"""

a = urllib.request.urlopen(url)
type(a)

a.geturl()

a.info()

a.getcode()
"""
Explanation: urllib.request is a module belonging to urllib. Click here to open the relevant official documentation. How should the official documentation be used? First, the page behind the link just mentioned lists the sub-modules of urllib; for now we only use request, so we start with the urllib.request part. The first thing you see is a one-sentence description of what this module is for: The urllib.request module defines functions and classes which help in opening URLs (mostly HTTP) in a complex world — basic and digest authentication, redirections, cookies and more. Then read through the part covering the urlopen() function used in our code: urllib.request.urlopen(url, data=None, [timeout, ]*, cafile=None, capath=None, cadefault=False) The key part is the return value: this function returns an http.client.HTTPResponse object, which in turn has various methods, such as the read() method we used; you can follow the links in the official documentation to read about each of them. Following the official documentation, after running the program above in the console, I went on to run the code below to get more familiar with what these assorted methods do. End of explanation
"""

import urllib
import urllib.request

data={}
data['word']='Jecvay Notes'

url_values=urllib.parse.urlencode(data)
url="http://www.baidu.com/s?"
full_url=url+url_values

data=urllib.request.urlopen(full_url).read()
data=data.decode('UTF-8')
print(data)
"""
Explanation: 3. Simple URL handling with Python To fetch the Baidu results page for the search keyword Jecvay Notes, the code is as follows End of explanation
"""
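"""
Explanation: A runnable version of the BFS pseudocode from section 1, as a sketch only: it uses a deque as the queue and a set for visited pages, and extracts links with a crude regular expression rather than a real HTML parser, so it only illustrates the structure. The start URL and the max_pages limit are arbitrary choices for the example. End of explanation
"""

import re
import urllib.request
from collections import deque

start_point = "http://jecvay.com"
queue = deque([start_point])   # pages waiting to be visited (BFS frontier)
visited = {start_point}        # pages already seen, so we never enqueue twice

max_pages = 10  # small limit so the example terminates quickly
while queue and max_pages > 0:
    page_url = queue.popleft()
    max_pages -= 1
    try:
        page = urllib.request.urlopen(page_url, timeout=5).read().decode('UTF-8', errors='ignore')
    except Exception as err:
        print("failed:", page_url, err)
        continue
    print("visited:", page_url)
    # Crude link extraction; a real crawler would use an HTML parser.
    for link in re.findall(r'href="(http[^"]+)"', page):
        if link not in visited:
            visited.add(link)
            queue.append(link)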
GoogleCloudPlatform/vertex-ai-samples
notebooks/community/gapic/automl/showcase_automl_image_classification_export_edge.ipynb
apache-2.0
import os import sys # Google Cloud Notebook if os.path.exists("/opt/deeplearning/metadata/env_version"): USER_FLAG = "--user" else: USER_FLAG = "" ! pip3 install -U google-cloud-aiplatform $USER_FLAG """ Explanation: Vertex client library: AutoML image classification model for export to edge <table align="left"> <td> <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/automl/showcase_automl_image_classification_export_edge.ipynb"> <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab </a> </td> <td> <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/automl/showcase_automl_image_classification_export_edge.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> </table> <br/><br/><br/> Overview This tutorial demonstrates how to use the Vertex client library for Python to create image classification models to export as an Edge model using Google Cloud's AutoML. Dataset The dataset used for this tutorial is the Flowers dataset from TensorFlow Datasets. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the type of flower an image is from a class of five flowers: daisy, dandelion, rose, sunflower, or tulip. Objective In this tutorial, you create a AutoML image classification model from a Python script using the Vertex client library, and then export the model as an Edge model in TFLite format. You can alternatively create models with AutoML using the gcloud command-line tool or online using the Google Cloud Console. The steps performed include: Create a Vertex Dataset resource. Train the model. Export the Edge model from the Model resource to Cloud Storage. Download the model locally. Make a local prediction. Costs This tutorial uses billable components of Google Cloud (GCP): Vertex AI Cloud Storage Learn about Vertex AI pricing and Cloud Storage pricing, and use the Pricing Calculator to generate a cost estimate based on your projected usage. Installation Install the latest version of Vertex client library. End of explanation """ ! pip3 install -U google-cloud-storage $USER_FLAG """ Explanation: Install the latest GA version of google-cloud-storage library as well. End of explanation """ if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) """ Explanation: Restart the kernel Once you've installed the Vertex client library and Google cloud-storage, you need to restart the notebook kernel so it can find the packages. End of explanation """ PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID """ Explanation: Before you begin GPU runtime Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select Runtime > Change Runtime Type > GPU Set up your Google Cloud project The following steps are required, regardless of your notebook environment. Select or create a Google Cloud project. 
When you first create an account, you get a $300 free credit towards your compute/storage costs. Make sure that billing is enabled for your project. Enable the Vertex APIs and Compute Engine APIs. The Google Cloud SDK is already installed in Google Cloud Notebook. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $ into these commands. End of explanation """ REGION = "us-central1" # @param {type: "string"} """ Explanation: Region You can also change the REGION variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you. Americas: us-central1 Europe: europe-west4 Asia Pacific: asia-east1 You may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the Vertex locations documentation End of explanation """ from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") """ Explanation: Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial. End of explanation """ # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. # If on Google Cloud Notebook, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' """ Explanation: Authenticate your Google Cloud account If you are using Google Cloud Notebook, your environment is already authenticated. Skip this step. If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth. Otherwise, follow these steps: In the Cloud Console, go to the Create service account key page. Click Create service account. In the Service account name field, enter a name, and click Create. In the Grant this service account access to project section, click the Role drop-down list. Type "Vertex" into the filter box, and select Vertex Administrator. Type "Storage Object Admin" into the filter box, and select Storage Object Admin. Click Create. A JSON file that contains your key downloads to your local environment. Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell. End of explanation """ BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP """ Explanation: Create a Cloud Storage bucket The following steps are required, regardless of your notebook environment. 
This tutorial is designed to use training data that is in a public Cloud Storage bucket and a local Cloud Storage bucket for exporting the trained model. You may alternatively use your own training data that you have stored in a local Cloud Storage bucket. Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization. End of explanation """ ! gsutil mb -l $REGION $BUCKET_NAME """ Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket. End of explanation """ ! gsutil ls -al $BUCKET_NAME """ Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents: End of explanation """ import time from google.cloud.aiplatform import gapic as aip from google.protobuf import json_format from google.protobuf.json_format import MessageToJson, ParseDict from google.protobuf.struct_pb2 import Struct, Value """ Explanation: Set up variables Next, set up some variables used throughout the tutorial. Import libraries and define constants Import Vertex client library Import the Vertex client library into our Python environment. End of explanation """ # API service endpoint API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION) # Vertex location root path for your dataset, model and endpoint resources PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION """ Explanation: Vertex constants Setup up the following constants for Vertex: API_ENDPOINT: The Vertex API service endpoint for dataset, model, job, pipeline and endpoint services. PARENT: The Vertex location root path for dataset, model, job, pipeline and endpoint resources. End of explanation """ # Image Dataset type DATA_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml" # Image Labeling type LABEL_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/ioformat/image_classification_single_label_io_format_1.0.0.yaml" # Image Training task TRAINING_SCHEMA = "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_image_classification_1.0.0.yaml" """ Explanation: AutoML constants Set constants unique to AutoML datasets and training: Dataset Schemas: Tells the Dataset resource service which type of dataset it is. Data Labeling (Annotations) Schemas: Tells the Dataset resource service how the data is labeled (annotated). Dataset Training Schemas: Tells the Pipeline resource service the task (e.g., classification) to train the model for. End of explanation """ # client options same for all services client_options = {"api_endpoint": API_ENDPOINT} def create_dataset_client(): client = aip.DatasetServiceClient(client_options=client_options) return client def create_model_client(): client = aip.ModelServiceClient(client_options=client_options) return client def create_pipeline_client(): client = aip.PipelineServiceClient(client_options=client_options) return client clients = {} clients["dataset"] = create_dataset_client() clients["model"] = create_model_client() clients["pipeline"] = create_pipeline_client() for client in clients.items(): print(client) """ Explanation: Tutorial Now you are ready to start creating your own AutoML image classification model. Set up clients The Vertex client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex server. 
You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront. Dataset Service for Dataset resources. Model Service for Model resources. Pipeline Service for training. End of explanation """ TIMEOUT = 90 def create_dataset(name, schema, labels=None, timeout=TIMEOUT): start_time = time.time() try: dataset = aip.Dataset( display_name=name, metadata_schema_uri=schema, labels=labels ) operation = clients["dataset"].create_dataset(parent=PARENT, dataset=dataset) print("Long running operation:", operation.operation.name) result = operation.result(timeout=TIMEOUT) print("time:", time.time() - start_time) print("response") print(" name:", result.name) print(" display_name:", result.display_name) print(" metadata_schema_uri:", result.metadata_schema_uri) print(" metadata:", dict(result.metadata)) print(" create_time:", result.create_time) print(" update_time:", result.update_time) print(" etag:", result.etag) print(" labels:", dict(result.labels)) return result except Exception as e: print("exception:", e) return None result = create_dataset("flowers-" + TIMESTAMP, DATA_SCHEMA) """ Explanation: Dataset Now that your clients are ready, your first step in training a model is to create a managed dataset instance, and then upload your labeled data to it. Create Dataset resource instance Use the helper function create_dataset to create the instance of a Dataset resource. This function does the following: Uses the dataset client service. Creates an Vertex Dataset resource (aip.Dataset), with the following parameters: display_name: The human-readable name you choose to give it. metadata_schema_uri: The schema for the dataset type. Calls the client dataset service method create_dataset, with the following parameters: parent: The Vertex location root path for your Database, Model and Endpoint resources. dataset: The Vertex dataset object instance you created. The method returns an operation object. An operation object is how Vertex handles asynchronous calls for long running operations. While this step usually goes fast, when you first use it in your project, there is a longer delay due to provisioning. You can use the operation object to get status on the operation (e.g., create Dataset resource) or to cancel the operation, by invoking an operation method: | Method | Description | | ----------- | ----------- | | result() | Waits for the operation to complete and returns a result object in JSON format. | | running() | Returns True/False on whether the operation is still running. | | done() | Returns True/False on whether the operation is completed. | | canceled() | Returns True/False on whether the operation was canceled. | | cancel() | Cancels the operation (this may take up to 30 seconds). | End of explanation """ # The full unique ID for the dataset dataset_id = result.name # The short numeric ID for the dataset dataset_short_id = dataset_id.split("/")[-1] print(dataset_id) """ Explanation: Now save the unique dataset identifier for the Dataset resource instance you created. End of explanation """ IMPORT_FILE = ( "gs://cloud-samples-data/vision/automl_classification/flowers/all_data_v2.csv" ) """ Explanation: Data preparation The Vertex Dataset resource for images has some requirements for your data: Images must be stored in a Cloud Storage bucket. Each image file must be in an image format (PNG, JPEG, BMP, ...). There must be an index file stored in your Cloud Storage bucket that contains the path and label for each image. 
The index file must be either CSV or JSONL. CSV For image classification, the CSV index file has the requirements: No heading. First column is the Cloud Storage path to the image. Second column is the label. Location of Cloud Storage training data. Now set the variable IMPORT_FILE to the location of the CSV index file in Cloud Storage. End of explanation """ if "IMPORT_FILES" in globals(): FILE = IMPORT_FILES[0] else: FILE = IMPORT_FILE count = ! gsutil cat $FILE | wc -l print("Number of Examples", int(count[0])) print("First 10 rows") ! gsutil cat $FILE | head """ Explanation: Quick peek at your data You will use a version of the Flowers dataset that is stored in a public Cloud Storage bucket, using a CSV index file. Start by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (wc -l) and then peek at the first few rows. End of explanation """ def import_data(dataset, gcs_sources, schema): config = [{"gcs_source": {"uris": gcs_sources}, "import_schema_uri": schema}] print("dataset:", dataset_id) start_time = time.time() try: operation = clients["dataset"].import_data( name=dataset_id, import_configs=config ) print("Long running operation:", operation.operation.name) result = operation.result() print("result:", result) print("time:", int(time.time() - start_time), "secs") print("error:", operation.exception()) print("meta :", operation.metadata) print( "after: running:", operation.running(), "done:", operation.done(), "cancelled:", operation.cancelled(), ) return operation except Exception as e: print("exception:", e) return None import_data(dataset_id, [IMPORT_FILE], LABEL_SCHEMA) """ Explanation: Import data Now, import the data into your Vertex Dataset resource. Use this helper function import_data to import the data. The function does the following: Uses the Dataset client. Calls the client method import_data, with the following parameters: name: The human readable name you give to the Dataset resource (e.g., flowers). import_configs: The import configuration. import_configs: A Python list containing a dictionary, with the key/value entries: gcs_sources: A list of URIs to the paths of the one or more index files. import_schema_uri: The schema identifying the labeling type. The import_data() method returns a long running operation object. This will take a few minutes to complete. If you are in a live tutorial, this would be a good time to ask questions, or take a personal break. End of explanation """ def create_pipeline(pipeline_name, model_name, dataset, schema, task): dataset_id = dataset.split("/")[-1] input_config = { "dataset_id": dataset_id, "fraction_split": { "training_fraction": 0.8, "validation_fraction": 0.1, "test_fraction": 0.1, }, } training_pipeline = { "display_name": pipeline_name, "training_task_definition": schema, "training_task_inputs": task, "input_data_config": input_config, "model_to_upload": {"display_name": model_name}, } try: pipeline = clients["pipeline"].create_training_pipeline( parent=PARENT, training_pipeline=training_pipeline ) print(pipeline) except Exception as e: print("exception:", e) return None return pipeline """ Explanation: Train the model Now train an AutoML image classification model using your Vertex Dataset resource. To train the model, do the following steps: Create an Vertex training pipeline for the Dataset resource. Execute the pipeline to start the training. Create a training pipeline You may ask, what do we use a pipeline for? 
You typically use pipelines when the job (such as training) has multiple steps, generally in sequential order: do step A, do step B, etc. By putting the steps into a pipeline, we gain the benefits of: Being reusable for subsequent training jobs. Can be containerized and ran as a batch job. Can be distributed. All the steps are associated with the same pipeline job for tracking progress. Use this helper function create_pipeline, which takes the following parameters: pipeline_name: A human readable name for the pipeline job. model_name: A human readable name for the model. dataset: The Vertex fully qualified dataset identifier. schema: The dataset labeling (annotation) training schema. task: A dictionary describing the requirements for the training job. The helper function calls the Pipeline client service'smethod create_pipeline, which takes the following parameters: parent: The Vertex location root path for your Dataset, Model and Endpoint resources. training_pipeline: the full specification for the pipeline training job. Let's look now deeper into the minimal requirements for constructing a training_pipeline specification: display_name: A human readable name for the pipeline job. training_task_definition: The dataset labeling (annotation) training schema. training_task_inputs: A dictionary describing the requirements for the training job. model_to_upload: A human readable name for the model. input_data_config: The dataset specification. dataset_id: The Vertex dataset identifier only (non-fully qualified) -- this is the last part of the fully-qualified identifier. fraction_split: If specified, the percentages of the dataset to use for training, test and validation. Otherwise, the percentages are automatically selected by AutoML. End of explanation """ PIPE_NAME = "flowers_pipe-" + TIMESTAMP MODEL_NAME = "flowers_model-" + TIMESTAMP task = json_format.ParseDict( { "multi_label": False, "budget_milli_node_hours": 8000, "model_type": "MOBILE_TF_LOW_LATENCY_1", "disable_early_stopping": False, }, Value(), ) response = create_pipeline(PIPE_NAME, MODEL_NAME, dataset_id, TRAINING_SCHEMA, task) """ Explanation: Construct the task requirements Next, construct the task requirements. Unlike other parameters which take a Python (JSON-like) dictionary, the task field takes a Google protobuf Struct, which is very similar to a Python dictionary. Use the json_format.ParseDict method for the conversion. The minimal fields we need to specify are: multi_label: Whether True/False this is a multi-label (vs single) classification. budget_milli_node_hours: The maximum time to budget (billed) for training the model, where 1000 = 1 hour. For image classification, the budget must be a minimum of 8 hours. model_type: The type of deployed model: CLOUD: For deploying to Google Cloud. MOBILE_TF_LOW_LATENCY_1: For deploying to the edge and optimizing for latency (response time). MOBILE_TF_HIGH_ACCURACY_1: For deploying to the edge and optimizing for accuracy. MOBILE_TF_VERSATILE_1: For deploying to the edge and optimizing for a trade off between latency and accuracy. disable_early_stopping: Whether True/False to let AutoML use its judgement to stop training early or train for the entire budget. Finally, create the pipeline by calling the helper function create_pipeline, which returns an instance of a training pipeline object. 
End of explanation """ # The full unique ID for the pipeline pipeline_id = response.name # The short numeric ID for the pipeline pipeline_short_id = pipeline_id.split("/")[-1] print(pipeline_id) """ Explanation: Now save the unique identifier of the training pipeline you created. End of explanation """ def get_training_pipeline(name, silent=False): response = clients["pipeline"].get_training_pipeline(name=name) if silent: return response print("pipeline") print(" name:", response.name) print(" display_name:", response.display_name) print(" state:", response.state) print(" training_task_definition:", response.training_task_definition) print(" training_task_inputs:", dict(response.training_task_inputs)) print(" create_time:", response.create_time) print(" start_time:", response.start_time) print(" end_time:", response.end_time) print(" update_time:", response.update_time) print(" labels:", dict(response.labels)) return response response = get_training_pipeline(pipeline_id) """ Explanation: Get information on a training pipeline Now get pipeline information for just this training pipeline instance. The helper function gets the job information for just this job by calling the the job client service's get_training_pipeline method, with the following parameter: name: The Vertex fully qualified pipeline identifier. When the model is done training, the pipeline state will be PIPELINE_STATE_SUCCEEDED. End of explanation """ while True: response = get_training_pipeline(pipeline_id, True) if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED: print("Training job has not completed:", response.state) model_to_deploy_id = None if response.state == aip.PipelineState.PIPELINE_STATE_FAILED: raise Exception("Training Job Failed") else: model_to_deploy = response.model_to_upload model_to_deploy_id = model_to_deploy.name print("Training Time:", response.end_time - response.start_time) break time.sleep(60) print("model to deploy:", model_to_deploy_id) """ Explanation: Deployment Training the above model may take upwards of 30 minutes time. Once your model is done training, you can calculate the actual time it took to train the model by subtracting end_time from start_time. For your model, you will need to know the fully qualified Vertex Model resource identifier, which the pipeline service assigned to it. You can get this from the returned pipeline instance as the field model_to_deploy.name. End of explanation """ def list_model_evaluations(name): response = clients["model"].list_model_evaluations(parent=name) for evaluation in response: print("model_evaluation") print(" name:", evaluation.name) print(" metrics_schema_uri:", evaluation.metrics_schema_uri) metrics = json_format.MessageToDict(evaluation._pb.metrics) for metric in metrics.keys(): print(metric) print("logloss", metrics["logLoss"]) print("auPrc", metrics["auPrc"]) return evaluation.name last_evaluation = list_model_evaluations(model_to_deploy_id) """ Explanation: Model information Now that your model is trained, you can get some information on your model. Evaluate the Model resource Now find out how good the model service believes your model is. As part of training, some portion of the dataset was set aside as the test (holdout) data, which is used by the pipeline service to evaluate the model. List evaluations for all slices Use this helper function list_model_evaluations, which takes the following parameter: name: The Vertex fully qualified model identifier for the Model resource. 
This helper function uses the model client service's list_model_evaluations method, which takes the same parameter. The response object from the call is a list, where each element is an evaluation metric. For each evaluation (you probably only have one) we then print all the key names for each metric in the evaluation, and for a small set (logLoss and auPrc) you will print the result. End of explanation """ MODEL_DIR = BUCKET_NAME + "/" + "flowers" def export_model(name, format, gcs_dest): output_config = { "artifact_destination": {"output_uri_prefix": gcs_dest}, "export_format_id": format, } response = clients["model"].export_model(name=name, output_config=output_config) print("Long running operation:", response.operation.name) result = response.result(timeout=1800) metadata = response.operation.metadata artifact_uri = str(metadata.value).split("\\")[-1][4:-1] print("Artifact Uri", artifact_uri) return artifact_uri model_package = export_model(model_to_deploy_id, "tflite", MODEL_DIR) """ Explanation: Export as Edge model You can export an AutoML image classification model as an Edge model which you can then custom deploy to an edge device, such as a mobile phone or IoT device, or download locally. Use this helper function export_model to export the model to Google Cloud, which takes the following parameters: name: The Vertex fully qualified identifier for the Model resource. format: The format to save the model format as. gcs_dest: The Cloud Storage location to store the SavedFormat model artifacts to. This function calls the Model client service's method export_model, with the following parameters: name: The Vertex fully qualified identifier for the Model resource. output_config: The destination information for the exported model. artifact_destination.output_uri_prefix: The Cloud Storage location to store the SavedFormat model artifacts to. export_format_id: The format to save the model format as. For AutoML image classification: tf-saved-model: TensorFlow SavedFormat for deployment to a container. tflite: TensorFlow Lite for deployment to an edge or mobile device. edgetpu-tflite: TensorFlow Lite for TPU tf-js: TensorFlow for web client coral-ml: for Coral devices The method returns a long running operation response. We will wait sychronously for the operation to complete by calling the response.result(), which will block until the model is exported. End of explanation """ ! gsutil ls $model_package # Download the model artifacts ! gsutil cp -r $model_package tflite tflite_path = "tflite/model.tflite" """ Explanation: Download the TFLite model artifacts Now that you have an exported TFLite version of your model, you can test the exported model locally, but first downloading it from Cloud Storage. End of explanation """ import tensorflow as tf interpreter = tf.lite.Interpreter(model_path=tflite_path) interpreter.allocate_tensors() input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() input_shape = input_details[0]["shape"] print("input tensor shape", input_shape) """ Explanation: Instantiate a TFLite interpreter The TFLite version of the model is not a TensorFlow SavedModel format. You cannot directly use methods like predict(). Instead, one uses the TFLite interpreter. You must first setup the interpreter for the TFLite model as follows: Instantiate an TFLite interpreter for the TFLite model. Instruct the interpreter to allocate input and output tensors for the model. 
Get detail information about the models input and output tensors that will need to be known for prediction. End of explanation """ test_items = ! gsutil cat $IMPORT_FILE | head -n1 test_item = test_items[0].split(",")[0] with tf.io.gfile.GFile(test_item, "rb") as f: content = f.read() test_image = tf.io.decode_jpeg(content) print("test image shape", test_image.shape) test_image = tf.image.resize(test_image, (224, 224)) print("test image shape", test_image.shape, test_image.dtype) test_image = tf.cast(test_image, dtype=tf.uint8).numpy() """ Explanation: Get test item You will use an arbitrary example out of the dataset as a test item. Don't be concerned that the example was likely used in training the model -- we just want to demonstrate how to make a prediction. End of explanation """ import numpy as np data = np.expand_dims(test_image, axis=0) interpreter.set_tensor(input_details[0]["index"], data) interpreter.invoke() softmax = interpreter.get_tensor(output_details[0]["index"]) label = np.argmax(softmax) print(label) """ Explanation: Make a prediction with TFLite model Finally, you do a prediction using your TFLite model, as follows: Convert the test image into a batch of a single image (np.expand_dims) Set the input tensor for the interpreter to your batch of a single image (data). Invoke the interpreter. Retrieve the softmax probabilities for the prediction (get_tensor). Determine which label had the highest probability (np.argmax). End of explanation """ delete_dataset = True delete_pipeline = True delete_model = True delete_endpoint = True delete_batchjob = True delete_customjob = True delete_hptjob = True delete_bucket = True # Delete the dataset using the Vertex fully qualified identifier for the dataset try: if delete_dataset and "dataset_id" in globals(): clients["dataset"].delete_dataset(name=dataset_id) except Exception as e: print(e) # Delete the training pipeline using the Vertex fully qualified identifier for the pipeline try: if delete_pipeline and "pipeline_id" in globals(): clients["pipeline"].delete_training_pipeline(name=pipeline_id) except Exception as e: print(e) # Delete the model using the Vertex fully qualified identifier for the model try: if delete_model and "model_to_deploy_id" in globals(): clients["model"].delete_model(name=model_to_deploy_id) except Exception as e: print(e) # Delete the endpoint using the Vertex fully qualified identifier for the endpoint try: if delete_endpoint and "endpoint_id" in globals(): clients["endpoint"].delete_endpoint(name=endpoint_id) except Exception as e: print(e) # Delete the batch job using the Vertex fully qualified identifier for the batch job try: if delete_batchjob and "batch_job_id" in globals(): clients["job"].delete_batch_prediction_job(name=batch_job_id) except Exception as e: print(e) # Delete the custom job using the Vertex fully qualified identifier for the custom job try: if delete_customjob and "job_id" in globals(): clients["job"].delete_custom_job(name=job_id) except Exception as e: print(e) # Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job try: if delete_hptjob and "hpt_job_id" in globals(): clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id) except Exception as e: print(e) if delete_bucket and "BUCKET_NAME" in globals(): ! gsutil rm -r $BUCKET_NAME """ Explanation: Cleaning up To clean up all GCP resources used in this project, you can delete the GCP project you used for the tutorial. 
Otherwise, you can delete the individual resources you created in this tutorial: Dataset Pipeline Model Endpoint Batch Job Custom Job Hyperparameter Tuning Job Cloud Storage Bucket End of explanation """
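"""
Explanation: A small convenience wrapper, as a sketch only: it folds the TFLite prediction steps described earlier (decode, resize, cast to uint8, set tensor, invoke, argmax) into one function, assuming the interpreter, input_details, output_details and test_item created above are still available. The function name predict_tflite is illustrative and not part of the Vertex tutorial. End of explanation
"""

import numpy as np
import tensorflow as tf

def predict_tflite(image_bytes):
    # Decode, resize to the 224x224 input used above, and cast to uint8.
    image = tf.io.decode_jpeg(image_bytes)
    image = tf.image.resize(image, (224, 224))
    image = tf.cast(image, dtype=tf.uint8).numpy()
    # Batch of a single image, run the interpreter, return the argmax label.
    interpreter.set_tensor(input_details[0]["index"], np.expand_dims(image, axis=0))
    interpreter.invoke()
    probs = interpreter.get_tensor(output_details[0]["index"])
    return int(np.argmax(probs))

with tf.io.gfile.GFile(test_item, "rb") as f:
    print("predicted label:", predict_tflite(f.read()))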
ES-DOC/esdoc-jupyterhub
notebooks/ncc/cmip6/models/sandbox-1/ocean.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'ncc', 'sandbox-1', 'ocean') """ Explanation: ES-DOC CMIP6 Model Properties - Ocean MIP Era: CMIP6 Institute: NCC Source ID: SANDBOX-1 Topic: Ocean Sub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing. Properties: 133 (101 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:25 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Seawater Properties 3. Key Properties --&gt; Bathymetry 4. Key Properties --&gt; Nonoceanic Waters 5. Key Properties --&gt; Software Properties 6. Key Properties --&gt; Resolution 7. Key Properties --&gt; Tuning Applied 8. Key Properties --&gt; Conservation 9. Grid 10. Grid --&gt; Discretisation --&gt; Vertical 11. Grid --&gt; Discretisation --&gt; Horizontal 12. Timestepping Framework 13. Timestepping Framework --&gt; Tracers 14. Timestepping Framework --&gt; Baroclinic Dynamics 15. Timestepping Framework --&gt; Barotropic 16. Timestepping Framework --&gt; Vertical Physics 17. Advection 18. Advection --&gt; Momentum 19. Advection --&gt; Lateral Tracers 20. Advection --&gt; Vertical Tracers 21. Lateral Physics 22. Lateral Physics --&gt; Momentum --&gt; Operator 23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff 24. Lateral Physics --&gt; Tracers 25. Lateral Physics --&gt; Tracers --&gt; Operator 26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusity Coeff 27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity 28. Vertical Physics 29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details 30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers 31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum 32. Vertical Physics --&gt; Interior Mixing --&gt; Details 33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers 34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum 35. Uplow Boundaries --&gt; Free Surface 36. Uplow Boundaries --&gt; Bottom Boundary Layer 37. Boundary Forcing 38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction 39. Boundary Forcing --&gt; Momentum --&gt; Lateral Friction 40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration 41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing 1. Key Properties Ocean key properties 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of ocean model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of ocean model code (NEMO 3.6, MOM 5.0,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.model_family') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OGCM" # "slab ocean" # "mixed layer ocean" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Model Family Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of ocean model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.basic_approximations') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Primitive equations" # "Non-hydrostatic" # "Boussinesq" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.4. Basic Approximations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Basic approximations made in the ocean. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Potential temperature" # "Conservative temperature" # "Salinity" # "U-velocity" # "V-velocity" # "W-velocity" # "SSH" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.5. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of prognostic variables in the ocean component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Linear" # "Wright, 1997" # "Mc Dougall et al." # "Jackett et al. 2006" # "TEOS 2010" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Seawater Properties Physical properties of seawater in ocean 2.1. Eos Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EOS for sea water End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Potential temperature" # "Conservative temperature" # TODO - please enter value(s) """ Explanation: 2.2. Eos Functional Temp Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Temperature used in EOS for sea water End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Practical salinity Sp" # "Absolute salinity Sa" # TODO - please enter value(s) """ Explanation: 2.3. Eos Functional Salt Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Salinity used in EOS for sea water End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pressure (dbars)" # "Depth (meters)" # TODO - please enter value(s) """ Explanation: 2.4. Eos Functional Depth Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Depth or pressure used in EOS for sea water ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "TEOS 2010" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 2.5. Ocean Freezing Point Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 2.6. Ocean Specific Heat Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specific heat in ocean (cpocean) in J/(kg K) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 2.7. Ocean Reference Density Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Boussinesq reference density (rhozero) in kg / m3 End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Present day" # "21000 years BP" # "6000 years BP" # "LGM" # "Pliocene" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Bathymetry Properties of bathymetry in ocean 3.1. Reference Dates Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Reference date of bathymetry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.type') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 3.2. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the bathymetry fixed in time in the ocean ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.3. Ocean Smoothing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe any smoothing or hand editing of bathymetry in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.source') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.4. Source Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe source of bathymetry in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Nonoceanic Waters Non oceanic waters treatement in ocean 4.1. Isolated Seas Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how isolated seas is performed End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. River Mouth Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how river mouth mixing or estuaries specific treatment is performed End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Software Properties Software properties of ocean code 5.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Resolution Resolution in the ocean grid 6.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Canonical Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Range Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Range of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 6.4. Number Of Horizontal Gridpoints Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 6.5. Number Of Vertical Levels Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of vertical levels resolved on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.6. Is Adaptive Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Default is False. Set true if grid resolution changes during execution. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 6.7. Thickness Level 1 Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Thickness of first surface ocean level (in meters) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Tuning Applied Tuning methodology for ocean component 7.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics of the global mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Key Properties --&gt; Conservation Conservation in the ocean component 8.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Brief description of conservation methodology End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.scheme') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Energy" # "Enstrophy" # "Salt" # "Volume of ocean" # "Momentum" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Properties conserved in the ocean by the numerical schemes End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Consistency Properties Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Any additional consistency properties (energy conversion, pressure gradient discretisation, ...)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.4. Corrected Conserved Prognostic Variables Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Set of variables which are conserved by more than the numerical scheme alone. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 8.5. Was Flux Correction Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Does conservation involve flux correction ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Grid Ocean grid 9.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of grid in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Z-coordinate" # "Z*-coordinate" # "S-coordinate" # "Isopycnic - sigma 0" # "Isopycnic - sigma 2" # "Isopycnic - sigma 4" # "Isopycnic - other" # "Hybrid / Z+S" # "Hybrid / Z+isopycnic" # "Hybrid / other" # "Pressure referenced (P)" # "P*" # "Z**" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Grid --&gt; Discretisation --&gt; Vertical Properties of vertical discretisation in ocean 10.1. Coordinates Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of vertical coordinates in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 10.2. Partial Steps Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Using partial steps with Z or Z vertical coordinate in ocean ?* End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Lat-lon" # "Rotated north pole" # "Two north poles (ORCA-style)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11. Grid --&gt; Discretisation --&gt; Horizontal Type of horizontal discretisation scheme in ocean 11.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal grid type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Arakawa B-grid" # "Arakawa C-grid" # "Arakawa E-grid" # "N/a" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.2. Staggering Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Horizontal grid staggering type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Finite difference" # "Finite volumes" # "Finite elements" # "Unstructured grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.3. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal discretisation scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12. Timestepping Framework Ocean Timestepping Framework 12.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of time stepping in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Via coupling" # "Specific treatment" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12.2. 
Diurnal Cycle Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Diurnal cycle type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Leap-frog + Asselin filter" # "Leap-frog + Periodic Euler" # "Predictor-corrector" # "Runge-Kutta 2" # "AM3-LF" # "Forward-backward" # "Forward operator" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Timestepping Framework --&gt; Tracers Properties of tracers time stepping in ocean 13.1. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Tracers time stepping scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.2. Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Tracers time step (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Preconditioned conjugate gradient" # "Sub cyling" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Timestepping Framework --&gt; Baroclinic Dynamics Baroclinic dynamics in ocean 14.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Baroclinic dynamics type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Leap-frog + Asselin filter" # "Leap-frog + Periodic Euler" # "Predictor-corrector" # "Runge-Kutta 2" # "AM3-LF" # "Forward-backward" # "Forward operator" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Baroclinic dynamics scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.3. Time Step Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Baroclinic time step (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "split explicit" # "implicit" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15. Timestepping Framework --&gt; Barotropic Barotropic time stepping in ocean 15.1. Splitting Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time splitting method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.2. 
Time Step Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Barotropic time step (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16. Timestepping Framework --&gt; Vertical Physics Vertical physics time stepping in ocean 16.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Details of vertical time stepping in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17. Advection Ocean advection 17.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of advection in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.momentum.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Flux form" # "Vector form" # TODO - please enter value(s) """ Explanation: 18. Advection --&gt; Momentum Properties of lateral momemtum advection scheme in ocean 18.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of lateral momemtum advection scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.momentum.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.2. Scheme Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of ocean momemtum advection scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.momentum.ALE') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 18.3. ALE Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Using ALE for vertical advection ? (if vertical coordinates are sigma) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 19. Advection --&gt; Lateral Tracers Properties of lateral tracer advection scheme in ocean 19.1. Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Order of lateral tracer advection scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 19.2. Flux Limiter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Monotonic flux limiter for lateral tracer advection scheme in ocean ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 19.3. 
Effective Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Effective order of limited lateral tracer advection scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.4. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Descriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Ideal age" # "CFC 11" # "CFC 12" # "SF6" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19.5. Passive Tracers Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Passive tracers advected End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.6. Passive Tracers Advection Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Is advection of passive tracers different than active ? if so, describe. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.vertical_tracers.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20. Advection --&gt; Vertical Tracers Properties of vertical tracer advection scheme in ocean 20.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Descriptive text for vertical tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 20.2. Flux Limiter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Monotonic flux limiter for vertical tracer advection scheme in ocean ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 21. Lateral Physics Ocean lateral physics 21.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of lateral physics in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Eddy active" # "Eddy admitting" # TODO - please enter value(s) """ Explanation: 21.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of transient eddy representation in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Horizontal" # "Isopycnal" # "Isoneutral" # "Geopotential" # "Iso-level" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22. Lateral Physics --&gt; Momentum --&gt; Operator Properties of lateral physics operator for momentum in ocean 22.1. Direction Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Direction of lateral physics momemtum scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Harmonic" # "Bi-harmonic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.2. Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Order of lateral physics momemtum scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Second order" # "Higher order" # "Flux limiter" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.3. Discretisation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Discretisation of lateral physics momemtum scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Space varying" # "Time + space varying (Smagorinsky)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff Properties of eddy viscosity coeff in lateral physics momemtum scheme in the ocean 23.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Lateral physics momemtum eddy viscosity coeff type in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 23.2. Constant Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant, value of eddy viscosity coeff in lateral physics momemtum scheme (in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23.3. Variable Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If space-varying, describe variations of eddy viscosity coeff in lateral physics momemtum scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23.4. 
Coeff Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe background eddy viscosity coeff in lateral physics momemtum scheme (give values in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.5. Coeff Backscatter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there backscatter in eddy viscosity coeff in lateral physics momemtum scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 24. Lateral Physics --&gt; Tracers Properties of lateral physics for tracers in ocean 24.1. Mesoscale Closure Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there a mesoscale closure in the lateral physics tracers scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 24.2. Submesoscale Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Horizontal" # "Isopycnal" # "Isoneutral" # "Geopotential" # "Iso-level" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25. Lateral Physics --&gt; Tracers --&gt; Operator Properties of lateral physics operator for tracers in ocean 25.1. Direction Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Direction of lateral physics tracers scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Harmonic" # "Bi-harmonic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.2. Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Order of lateral physics tracers scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Second order" # "Higher order" # "Flux limiter" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.3. Discretisation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Discretisation of lateral physics tracers scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Space varying" # "Time + space varying (Smagorinsky)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusity Coeff Properties of eddy diffusity coeff in lateral physics tracers scheme in the ocean 26.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Lateral physics tracers eddy diffusity coeff type in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 26.2. Constant Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant, value of eddy diffusity coeff in lateral physics tracers scheme (in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 26.3. Variable Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If space-varying, describe variations of eddy diffusity coeff in lateral physics tracers scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 26.4. Coeff Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe background eddy diffusity coeff in lateral physics tracers scheme (give values in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 26.5. Coeff Backscatter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there backscatter in eddy diffusity coeff in lateral physics tracers scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "GM" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity Properties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean 27.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EIV in lateral physics tracers in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 27.2. 
Constant Val Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If EIV scheme for tracers is constant, specify coefficient value (M2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.3. Flux Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EIV flux (advective or skew) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.4. Added Diffusivity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EIV added diffusivity (constant, flow dependent or none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 28. Vertical Physics Ocean Vertical Physics 28.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of vertical physics in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details Properties of vertical physics in ocean 29.1. Langmuir Cells Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there Langmuir cells mixing in upper ocean ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure - TKE" # "Turbulent closure - KPP" # "Turbulent closure - Mellor-Yamada" # "Turbulent closure - Bulk Mixed Layer" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers *Properties of boundary layer (BL) mixing on tracers in the ocean * 30.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of boundary layer mixing for tracers in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 30.2. Closure Order Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If turbulent BL mixing of tracers, specific order of closure (0, 1, 2.5, 3) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 30.3. 
Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant BL mixing of tracers, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 30.4. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background BL mixing of tracers coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure - TKE" # "Turbulent closure - KPP" # "Turbulent closure - Mellor-Yamada" # "Turbulent closure - Bulk Mixed Layer" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum *Properties of boundary layer (BL) mixing on momentum in the ocean * 31.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of boundary layer mixing for momentum in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 31.2. Closure Order Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If turbulent BL mixing of momentum, specific order of closure (0, 1, 2.5, 3) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 31.3. Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant BL mixing of momentum, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 31.4. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background BL mixing of momentum coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Non-penetrative convective adjustment" # "Enhanced vertical diffusion" # "Included in turbulence closure" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 32. Vertical Physics --&gt; Interior Mixing --&gt; Details *Properties of interior mixing in the ocean * 32.1. Convection Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of vertical convection in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 32.2. Tide Induced Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how tide induced mixing is modelled (barotropic, baroclinic, none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 32.3. Double Diffusion Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there double diffusion End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 32.4. Shear Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there interior shear mixing End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure / TKE" # "Turbulent closure - Mellor-Yamada" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers *Properties of interior mixing on tracers in the ocean * 33.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of interior mixing for tracers in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 33.2. Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant interior mixing of tracers, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 33.3. Profile Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 33.4. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background interior mixing of tracers coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure / TKE" # "Turbulent closure - Mellor-Yamada" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum *Properties of interior mixing on momentum in the ocean * 34.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of interior mixing for momentum in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 34.2. Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant interior mixing of momentum, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 34.3. Profile Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 34.4. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background interior mixing of momentum coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 35. Uplow Boundaries --&gt; Free Surface Properties of free surface in ocean 35.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of free surface in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Linear implicit" # "Linear filtered" # "Linear semi-explicit" # "Non-linear implicit" # "Non-linear filtered" # "Non-linear semi-explicit" # "Fully explicit" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 35.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Free surface scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 35.3. Embeded Seaice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the sea-ice embeded in the ocean model (instead of levitating) ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 36. Uplow Boundaries --&gt; Bottom Boundary Layer Properties of bottom boundary layer in ocean 36.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of bottom boundary layer in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Diffusive" # "Acvective" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 36.2. Type Of Bbl Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of bottom boundary layer in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 36.3. Lateral Mixing Coef Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 36.4. Sill Overflow Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe any specific treatment of sill overflows End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37. Boundary Forcing Ocean boundary forcing 37.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of boundary forcing in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.2. Surface Pressure Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.3. Momentum Flux Correction Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.4. Tracers Flux Correction Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.5. Wave Effects Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how wave effects are modelled at ocean surface. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.6. River Runoff Budget Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how river runoff from land surface is routed to ocean and any global adjustment done. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.7. Geothermal Heating Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how geothermal heating is present at ocean bottom. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Linear" # "Non-linear" # "Non-linear (drag function of speed of tides)" # "Constant drag coefficient" # "None" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction Properties of momentum bottom friction in ocean 38.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of momentum bottom friction in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Free-slip" # "No-slip" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 39. Boundary Forcing --&gt; Momentum --&gt; Lateral Friction Properties of momentum lateral friction in ocean 39.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of momentum lateral friction in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "1 extinction depth" # "2 extinction depth" # "3 extinction depth" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration Properties of sunlight penetration scheme in ocean 40.1. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of sunlight penetration scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 40.2. Ocean Colour Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the ocean sunlight penetration scheme ocean colour dependent ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 40.3. Extinction Depth Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe and list extinctions depths for sunlight penetration scheme (if applicable). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Freshwater flux" # "Virtual salt flux" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing Properties of surface fresh water forcing in ocean 41.1. From Atmopshere Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of surface fresh water forcing from atmos in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Freshwater flux" # "Virtual salt flux" # "Real salt flux" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 41.2. From Sea Ice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of surface fresh water forcing from sea-ice in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 41.3. Forced Mode Restoring Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of surface salinity restoring in forced mode (OMIP) End of explanation """
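# ILLUSTRATIVE SKETCH - NOT PART OF THE OFFICIAL ES-DOC TEMPLATE
# The property id and value below are hypothetical placeholders showing how a
# completed cell would look; they do not document any real CMIP6 model, so the
# calls are left commented out.
#
# DOC.set_id('cmip6.ocean.key_properties.resolution.name')   # id copied from section 6.1 above
# DOC.set_value("ORCA025")                                    # hypothetical STRING value
#
# ENUM properties take one of the listed "Valid Choices" strings, INTEGER and
# FLOAT properties take numbers (e.g. DOC.set_value(75)), and BOOLEAN
# properties take True or False.
"""
Explanation: Completed-cell sketch (illustrative only)
The commented sketch shows, with hypothetical placeholder values, how the TODO
cells in this template are intended to be filled in: keep the DOC.set_id() call
unchanged and replace the placeholder in DOC.set_value() with the model's real
value, using the type stated in each property description (STRING, ENUM,
INTEGER, FLOAT or BOOLEAN).
End of explanation
"""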
davidparks21/qso_lya_detection_pipeline
lucid_work/notebooks/feature_visualization.ipynb
mit
# Imports import numpy as np import tensorflow as tf import scipy.ndimage as nd import time import imageio import matplotlib import matplotlib.pyplot as plt import lucid.modelzoo.vision_models as models from lucid.misc.io import show import lucid.optvis.objectives as objectives import lucid.optvis.param as param import lucid.optvis.render as render import lucid.optvis.transform as transform from lucid.optvis.objectives import wrap_objective, _dot, _dot_cossim from lucid.optvis.transform import standard_transforms, crop_or_pad_to, pad, jitter, random_scale, random_rotate from lucid.modelzoo.vision_base import Model class DLA(Model): model_path = '../protobufs/full_model_8_13.pb' image_shape = [1, 400] image_value_range = [0, 1] input_name = 'x' model = DLA() model.load_graphdef() LAYERS = { 'conv1': ['Conv2D', 100], 'conv1_relu': ['Relu', 100], 'pool1': ['MaxPool', 100], 'conv2': ['Conv2D_1', 96], 'conv2_relu': ['Relu_1', 96], 'pool2': ['MaxPool_1', 96], 'conv3': ['Conv2D_2', 96], 'conv3_relu': ['Relu_2', 96], 'pool3': ['MaxPool_2', 96]} """ Explanation: Feature Visualization This notebook does basic feature visualization of David Parks DLA CNN Model Install imports, define and load model End of explanation """ # Specify param.image size to work with our models input, must be a multiple of 400. param_f = lambda: param.image(120, h=120, channels=3) # std_transforms = [ # pad(2, mode="constant", constant_value=.5), # jitter(2)] # transforms = std_transforms + [crop_or_pad_to(*model.image_shape[:2])] transforms = [] # Specify the objective # neuron = lambda n: objectives.neuron(LAYERS['pool1'][0], n) # obj = neuron(0) channel = lambda n: objectives.channel(LAYERS['pool1'][0], n) obj = channel(0) # Specify the number of optimzation steps, will output image at each step thresholds = (1, 2, 4, 8, 16, 32, 64, 128, 256, 512) # Render the objevtive imgs = render.render_vis(model, obj, param_f, thresholds=thresholds, transforms=transforms) show([nd.zoom(img[0], [1,1,1], order=0) for img in imgs]) # test = np.array(imgs) # test = test.reshape(400) # test = test[0:400:1] # fig = plt.figure(frameon=False); # ax = plt.Axes(fig, [0, 0, 1, 1]); # ax.set_axis_off(); # fig.add_axes(ax); # ax.plot(test, 'black'); # ax.set(xlim=(0, 400)); # ax.set(ylim=(0,1)) """ Explanation: Simple 3D Visualizations of a neuron Create 3D visualizations End of explanation """ # Specify param.image size param_f = lambda: param.image(400, h=1, channels=1) transforms = [] # Specify the objective # neuron = lambda n: objectives.neuron(LAYERS['pool1'][0], n) # obj = neuron(0) channel = lambda n: objectives.channel(LAYERS['pool1'][0], n) obj = channel(0) # Specify the number of optimzation steps, thresholds = (128,) # Render the objevtive imgs = render.render_vis(model, obj, param_f, thresholds=thresholds, transforms=transforms, verbose=False) # Display visualization test = np.array(imgs) test = test.reshape(400) test = test[0:400:1] fig = plt.figure(frameon=False); ax = plt.Axes(fig, [0, 0, 1, 1]); ax.set_axis_off(); fig.add_axes(ax); ax.plot(test, 'black'); ax.set(xlim=(0, 400)); ax.set(ylim=(0,1)) """ Explanation: Simple 1D visualizations End of explanation """
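# ILLUSTRATIVE SKETCH - not part of the original notebook. It simply repeats
# the 1D recipe already used above (param.image with h=1, objectives.channel,
# render.render_vis, then reshape to 400 flux values) for the first few
# channels of 'pool1', so their visualizations can be compared side by side.
# The layer choice and the number of channels shown are arbitrary examples.
n_channels_to_show = 3

fig, axes = plt.subplots(n_channels_to_show, 1, sharex=True, figsize=(8, 6))
for n, ax in enumerate(axes):
    param_f = lambda: param.image(400, h=1, channels=1)
    obj = objectives.channel(LAYERS['pool1'][0], n)
    imgs = render.render_vis(model, obj, param_f, thresholds=(128,),
                             transforms=[], verbose=False)
    spec = np.array(imgs).reshape(400)  # same flattening as the 1D cell above
    ax.plot(spec, 'black')
    ax.set(xlim=(0, 400), ylim=(0, 1))
    ax.set_ylabel('channel {:d}'.format(n))
axes[-1].set_xlabel('input pixel')
fig.tight_layout()
"""
Explanation: Comparing several channels (illustrative sketch)
This sketch is an addition, not part of the original notebook: it loops the same
1D optimization over a few channels of a single layer. Everything it calls
(param.image, objectives.channel, render.render_vis, LAYERS) is already defined
above; only the loop and the stacked matplotlib panels are new, and the choice
of layer and channels is arbitrary.
End of explanation
"""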
LSSTC-DSFP/LSSTC-DSFP-Sessions
Sessions/Session13/Day3/RealWorldLombScargle.ipynb
mit
np.random.seed(185) # calculate the periodogram x = 10*np.random.rand(100) y = gen_periodic_data(x, period=5.25, amplitude=7.4, noise=0.8) y_unc = np.ones_like(x)*np.sqrt(0.8) """ Explanation: Real World Considerations for the Lomb-Scargle Periodogram Version 0.2 By AA Miller (Northwestern/CIERA) 23 Sep 2021 In Lecture III we built the software necessary to estimate the power spectrum via the Lomb-Scargle periodogram. We also discovered that LS is somewhat slow. We will now leverage the faster implementation in astropy, while exploring some specific challenges related to real astrophysical light curves. The helper functions from Lecture III are recreated at the end of this notebook - execute those cells and then the cell below to recreate the simulated data from Lecture III. End of explanation """ from astropy.timeseries import LombScargle frequency, power = LombScargle(x, y, y_unc).autopower() """ Explanation: Problem 1) Other Considerations and Faster Implementations While our "home-grown" ls_periodogram works, it would take a loooooong time to evaluate $\sim4\times 10^5$ frequencies for $\sim2\times 10^7$ variable LSST sources. (as is often the case...) astropy to the rescue! Problem 1a LombScargle in astropy.timeseries is fast. Run it below to compare to ls_periodogram. End of explanation """ fig, ax = plt.subplots() ax.plot( # complete # complete # complete # complete fig.tight_layout() """ Explanation: Unlike ls_periodogram, LombScargle effectively takes no time to run on the simulated data. Problem 1b Plot the periodogram for the simulated data. End of explanation """ # complete freq_no_mean, power_no_mean = LombScargle( # complete freq_fit_mean, power_fit_mean = LombScargle( # complete fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True) ax1.plot( # complete ax2.plot( # complete ax1.set_xlim(0,15) fig.tight_layout() """ Explanation: There are many choices regarding the calculation of the periodogram, so read the docs. Floating Mean Periodogram A basic assumption that we preivously made is that the data are "centered" - in other words, our model explicitly assumes that the signal oscillates about a mean of 0. For astronomical applications, this assumption can be harmful. Instead, it is useful to fit for the mean of the signal in addition to the periodic component (as is the default in LombScargle): $$y(t;f) = y_0(f) + A_f \sin(2\pi f(t - \phi_f).$$ To illustrate why this is important for astronomy, assume that any signal fainter than $-2$ in our simulated data cannot be detected. Problem 1c Remove the observations from x and y where $y \le -2$ and calculate the periodogram both with and without fitting the mean (fit_mean = False in the call to LombScargle). Plot the periodograms. Do both methods recover the correct period? End of explanation """ fit_mean_model = LombScargle(x[bright], y[bright], y_unc[bright], fit_mean=True).model(np.linspace(0,10,1000), freq_fit_mean[np.argmax(power_fit_mean)]) no_mean_model = LombScargle(x[bright], y[bright], y_unc[bright], fit_mean=False).model(np.linspace(0,10,1000), freq_no_mean[np.argmax(power_no_mean)]) fig, ax = plt.subplots() ax.errorbar(x[bright], y[bright], y_unc[bright], fmt='o', label='data') ax.plot(np.linspace(0,10,1000), fit_mean_model, label='fit mean') ax.plot(np.linspace(0,10,1000), no_mean_model, label='no mean') ax.set_xlabel('x') ax.set_ylabel('y') ax.legend() fig.tight_layout() """ Explanation: We can see that the best fit model doesn't match the signal in the case where we do not allow a floating mean. 
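# MINIMAL STAND-IN (a sketch, not part of the original notebook) for the
# Lecture III helper referenced above. The notebook's own helper functions are
# defined at the end; this version only assumes, following the docstring of
# that helper, that gen_periodic_data returns a single sinusoid of the given
# period, amplitude, and phase plus Gaussian noise whose variance is `noise`.
import numpy as np
import matplotlib.pyplot as plt

def gen_periodic_data(x, period=1, amplitude=1, phase=0, noise=0):
    '''Sketch of the Lecture III helper: sinusoid plus Gaussian noise (variance = noise)'''
    y = amplitude * np.sin(2 * np.pi * x / period + phase)
    return y + np.random.normal(0, np.sqrt(noise), size=len(x))
"""
Explanation: A stand-in helper (illustrative sketch)
The helper functions from Lecture III live at the end of this notebook; the
sketch here is only a minimal, assumed implementation of gen_periodic_data (one
sinusoid plus Gaussian noise) so that the data-generation cell can be run on its
own. Use the notebook's own helpers for the actual exercises.
End of explanation
"""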
End of explanation """ # set up simulated observations t_obs = np.arange(0, 10*365, 3) # 3d cadence # complete # complete # complete y = gen_periodic_data( # complete # complete fig, ax = plt.subplots() ax.errorbar( # complete ax.set_xlabel("Time (d)") ax.set_ylabel("Flux (arbitrary units)") """ Explanation: Window Functions Recall that the convolution theorem tells us that: $$\mathcal{F}[f\cdot g] = \mathcal{F}(f) \ast \mathcal{F}(g)$$ Telescope observations are effectively the product of a continous signal with several delta functions (corresponding to the times of observations). As a result, the convolution that produces the periodogram will retain signal from both the source and the observational cadence. To illustrate this effect, let us simulate "realistic" observations for a 10 year telescope survey. We do this by assuming that a source is observed every 3 nights (the LSST cadence) within $\pm 4\,\mathrm{hr}$ of the same time, and that $\sim 30\%$ of the observations did not occur due to bad weather. We further assume that the source cannot be observed for 40% of the year because it is behind the sun. Simulate a periodic signal with this cadence, a period = 220 days (typical for Miras), amplitude = 12.4, and noise = 1. Plot the simulated light curve. Problem 1d Simulate a periodic signal with 3 day cadence (and the observing conditions described above), a period = 220 days (typical for Miras), amplitude = 12.4, and variance of the noise = 1. Plot the simulated light curve. End of explanation """ ls = LombScargle( # complete freq_window, power_window = # complete fig, ax = plt.subplots() ax.plot( # complete ax.set_ylabel("Power") ax.set_xlabel("Period (d)") ax.set_xlim(0,500) axins = plt.axes([.2, .65, .5, .2]) axins.plot( # complete axins.set_xlim(0,5) """ Explanation: Problem 1e Calculate and plot the periodogram for the window function (i.e., set y = 1 in LombScargle) of the observations. Do you notice any significant power? Hint - you may need to zoom in on the plot to see all the relevant features. End of explanation """ ls = LombScargle( # complete frequency, power = # complete fig, (ax,ax2) = plt.subplots(2,1, sharex=True) ax.plot( # complete ax.set_ylabel("Power") ax.set_ylim(0,1) ax2.plot( # complete ax2.set_ylabel("Power") ax2.set_xlabel("Period (d)") ax2.set_xlim(0,10) fig.tight_layout() """ Explanation: Interestingly, there are very strong peaks in the data at $P \approx 3\,\mathrm{d} \;\&\; 365\,\mathrm{d}$. What is this telling us? Essentially that observations are likely to be repeated at intervals of 3 or 365 days (shorter period spikes are aliases of the 3 d peak). This is important to understand, however, because this same power will be present in the periodogram where we search for the periodic signal. Problem 1f Calculate the periodogram for the data and compare it to the periodogram for the window function. End of explanation """ data = # complete fig, ax = plt.subplots() ax.errorbar( # complete ax.set_xlabel('HJD (d)') ax.set_ylabel('V (mag)') ax.set_ylim(ax.get_ylim()[::-1]) fig.tight_layout() """ Explanation: Uncertainty on the best-fit period How do we report uncertainties on the best-fit period from LS? For example, for the previously simulated LSST light curve we would want to report something like $P = 102 \pm 4\,\mathrm{d}$. However, the uncertainty from LS periodograms cannot be determined in this way. Naively, one could report the width of the peak in the periodogram as the uncertainty in the fit. 
However, we previously saw that the peak width $\propto 1/T$ (the peak width does not decrease as the number of observations or their S/N increases; see Vander Plas 2017). Reporting such an uncertainty is particularly ridiculous for long duration surveys, whereby the peaks become very very narrow. An alternative approach is to report the False Alarm Probability (FAP), which estimates the probability that a dataset with no periodic signal could produce a peak of similar magnitude, due to random gaussian fluctuations, as the data. There are a few different methods to calculate the FAP. Perhaps the most useful, however, is the bootstrap method. To obtain a bootstrap estimate of the LS FAP one leaves the observation times fixed, and then draws new observation values with replacement from the actual set of observations. This procedure is then repeated many times to determine the FAP. One nice advantage of this procedure is that any effects due to the window function will be imprinted in each iteration of the bootstrap resampling. The major disadvantage is that many many periodograms must be calculated. The rule of thumb is that to acieve a FAP $= p_\mathrm{false}$, one must run $n_\mathrm{boot} \approx 10/p_\mathrm{false}$ bootstrap periodogram calculations. Thus, an FAP $\approx 0.1\%$ requires an increase of 1000 in computational time. LombScargle provides the false_alarm_probability method, including a bootstrap option. We skip that for now in the interest of time. As a final note of caution - be weary of over-interpreting the FAP. The specific question answered by the FAP is, what is the probability that gaussian fluctations could produce a signal of equivalent magnitude? Whereas, the question we generally want to answer is: did a periodic signal produce these data? These questions are very different, and thus, the FAP cannot be used to prove that a source is periodic. Problem 2) Real-world considerations We have covered many, though not all, considerations that are necessary when employing a Lomb Scargle periodogram. We have not yet, however, encountered real world data. Here we highlight some of the issues associated with astronomical light curves. We will now use LS to analyze actual data from the All Sky Automated Survey (ASAS). Download the example light curve. Problem 2a Read in the light curve from example_asas_lc.dat. Plot the light curve. Hint - I recommend using astropy Tables or pandas dataframe. End of explanation """ frequency, power = # complete # complete fig,ax = plt.subplots() ax.plot(# complete ax.set_ylabel("Power") ax.set_xlabel("Period (d)") ax.set_xlim(0, 800) axins = plt.axes([.25, .55, .6, .3]) axins.plot( # complete axins.set_xlim(0,5) fig.tight_layout() # plot the phase folded light curve phase_plot( # complete """ Explanation: Problem 2b Use LombScargle to measure the periodogram. Then plot the periodogram and the phase folded light curve at the best-fit period. Hint - search periods longer than 2 hr. End of explanation """ phase_plot( # complete """ Explanation: Problem 2c Now plot the light curve folded at twice the best LS period. Which of these 2 is better? End of explanation """ phase_plot( # complete phase_grid = np.linspace(0,1,1000) plt.plot( # complete plt.tight_layout() """ Explanation: Herein lies a fundamental issue regarding the LS periodogram: the model does not search for "periodicity." The LS model asks if the data support a sinusoidal signal. 
As astronomers we typically assume this question is good enough, but as we can see in the example of this eclipsing binary that is not the case [and this is not limited to eclipsing binaries]. We can see why LS is not sufficient for an EB by comparing the model to the phase-folded light curve: Problem 2d Overplot the model on top of the phase folded light curve. Hint – you can access the best LS fit via the .model() method on LombScargle objects in astropy. End of explanation """ for i in np.arange(1,6): frequency, power = # complete # complete print('For {:d} harmonics, P_LS = {:.8f}'.format( # complete """ Explanation: One way to combat this very specific issue is to include more Fourier terms at the harmonic of the best fit period. This is easy to implement in LombScargle with the nterms keyword. [Though always be weary of adding degrees of freedom to a model, especially at the large pipeline level of analysis.] Problem 2e Calculate the LS periodogram for the eclipsing binary, with nterms = 1, 2, 3, 4, 5. Report the best-fit period for each of these models. Hint - we have good reason to believe that the best fit frequency is < 3 in this case, so set maximum_frequency = 3. End of explanation """ best_period = # complete phase_plot( # complete phase_grid = np.linspace(0,1,1000) plt.plot( # complete plt.tight_layout() """ Explanation: Interestingly, for the $n=2, 3, 4$ harmonics, it appears as though we get the period that we have visually confirmed. However, by $n=5$ harmonics we no longer get a reasonable answer. Again - be very careful about adding harmonics, especially in large analysis pipelines. Problem 2f Plot the the $n = 4$ model on top of the light curve folded at the correct period. End of explanation """ def gen_periodic_data(x, period=1, amplitude=1, phase=0, noise=0): '''Generate periodic data Parameters ---------- x : array-like input values to evaluate the array period : float (default=1) period of the periodic signal amplitude : float (default=1) amplitude of the periodic signal phase : float (default=0) phase offset of the periodic signal noise : float (default=0) variance of the noise term added to the periodic signal Returns ------- y : array-like Periodic signal evaluated at all points x ''' y1 = # complete amp2 = # complete phase2 = # complete y2 = # complete amp3 = # complete phase3 = # complete y3 = # complete amp4 = # complete phase4 = # complete y4 = # complete dy = # complete return y1 + y2 + y3 + y4 + dy """ Explanation: This example also shows why it is somewhat strange to provide an uncertainty with a LS best-fit period. Errors tend to be catastrophic, and not some small fractional percentage, with the LS periodogram. In the case of the above EB, the "best-fit" period was off by a factor 2. This is not isolated to EBs, however, LS periodograms frequently identify a correct harmonic of the true period, but not the actual period of variability. Problem 3) The "Real" World Problem 3a Re-write gen_periodic_data to create periodic signals using the first 4 harmonics in a Fourier series. The $n > 1$ harmonics should have random phase offsets, and the amplitude of the $n > 1$ harmonics should be drawn randomly from a uniform distribution between 0 and amplitude the amplitude of the first harmonic. 
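A sketch of one possible implementation (not the official solution; the particular random draws for the higher-harmonic amplitudes and phases below are illustrative choices only):

```python
def gen_periodic_data(x, period=1, amplitude=1, phase=0, noise=0):
    '''Generate a signal from the first 4 Fourier harmonics plus Gaussian noise'''
    y = amplitude*np.sin(2*np.pi*x/period - phase)
    for n in range(2, 5):
        amp_n = np.random.uniform(0, amplitude)
        phase_n = np.random.uniform(0, 2*np.pi)
        y += amp_n*np.sin(2*np.pi*n*x/period - phase_n)
    return y + np.random.normal(0, np.sqrt(noise), size=len(x))
```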
End of explanation """ np.random.seed(185) x_grid = np.linspace( # complete y = gen_periodic_data( # complete phase_plot( # complete """ Explanation: Problem 3b Confirm the updated version of gen_periodic_data works by creating a phase plot for a simulated signal with amplitude = 4, period = 1.234, and noise=0.81, and 100 observations obtained on a regular grid from 0 to 50. End of explanation """ n_lc = # complete true_p = np.zeros(n_lc) ls_p = np.zeros_like(true_p) for lc in range(n_lc): # set up simulated observations t_obs = np.arange(0, 2*365, 3) # 3d cadence # complete # complete # complete period = # complete true_p[lc] = # complete amp = # complete noise = # complete y = gen_periodic_data( # complete freq, power = LombScargle( # complete ls_p[lc] = 1/freq[np.argmax(power)] """ Explanation: Problem 3c Simulate 1000 "realistic" light curves using the astronomical cadence from 1d for a full survey duration of 2 years. For each light curve draw the period randomly from [0.2, 10], and the amplitude randomly from [1, 5], and the noise randomly from [1,2]. Record the period in an array true_p and estimate the period for the simulated data via LS and store the result in an array ls_p. End of explanation """ fig, ax = plt.subplots() ax.plot( # complete ax.set_ylim(0, 20) ax.set_xlabel('True period (d)') ax.set_ylabel('LS peak (d)') fig.tight_layout() """ Explanation: Problem 3d Plot the LS recovered period vs. the true period for the simulated sources. Do you notice anything interesting? Do you manage to recover the correct period most of the time? End of explanation """ # complete """ Explanation: Problem 3e For how many of the simulated sources do you recover the correct period? Consider a period estimate "correct" if the LS estimate is within 10% of the true period. End of explanation """ p_grid = np.linspace(1e-1,10,100) fig, ax = plt.subplots() ax.plot(# complete # complete # complete # complete ax.set_ylim(0, 9) ax.set_xlabel('True period (d)') ax.set_ylabel('LS peak (d)') fig.tight_layout() """ Explanation: The results are a bit disappointing. However, it is also clear that there is strong structure in the plot off the 1:1 line. That structure can be understood in terms of the window function that was discussed in 1d, 1e, and 1f. Problem 3f Recreate the plot in 3d and overplot the line $$P_\mathrm{LS} = \left(\frac{1}{P_\mathrm{true}} + \frac{n}{3}\right)^{-1}$$ for $n = -2, -1, 1, 2$. Hint - only plot the values where $P_\mathrm{LS} > 0$ since, by definition, we do not search for negative periods. End of explanation """ p_grid = np.linspace(1e-1,10,100) fig, ax = plt.subplots() ax.plot(# complete # complete # complete # complete ax.set_ylim(0, 2) ax.set_xlabel('True period (d)') ax.set_ylabel('LS peak (d)') fig.tight_layout() """ Explanation: What in the... We see that these lines account for a lot of the off-diagonal structure in this plot. In this case, what is happening is that the true frequency of the signal $f_\mathrm{true}$ is being aliased by the window function and it's harmonics. In other words LS is pulling out $f_\mathrm{true} + n\delta{f}$, where $n$ is an integer and $\delta{f}$ is the observational cadence $3\,\mathrm{d}$. Many of the false positives can be explained via the window function. Similarly, LS might be recovering higher order harmonics of the true period since we aren't trying to recover pure sinusoidal signals in this simulation. 
These harmonics would also be aliased by the window function, so LS will pull out $f_\mathrm{true}/m + n\delta{f}$, where $m$ is a postive integer. Problem 3g Recreate the plot in 3d and overplot lines for the $m = 2$ harmonic aliased with $n = -1, 1, 2$. Hint - only plot the values where $P_\mathrm{LS} > 0$ since, by definition, we do not search for negative periods. End of explanation """ p_grid = np.linspace(1e-1,10,1000) fig, ax = plt.subplots() ax.plot(# complete # complete # complete # complete ax.set_ylim(0, 15) ax.set_xlabel('True period (d)') ax.set_ylabel('LS peak (d)') fig.tight_layout() """ Explanation: The last bit of structure can be understood via the symmetry in the LS periodogram about 0. In particular, if there is an aliased frequency that is less than 0, which will occur for $n < 0$ in the equations above, then there will also be power at the positive value of that frequency. In other words, LS will pull out $|f_\mathrm{true}/m + n\delta{f}|$. Problem 3h Recreate the plot in 3d and overplot lines for the "reflected" $m = 1$ harmonic aliased with $n = -3, -2, -1$. Hint - only plot the values where $P_\mathrm{LS} < 0$ since we are looking for "reflected" peaks in the periodogram in this case. End of explanation """ def gen_periodic_data(x, period=1, amplitude=1, phase=0, noise=0): '''Generate periodic data given the function inputs y = A*cos(x/p - phase) + noise Parameters ---------- x : array-like input values to evaluate the array period : float (default=1) period of the periodic signal amplitude : float (default=1) amplitude of the periodic signal phase : float (default=0) phase offset of the periodic signal noise : float (default=0) variance of the noise term added to the periodic signal Returns ------- y : array-like Periodic signal evaluated at all points x ''' y = amplitude*np.sin(2*np.pi*x/(period) - phase) + np.random.normal(0, np.sqrt(noise), size=len(x)) return y """ Explanation: Now we have seen that nearly all the structure in the LS period vs. true period plot can be explained via aliasing with the window function! This is good (we understand why the results do not conform to what we expect), but also bad, (we were not able to recover the correct period for the majority of our sources). Ultimately, this means - be careful when inspecting the results of the LS periodogram as the peaks aren't driven solely by the signal from the source in question! (If only there were some way to get rid of the sun, then we'd never have these problems...) Conclusions The Lomb-Scargle periodogram is a useful tool to search for sinusoidal signals in noisy, irregular data. However, as highlighted throughout, there are many ways in which the methodology can run awry. 
In closing, I will summarize some practical considerations from VanderPlas (2017): Choose an appropriate frequency grid (defaults in LombScargle are not sufficient) Calculate the LS periodogram for the observation times to search for dominant signals (e.g., 1 day in astro) Compute LS periodogram for data (avoid multi-Fourier models if signal unknown) Plot periodogram and various FAP levels (do not over-interpret FAP) If window function shows strong aliasing, plot phased light curve at each peak (now add more Fourier terms if necessary) If looking for a particular signal (e.g., detatched EBs), consider different methods that better match expected signal Inject fake signals into data to understand systematics if using LS in a survey pipeline Finally, Finally As a very last note: know that there are many different ways to search for periodicity in astronomical data. Depending on your application (and computational resources), LS may be a poor choice (even though this is often the default choice by all astronomers!) Graham et al. (2013) provides a summary of several methods using actual astronomical data. The results of that study show that no single method is best. However, they also show that no single method performs particularly well: the detection efficiences in Graham et al. (2013) are disappointing given the importance of periodicity in astronomical signals. Period detection is a fundamental problem for astronomical time-series, but it is especially difficult in "production" mode. Be careful when setting up pipelines to analyze large datasets. Challenge Problem Re-create problem 4, but include additional terms in the fit with the LS periodogram. What differences do you notice when comparing the true period to the best-fit LS periods? Helper Functions We developed useful helper functions as part of Lecture III from this session. These functions generate periodic data, and phase fold light curves on a specified period. These functions will once again prove useful, so we include them here in order to simulate data above. Helper 1 Create a function, gen_periodic_data, that creates simulated data (including noise) over a grid of user supplied positions: $$ y = A\,cos\left(\frac{x}{P} - \phi\right) + \sigma_y$$ where $A, P, \phi$ are inputs to the function. gen_periodic_data should include Gaussian noise, $\sigma_y$, for each output $y_i$. End of explanation """ def phase_plot(x, y, period, y_unc = 0.0, mag_plot=False): '''Create phase-folded plot of input data x, y Parameters ---------- x : array-like data values along abscissa y : array-like data values along ordinate period : float period to fold the data y_unc : array-like uncertainty of the ''' phases = (x/period) % 1 if isinstance(y_unc, (np.floating, float)): y_unc = np.ones_like(x)*y_unc plot_order = np.argsort(phases) fig, ax = plt.subplots() ax.errorbar(phases[plot_order], y[plot_order], y_unc[plot_order], fmt='o', mec="0.2", mew=0.1) ax.set_xlabel("phase") ax.set_ylabel("signal") if mag_plot: ax.set_ylim(ax.get_ylim()[::-1]) fig.tight_layout() """ Explanation: Helper 2 Create a function, phase_plot, that takes x, y, and $P$ as inputs to create a phase-folded light curve (i.e., plot the data at their respective phase values given the period $P$). Include an optional argument, y_unc, to include uncertainties on the y values, when available. End of explanation """
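
# A quick usage example of the two helpers above (illustrative values only):
# simulate a noisy periodic signal, then phase fold it on the true period.
demo_x = 10*np.random.rand(200)
demo_y = gen_periodic_data(demo_x, period=2.5, amplitude=3, noise=0.5)
phase_plot(demo_x, demo_y, period=2.5, y_unc=np.sqrt(0.5))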
MissouriDSA/twitter-locale
twitter/twitter_7.ipynb
mit
# BE SURE TO RUN THIS CELL BEFORE ANY OF THE OTHER CELLS import psycopg2 import pandas as pd import re # pull in our stopwords from nltk.corpus import stopwords stops = stopwords.words('english') """ Explanation: Twitter: An Analysis Part 7 We've explored the basics of natural language processing using Postgres and the steps we took are often a great starting point for the rest of the analysis, but rarely will you ever just stop with those results. You will often have to pull in data after performing some aggregations, joins, etc... and then continuing on with a general purpose programming language like Python or R. In this notebook, we are going to use postgres to pull in our data but then we are going to perform some more complex data carpentry. End of explanation """ # define our query statement = """ SELECT lower(t.text) as tweet, lower(h.text) as hashtag FROM twitter.tweet t, twitter.hashtag h WHERE t.job_id = 273 AND t.text NOT LIKE 'RT%' AND t.iso_language = 'en' AND t.tweet_id_str = h.tweet_id LIMIT 100000;""" try: connect_str = "dbname='twitter' user='dsa_ro_user' host='dbase.dsa.missouri.edu'password='readonly'" # use our connection values to establish a connection conn = psycopg2.connect(connect_str) cursor = conn.cursor() # execute the statement from above cursor.execute(statement) column_names = [desc[0] for desc in cursor.description] # fetch all of the rows associated with the query rows = cursor.fetchall() except Exception as e: print("Uh oh, can't connect. Invalid dbname, user or password?") print(e) tweet_dict = {} for i in list(range(len(column_names))): tweet_dict['{}'.format(column_names[i])] = [x[i] for x in rows] tweets = pd.DataFrame(tweet_dict) tweets.head() """ Explanation: Our query this time is going to extract the both the hashtag and the tweets associated with the hashtag. We are going to created documents full of tweets that are defined by their hashtags so we need to be able to reference the hashtags per tweet. ...oh and we are only taking from Chicago. End of explanation """ hashtag_groups = tweets.groupby('hashtag').size().sort_values().reset_index() """ Explanation: We can now use pandas to count how many times each hashtag was used. We can turn this into a data frame. End of explanation """ hashtag_groups.tail() """ Explanation: ...and the most popular hashtags for Chicago. End of explanation """ docs = tweets.groupby('hashtag')['tweet'].apply(list).reset_index() """ Explanation: Twitter is unique from other types of natural language given the constraints on size. This often makes it difficult to find coherent topics from tweets. Therefore, we want to create documents of tweets. Each document is a list of tweets that contain a particular hashtag. So what we want to do is create a list of tweets per hashtag. End of explanation """ docs.head() """ Explanation: Above, we are grouping by hashtag and then concatenating the tweets per group into a list. So this is going to be a data frame where the first attribute is the hashtag and the second is a list of tweets with that hashtag. Let's take a look... End of explanation """ def removePatterns(string, replacement, *pats): for pattern in pats: string = re.sub(pattern,replacement,string) return string """ Explanation: We now need to use a helper function to remove some patterns from the tweets that we don't want. First, we don't want '@' signs or '#'s. We also want to remove urls. We will create a regular expression to do that. 
End of explanation
"""

s = "I have @3 friends named #Arnold"
removePatterns(s,'', '#','@')
"""
Explanation: The function above takes in a string and replaces each of the patterns in that string with the replacement. Notice that we use *pats. This is a way to accept an unspecified number of arguments. Let's look at an example.
End of explanation
"""

url = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
"""
Explanation: This took the string s and replaced @ and # with a blank ''. Below, we are going to create a regular expression that matches URLs, since we also want these removed.
End of explanation
"""

from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+')

tokenized_docs = []
for i in docs['tweet']:
    document = []
    for text in i:
        document.append(tokenizer.tokenize(removePatterns(text,'','@','#',url).lower()))
    tokenized_docs.append(document)
"""
Explanation: In natural language processing, you often have to tokenize the text, which is to break it up into components. These components usually come from splitting on words, so that each word is a unit called a token. Below we are going to simultaneously remove the patterns we don't want, tokenize each tweet, and save the result to a list of lists called tokenized_docs.
End of explanation
"""

tokenized_docs[0]
"""
Explanation: We can now look at the first item of tokenized_docs to see what it looks like. Notice that it is a list of lists.
End of explanation
"""

stops_removed = []
for doc in tokenized_docs:
    phrases = []
    for phrase in doc:
        p = [i for i in phrase if i not in stops]
        phrases.append(p)
    stops_removed.append(phrases)
"""
Explanation: We then remove the stop words, keeping the result as a list of lists.
End of explanation
"""

from nltk.stem.porter import PorterStemmer
p_stemmer = PorterStemmer()

texts = []
for doc in stops_removed:
    stemmed = []
    for phrase in doc:
        try:
            stemmed.append([p_stemmer.stem(i) for i in phrase])
        except:
            pass
    texts.append(stemmed)
"""
Explanation: After tokenization, there is also stemming. This is the process of getting words to their base version. We are going to do a similar process here, saving the result to a list of lists called texts.
NOTE: This could take a couple of minutes
End of explanation
"""

texts[0]
"""
Explanation: And let's look at the first item...
End of explanation
"""
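
# A quick sanity check one might add here (a sketch): count the most common
# stems in the first hashtag document to see what survived the cleaning above.
from collections import Counter

first_doc_counts = Counter(stem for tweet in texts[0] for stem in tweet)
first_doc_counts.most_common(10)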
justanr/notebooks
fillingtheswearjar.ipynb
mit
def run(prog: str, stdin: str="") -> StringIO: stdout = StringIO() memory = [0] * 30_000 memptr = 0 instrptr = 0 progsize = len(prog) # stores the location of the last [ s we encountered brackets = [] while instrptr < progsize: op = progsize[instrptr] instrptr += 1 if op == '+': memory[memptr] += 1 elif op == '-': memory[memptr] -= 1 # and so on else: # not a BF command pass stdout.seek(0) return stdout """ Explanation: Filling the Swear Jar A tale of three languages Alec Reiter (@justanr) Brainfuck Urban Mueller, 1993 Turning ~~Complete~~ Tarpit 8 commands Tape, Tape Pointer, Instruction Pointer ++++++++[&gt;++++[&gt;++&gt;+++&gt;+++&gt;+&lt;&lt;&lt;&lt;-]&gt;+&gt;+&gt;-&gt;&gt;+[&lt;]&lt;-]&gt;&gt;.&gt;---.+++++++..+++.&gt;&gt;.&lt;-.&lt;.+++.------.--------.&gt;&gt;+.&gt;++. Why the... Rust! (but we'll get to that) Different Oddly fun The 8 Commands | Command | Meaning | |---------|------------------------------------| | + | Incr Cell | | - | Decr Cell | | > | Move Right | | < | Move Left | | [ | Conditional Jump (if cell is 0) | | ] | Conditional Jump (if cell isn't 0) | | . | Output Cell | | , | Read into Cell | Common Constructs [-] set current cell to 0 [-&gt;+&lt;] add current cell to another [-&gt;++&lt;] multiplication [&lt;] find last zero cell Ambiguities "Infinite tape" -- reference impl uses 30,000 cells How big are cells? -- u8 or u32? or signed? So, implementations... Turns out I have no idea what I'm doing Python to the rescue First Attempt End of explanation """ class BFToken(Enum): Incr = '+' Decr = '-' MoveL = '<' MoveR = '>' StdIn = ',' StdOut = '.' JumpF = '[' JumpB = ']' partners = { BFToken.Incr: BFToken.Decr, BFToken.Decr: BFToken.Incr, BFToken.MoveL: BFToken.MoveR, BFToken.MoveR: BFToken.MoveL } def _parse(prog: str) -> Iterator[BFToken]: for char in prog: try: yield BFToken(char) except ValueError: pass def parse(prog: str) -> List[BFToken]: return list(_parse(prog)) parse('++a+--') """ Explanation: Pros Very simple Jumping back is easy Cons Very naive Jumping forward isn't easy Incorrect programs not detected Parsing End of explanation """ def collapse(prog: List[BFToken]) -> List[BFToken]: program = [] for token in prog: ... 
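        # problem: a plain BFToken has nowhere to store a repeat count,
        # so there is nothing sensible to append to program here...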
# uh wait a second """ Explanation: Optimizing Jump table Combine like tokens End of explanation """ class IRToken(NamedTuple): token: BFToken amount: int def collapse(prog: List[BFToken]) -> List[IRToken]: program: List[IRToken] = [] for token in prog: if len(program) == 0 or token not in partners: program.append(IRToken(token, 1)) continue previous = program.pop() if previous.token == token: new_token = previous._replace(amount=previous.amount+1) if new_token.amount != 0: program.append(new_token) elif previous.token == partners[token]: new_token = previous._replace(amount=previous.amount-1) if new_token.amount != 0: program.append(new_token) else: program.append(previous) program.append(IRToken(token, 1)) return program def build_jump_table(prog: List[IRToken]): brackets = [] for idx, token in enumerate(prog, 0): if token.token == BFToken.JumpF: brackets.append(idx) elif token.token == BFToken.JumpB: try: partner = brackets.pop() except IndexError: raise BFError(f"Unmatched bracket at: {idx}") from None else: prog[idx] = prog[idx]._replace(amount=partner) prog[partner] = prog[partner]._replace(amount=idx) if brackets: raise BFError(f"Unmatched brackets at: {', '.join([str(x) for x in brackets])}") tokens = collapse(parse('++[->++++++++<]')) build_jump_table(tokens) tokens def run(prog: List[IRToken], stdin: str="") -> StringIO: stdout = StringIO() stdiniter = iter(stdin) getc = lambda: ord(next(stdiniter, '\0')) putc = lambda: stdout.write(chr(memory[memptr])) memory = [0] * 30_000 memptr = 0 instrptr = 0 proglength = len(prog) while instrptr < proglength: op = prog[instrptr] if op.token == BFToken.StdOut: putc() elif op.token == BFToken.StdIn: memory[memptr] = getc() elif op.token == BFToken.Incr: memory[memptr] += op.amount elif op.token == BFToken.Decr: memory[memptr] -= op.amount elif op.token == BFToken.MoveL: memptr = (memptr - op.amount) % 30_000 elif op.token == BFToken.MoveR: memptr = (memptr + op.amount) % 30_000 elif op.token == BFToken.JumpF: if memory[memptr] == 0: instrptr = op.amount elif op.token == BFToken.JumpB: if memory[memptr] != 0: instrptr = op.amount instrptr += 1 stdout.seek(0) return stdout def bf(source: str, stdin: str="") -> StringIO: prog = collapse(parse(source)) build_jump_table(prog) return run(prog, stdin) %%time print(bf("++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++.").read()) triangle = """ > + + + + [ < + + + + + + + + > - ] > + + + + + + + + [ > + + + + < - ] > > + + > > > + > > > + < < < < < < < < < < [ - [ - > + < ] > [ - < + > > > . < < ] > > > [ [ - > + + + + + + + + [ > + + + + < - ] > . < < [ - > + < ] + > [ - > + + + + + + + + + + < < + > ] > . [ - ] > ] ] + < < < [ - [ - > + < ] + > [ - < + > > > - [ - > + < ] + + > [ - < - > ] < < < ] < < < < ] + + + + + + + + + + . + + + . 
[ - ] < ] + + + + + * * * * * M a d e * B y : * N Y Y R I K K I * 2 0 0 2 * * * * * """ %%time result = bf(triangle) print(result.read()) ZtoA = """>++[<+++++++++++++>-]<[[>+>+<<-]>[<+>-]++++++++ [>++++++++<-]>.[-]<<>++++++++++[>++++++++++[>++ ++++++++[>++++++++++[>++++++++++[>++++++++++[>+ +++++++++[-]<-]<-]<-]<-]<-]<-]<-]++++++++++.""" %%time print(bf(ZtoA).read()) """ Explanation: Missing Something End of explanation """ def handle_clear(tokens: List[BFToken]) -> List[BFToken]: program: List[BFToken] = [] clear = [BFToken.JumpF, BFToken.Decr, BFToken.JumpB] for token in tokens: program.append(token) if len(program) < 3: continue last_three = program[-3:] if last_three == clear: program[-3:] = [BFToken.ZeroOut] return program """ Explanation: Where are we spending time? [ I-1 ] 26_000_000 [ M1 I10 [ I-1 ] M-1 I-1 ] -&gt; 2_600_000 [ M1 I10 [ M1 I10 [ I-1 ] M-1 I-1 ] M-1 I-1 ] -&gt; 260_000 [ M1 I10 [ M1 I10 [ M1 I10 [ I-1 ] M-1 I-1 ] M-1 I-1 ] M-1 I-1 ] -&gt; 26_000 [ M1 I10 [ M1 I10 [ M1 I10 [ M1 I10 [ I-1 ] M-1 I-1 ] M-1 I-1 ] M-1 I-1 ] M-1 I-1 ] -&gt; 2_600 [ M1 I10 [ M1 I10 [ M1 I10 [ M1 I10 [ M1 I10 [ I-1 ] M-1 I-1 ] M-1 I-1 ] M-1 I-1 ] M-1 I-1 ] M-1 I-1 ] -&gt; 260 Idea Transform [-] into memory[memptr] = 0 End of explanation """ %%bash time ./bf triangle.bf > /dev/null %%bash time ./bf ZtoA.bf > /dev/null %%bash time ./bf mandel.bf > /dev/null """ Explanation: 38min 34s Python isn't known for being fast Cython, numba, etc can help but... Rust 🎺🎺🎺 insert hype here But seriously Opt-in mutability Algebraic Data Types Functional + Imperative High level but fast Representation rust enum BrainFuckToken { Move(isize), JumpF(usize), JumpB(usize), Incr(i32) StdIn, StdOut, ZeroOut } Parsing rust impl BrainFuckToken { pub fn from_char(c: char) -&gt; Option&lt;BrainFuckToken&gt; { match c { '+' =&gt; Some(BrainFuckToken::Incr(1)), '-' =&gt; Some(BrainFuckToken::Incr(-1)), '&gt;' =&gt; Some(BrainFuckToken::Move(1)), '&lt;' =&gt; Some(BrainFuckToken::Move(-1)), '.' =&gt; Some(BrainFuckToken::StdOut), ',' =&gt; Some(BrainFuckToken::StdIn), '[' =&gt; Some(BrainFuckToken::JumpF(0)), ']' =&gt; Some(BrainFuckToken::JumpB(0)), _ =&gt; None, } } } Jumps ```rust fn build_jumps(tokens: &mut Vec<BrainFuckToken>) { let mut brackets = Vec::new(); for idx in 0..tokens.len() { match tokens[idx] { BrainFuckToken::JumpF() => brackets.push(idx), BrainFuckToken::JumpB() => { let partner = brackets .pop() .unwrap_or_else(|| panic!("unmatched bracket at {}", idx)); mem::replace(&mut tokens[idx], BrainFuckToken::JumpB(partner)); mem::replace(&mut tokens[partner], BrainFuckToken::JumpF(idx)); } _ => {} } } if brackets.len() != 0 { panic!("Unmatched brackets at: {:?}", brackets); } } ``` Run loop rust while let Some(instr) = self.ops.get(self.loc) { match *instr { BrainFuckToken::JumpF(x) =&gt; { if self.tape.get() == 0 { self.loc = x; } else { self.tracer.trace((self.loc, x)); } } BrainFuckToken::JumpB(x) =&gt; { if self.tape.get() != 0 { self.loc = x; } } BrainFuckToken::Move(x) =&gt; self.tape.move_(x), BrainFuckToken::Incr(x) =&gt; self.tape.incr(x), BrainFuckToken::StdIn =&gt; self.tape.putc(input_iter.next().unwrap_or('\0')), BrainFuckToken::StdOut =&gt; out.push(self.tape.getc()). BrainFuckToken::ZeroOut =&gt; self.tape.put(0), } self.loc += 1; } But how fast? End of explanation """
statsmodels/statsmodels.github.io
v0.13.1/examples/notebooks/generated/tsa_arma_0.ipynb
bsd-3-clause
%matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd import statsmodels.api as sm from scipy import stats from statsmodels.tsa.arima.model import ARIMA from statsmodels.graphics.api import qqplot """ Explanation: Autoregressive Moving Average (ARMA): Sunspots data End of explanation """ print(sm.datasets.sunspots.NOTE) dta = sm.datasets.sunspots.load_pandas().data dta.index = pd.Index(sm.tsa.datetools.dates_from_range("1700", "2008")) dta.index.freq = dta.index.inferred_freq del dta["YEAR"] dta.plot(figsize=(12, 8)) fig = plt.figure(figsize=(12, 8)) ax1 = fig.add_subplot(211) fig = sm.graphics.tsa.plot_acf(dta.values.squeeze(), lags=40, ax=ax1) ax2 = fig.add_subplot(212) fig = sm.graphics.tsa.plot_pacf(dta, lags=40, ax=ax2) arma_mod20 = ARIMA(dta, order=(2, 0, 0)).fit() print(arma_mod20.params) arma_mod30 = ARIMA(dta, order=(3, 0, 0)).fit() print(arma_mod20.aic, arma_mod20.bic, arma_mod20.hqic) print(arma_mod30.params) print(arma_mod30.aic, arma_mod30.bic, arma_mod30.hqic) """ Explanation: Sunspots Data End of explanation """ sm.stats.durbin_watson(arma_mod30.resid.values) fig = plt.figure(figsize=(12, 8)) ax = fig.add_subplot(111) ax = arma_mod30.resid.plot(ax=ax) resid = arma_mod30.resid stats.normaltest(resid) fig = plt.figure(figsize=(12, 8)) ax = fig.add_subplot(111) fig = qqplot(resid, line="q", ax=ax, fit=True) fig = plt.figure(figsize=(12, 8)) ax1 = fig.add_subplot(211) fig = sm.graphics.tsa.plot_acf(resid.values.squeeze(), lags=40, ax=ax1) ax2 = fig.add_subplot(212) fig = sm.graphics.tsa.plot_pacf(resid, lags=40, ax=ax2) r, q, p = sm.tsa.acf(resid.values.squeeze(), fft=True, qstat=True) data = np.c_[np.arange(1, 25), r[1:], q, p] table = pd.DataFrame(data, columns=["lag", "AC", "Q", "Prob(>Q)"]) print(table.set_index("lag")) """ Explanation: Does our model obey the theory? End of explanation """ predict_sunspots = arma_mod30.predict("1990", "2012", dynamic=True) print(predict_sunspots) def mean_forecast_err(y, yhat): return y.sub(yhat).mean() mean_forecast_err(dta.SUNACTIVITY, predict_sunspots) """ Explanation: This indicates a lack of fit. In-sample dynamic prediction. How good does our model do? End of explanation """ from statsmodels.tsa.arima_process import ArmaProcess np.random.seed(1234) # include zero-th lag arparams = np.array([1, 0.75, -0.65, -0.55, 0.9]) maparams = np.array([1, 0.65]) """ Explanation: Exercise: Can you obtain a better fit for the Sunspots model? (Hint: sm.tsa.AR has a method select_order) Simulated ARMA(4,1): Model Identification is Difficult End of explanation """ arma_t = ArmaProcess(arparams, maparams) arma_t.isinvertible arma_t.isstationary """ Explanation: Let's make sure this model is estimable. End of explanation """ fig = plt.figure(figsize=(12, 8)) ax = fig.add_subplot(111) ax.plot(arma_t.generate_sample(nsample=50)) arparams = np.array([1, 0.35, -0.15, 0.55, 0.1]) maparams = np.array([1, 0.65]) arma_t = ArmaProcess(arparams, maparams) arma_t.isstationary arma_rvs = arma_t.generate_sample(nsample=500, burnin=250, scale=2.5) fig = plt.figure(figsize=(12, 8)) ax1 = fig.add_subplot(211) fig = sm.graphics.tsa.plot_acf(arma_rvs, lags=40, ax=ax1) ax2 = fig.add_subplot(212) fig = sm.graphics.tsa.plot_pacf(arma_rvs, lags=40, ax=ax2) """ Explanation: What does this mean? 
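One quick way to check (a small sketch using the arma_t process defined above): ArmaProcess exposes the roots of the AR polynomial, and the process is stationary only if all of them lie outside the unit circle.

```python
print(arma_t.arroots)
print(np.abs(arma_t.arroots))
```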
End of explanation """ lags = int(10 * np.log10(arma_rvs.shape[0])) arma11 = ARIMA(arma_rvs, order=(1, 0, 1)).fit() resid = arma11.resid r, q, p = sm.tsa.acf(resid, nlags=lags, fft=True, qstat=True) data = np.c_[range(1, lags + 1), r[1:], q, p] table = pd.DataFrame(data, columns=["lag", "AC", "Q", "Prob(>Q)"]) print(table.set_index("lag")) arma41 = ARIMA(arma_rvs, order=(4, 0, 1)).fit() resid = arma41.resid r, q, p = sm.tsa.acf(resid, nlags=lags, fft=True, qstat=True) data = np.c_[range(1, lags + 1), r[1:], q, p] table = pd.DataFrame(data, columns=["lag", "AC", "Q", "Prob(>Q)"]) print(table.set_index("lag")) """ Explanation: For mixed ARMA processes the Autocorrelation function is a mixture of exponentials and damped sine waves after (q-p) lags. The partial autocorrelation function is a mixture of exponentials and dampened sine waves after (p-q) lags. End of explanation """ macrodta = sm.datasets.macrodata.load_pandas().data macrodta.index = pd.Index(sm.tsa.datetools.dates_from_range("1959Q1", "2009Q3")) cpi = macrodta["cpi"] """ Explanation: Exercise: How good of in-sample prediction can you do for another series, say, CPI End of explanation """ fig = plt.figure(figsize=(12, 8)) ax = fig.add_subplot(111) ax = cpi.plot(ax=ax) ax.legend() """ Explanation: Hint: End of explanation """ print(sm.tsa.adfuller(cpi)[1]) """ Explanation: P-value of the unit-root test, resoundingly rejects the null of a unit-root. End of explanation """
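
# A possible next step for the CPI exercise above (a sketch, not part of the
# original notebook): compare the unit-root test on the level of CPI with the
# same test on its first difference before choosing a model.
print(sm.tsa.adfuller(cpi.diff().dropna())[1])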
NeuroDataDesign/pan-synapse
pipeline_3/background/non_maxima_supression.ipynb
apache-2.0
import sys import scipy.io as sio import glob import numpy as np import matplotlib.pyplot as plt from skimage.filters import threshold_otsu sys.path.append('../code/functions') import qaLib as qLib sys.path.append('../../pipeline_1/code/functions') import connectLib as cLib from IPython.display import Image import random from connectLib import otsuVox """ Explanation: Imports End of explanation """ Image(filename = "images/nonMaxima.png") """ Explanation: Algorithm Pseudocode End of explanation """ def nonMaximaSupression(clusterList, image, z): randClusterDist = [] for i in range(100000): point = [int(random.random()*image.shape[0]), int(random.random()*image.shape[1]), int(random.random()*image.shape[2])] randClusterDist.append(image[point[0]][point[1]][point[2]]) mu = np.average(randClusterDist) sigma = np.std(randClusterDist) aveList = [] for cluster in clusterList: curClusterDist = [] for member in cluster.members: curClusterDist.append(image[member[0]][member[1]][member[2]]) aveList.append(np.mean(curClusterDist)) finalClusters = [] for i in range(len(aveList)): #this is bad and i should feel bad if (aveList[i] - mu)/float(sigma) > z: finalClusters.append(clusterList[i]) return finalClusters """ Explanation: Actual Code End of explanation """ simEasyGrid = np.zeros((100, 100, 100)) for i in range(4): for j in range(4): for k in range(4): simEasyGrid[20*(2*j): 20*(2*j + 1), 20*(2*i): 20*(2*i + 1), 20*(2*k): 20*(2*k + 1)] = i + j + k + 1 plt.imshow(simEasyGrid[5]) plt.axis('off') plt.title('Easy Data Raw Plot at z=0') plt.show() plt.hist(simEasyGrid[0]) plt.title("Histogram of Easy Data") plt.show() """ Explanation: Algorithm Conditions We believe nonMaxSupression will perform well if and only if the histogram of the data is capable of producing z-scores - i.e. there is variance in the brightness. Data on which nonMaxSupression will perform well and poorly The data set on which nonMaxSupression will perform well is a gradient image. We are trying to extract anything with a z-score above 7, and this should clearly extract that. The data set on which nonMaxSupression will perform poorly is a linear image. It will perform poorly because the data does not follow a normal curve. Raw Data Plot Good Data End of explanation """ simDiff = np.zeros((100, 100, 100)) for i in range(100): for j in range(100): for k in range(100): simDiff[i][j][k] = 100 plt.imshow(simDiff[5]) plt.axis('off') plt.title('Challenging Data Raw Plot at z=0') plt.show() plt.hist(simDiff[0], bins=20) plt.title("Histogram of Challenging Data") plt.show() """ Explanation: Prediction on Good Data I predict that nonMaxSupression will pick up the higher range of clusters. Challenging Data End of explanation """ simEasyGrid = np.zeros((100, 100, 100)) for i in range(4): for j in range(4): for k in range(4): simEasyGrid[20*(2*j): 20*(2*j + 1), 20*(2*i): 20*(2*i + 1), 20*(2*k): 20*(2*k + 1)] = i + j + k + 1 plt.imshow(simEasyGrid[5]) plt.axis('off') plt.title('Easy Data Raw Plot at z=0') plt.show() plt.hist(simEasyGrid[0]) plt.title("Histogram of Easy Data") plt.show() """ Explanation: Prediction on Challenging Data I predict that nonMaxSupression will will not be able to calculate z scores, and thus will fail. Simulation Generate Toy Simulation Data Easy Data I believe this data will look la grid of different-colored squares. 
End of explanation """ simDiff = np.zeros((100, 100, 100)) for i in range(100): for j in range(100): for k in range(100): simDiff[i][j][k] = 100 plt.imshow(simDiff[5]) plt.axis('off') plt.title('Challenging Data Raw Plot at z=0') plt.show() plt.hist(simDiff[0], bins=20) plt.title("Histogram of Challenging Data") plt.show() """ Explanation: The easy data looks exactly as I expected. The histogram is has deviation, meaning nonMaxSupression will be able to extract maxima. Difficult Data I expect that the difficult data will look like a constant image. End of explanation """ otsuOutEasy = otsuVox(simEasyGrid) otsuClustersEasy = cLib.clusterThresh(otsuOutEasy, 0, 1000000) nonMaxClusters = nonMaximaSupression(otsuClustersEasy, simEasyGrid, 1) nonMaxEasy = np.zeros_like(simEasy) for cluster in nonMaxClusters: for member in cluster.members: nonMaxEasy[member[0]][member[1]][member[2]] = 1 plt.imshow(nonMaxEasy[5]) plt.axis('off') plt.title('Non Max Supression Output for Easy Data Slice at z=5') plt.show() """ Explanation: The difficult data looks exactly as I expected. The histogram is a single value, which is the kind of data nonMaxSupression fails on. Toy Data Analysis For the good data, I predict that otsuVox will select the voxels in the rightmost normal curve. For the bad data, I predict that otsuVox will randomly select one normal curve. End of explanation """ otsuOutDiff = otsuVox(simDiff) otsuClustersDiff = cLib.clusterThresh(otsuOutDiff, 0, 1000000) nonMaxClusters = nonMaximaSupression(otsuClustersDiff, simDiff, 0) nonMaxDiff = np.zeros_like(simDiff) for cluster in nonMaxClusters: for member in cluster.members: nonMaxDiff[member[0]][member[1]][member[2]] = 1 plt.imshow(nonMaxDiff[5]) plt.axis('off') plt.title('Non Max Supression Output for Difficult Data Slice at z=5') plt.show() """ Explanation: As expected, otsuVox picked up just the brightest clusters. End of explanation """ procData = [] for mat in glob.glob('../../data/matlabData/collman15v2/*_p1.mat'): name = mat[34:-7] rawData = sio.loadmat(mat) npData = np.rollaxis(rawData[name], 2, 0) procData.append([name, npData]) realData = procData[12][1] otsuOutReal = otsuVox(realData) plt.imshow(otsuOutReal[0], cmap='gray') plt.title('Real Data otsuVox Output At Slice 0') plt.axis('off') plt.show() plt.hist(otsuOutReal[0]) plt.title("Histogram of Post-Otsu Data") plt.show() """ Explanation: As expected, otsuVox failed to pick out bright things because there was no deviation in the image. Real Data End of explanation """ otsuClusters = cLib.clusterThresh(otsuOutReal, 0, 10000000) nonMaxClusters = nonMaximaSupression(otsuClusters, realData, 6) nonMaxImg = np.zeros_like(realData) for cluster in nonMaxClusters: for member in cluster.members: nonMaxImg[member[0]][member[1]][member[2]] = 1 plt.imshow(nonMaxImg[0], cmap='gray') plt.title('NonMaximaSupression Output At Slice 0') plt.axis('off') plt.show() """ Explanation: As we can see, the real data has a mean and a standard deviation. This means that nonMaximaSupression should be able to extract the bright spots. 
End of explanation """ labelClusters = cLib.clusterThresh(procData[0][1], 0, 10000000) otsuClusters = cLib.clusterThresh(otsuOutReal, 0, 10000000) precision, recall, F1 = qLib.precision_recall_f1(labelClusters, otsuClusters) print 'Precision: ' + str(precision) print 'Recall: ' + str(recall) print 'F1: ' + str(F1) """ Explanation: Precision/Recall/F1 before nonMaximaSupression End of explanation """ precision, recall, F1 = qLib.precision_recall_f1(labelClusters, nonMaxClusters) print 'Precision: ' + str(precision) print 'Recall: ' + str(recall) print 'F1: ' + str(F1) """ Explanation: Precision/Recall/F1 after nonMaximaSupression End of explanation """
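
# A small summary one might add here (a sketch): how many clusters survive the
# non-maxima supression cut relative to the otsu clusters.
print 'Number of otsu clusters: ' + str(len(otsuClusters))
print 'Number of clusters after non-maxima supression: ' + str(len(nonMaxClusters))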
Astrohackers-TW/IANCUPythonAdventure
notebooks/notebooks4beginners/04_python_tutorial_sci-packages2.ipynb
mit
from scipy.optimize import curve_fit import numpy as np import matplotlib.pyplot as plt %matplotlib inline time = np.linspace(0, 10, 200) counts = 50 * np.sin(2 * np.pi * 1. / 2.5 * time) + 100 + np.random.normal(0, 5., len(time)) plt.plot(time, counts, 'k.') counts_err = 4 * np.random.rand(len(time)) + 1 plt.errorbar(time, counts, yerr=counts_err, fmt="r") def lc_model(time, amplitude, period, dc): return amplitude * np.sin(2 * np.pi * 1. / period * time) + dc popt, pcov = curve_fit(lc_model, time, counts, p0=[50, 2.5, 100], sigma=counts_err) print(popt) print(pcov) perr = np.sqrt(np.diag(pcov)) print("Amplitude =", popt[0], "+/-", perr[0]) print("Period =", popt[1], "+/-", perr[1]) print("DC =", popt[2], "+/-", perr[2]) plt.errorbar(time, counts, yerr=counts_err, fmt="none", label='Data') plt.plot(time, lc_model(time, popt[0], popt[1], popt[2]),'r-', label='Model') plt.xlabel('Time (hour)') plt.ylabel('Counts') plt.legend() """ Explanation: Python 基本語法與科孞蚈算套件的䜿甚 Python科孞蚈算套件(二) 四堂課皋倧綱 第䞀堂-Python 基瀎(侀)Python 簡介及環境建立、Python皋匏的線寫及執行、資料型態、基本茞入茞出、流皋控制 第二堂-Python 基瀎(二)檔案讀寫、䟋倖處理、凜敞、暡組、物件導向 第䞉堂-Python科孞蚈算套件(侀)Numpy、Matplotlib 第四堂-Python科孞蚈算套件(二)Scipy、Astropy Scipy Scipy䞭的暡組 特殊凜敞 (<a href="http://docs.scipy.org/doc/scipy/reference/tutorial/special.html" target="_blank">scipy.special</a>) 敞倌積分/解埮分方皋 (<a href="http://docs.scipy.org/doc/scipy/reference/tutorial/integrate.html" target="_blank">scipy.integrate</a>) 最䜳化/擬合/求根 (<a href="http://docs.scipy.org/doc/scipy/reference/tutorial/optimize.html" target="_blank">scipy.optimize</a>) 內插 (<a href="http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html" target="_blank">scipy.interpolate</a>) Fourier 蜉換 (<a href="http://docs.scipy.org/doc/scipy/reference/tutorial/fftpack.html" target="_blank">scipy.fftpack</a>) 蚊號處理 (<a href="http://docs.scipy.org/doc/scipy/reference/tutorial/signal.html" target="_blank">scipy.signal</a>) 線性代敞 (<a href="http://docs.scipy.org/doc/scipy/reference/tutorial/linalg.html" target="_blank">scipy.linalg</a>) 機率分䜈/統蚈凜敞 (<a href="http://docs.scipy.org/doc/scipy/reference/tutorial/stats.html" target="_blank">scipy.stats</a>) 倚維圱像處理 (<a href="http://docs.scipy.org/doc/scipy/reference/tutorial/ndimage.html" target="_blank">scipy.ndimage</a>) ... Scipy範䟋曲線擬合 End of explanation """ from astropy.io import fits hdulist = fits.open("XTE_J1550_564_30191011500A_2_13kev_0.01s_0_2505s.fits") hdulist.info() primary = hdulist[0] primary.header print(primary.header['OBJECT ']) rate = hdulist[1] rate.data print(rate.data.shape) print(rate.data.field(1)) time = rate.data.field(0)[:500] counts = rate.data.field(1)[:500] plt.plot(time, counts) plt.xlabel('Time (s)') plt.ylabel('Rate (count/s)') hdulist.close() """ Explanation: Astropy Astropy䞭的暡組 物理垞敞及單䜍換算 (<a href="http://docs.astropy.org/en/stable/constants/index.html" target="_blank">astropy.constants</a>, <a href="http://docs.astropy.org/en/stable/units/index.html" target="_blank">astropy.units</a>) 資料衚栌操䜜 (<a href="http://docs.astropy.org/en/stable/table/index.html" target="_blank">astropy.table</a>) 倩球座暙系統蜉換 (<a href="http://docs.astropy.org/en/stable/coordinates/index.html" target="_blank">astropy.coordinates</a>) 資料擬合 (<a href="http://docs.astropy.org/en/stable/modeling/index.html" target="_blank">astropy.modeling</a>) FITS檔操䜜 (<a href="http://docs.astropy.org/en/stable/io/fits/index.html" target="_blank">astropy.io.fits</a>) ... Astropy範䟋FITS檔操䜜 End of explanation """
amueller/scipy-2017-sklearn
notebooks/16.Performance_metrics_and_Model_Evaluation.ipynb
cc0-1.0
%matplotlib inline import matplotlib.pyplot as plt import numpy as np np.set_printoptions(precision=2) from sklearn.datasets import load_digits from sklearn.model_selection import train_test_split from sklearn.svm import LinearSVC digits = load_digits() X, y = digits.data, digits.target X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, stratify=y, test_size=0.25) classifier = LinearSVC(random_state=1).fit(X_train, y_train) y_test_pred = classifier.predict(X_test) print("Accuracy: {}".format(classifier.score(X_test, y_test))) """ Explanation: Model Evaluation, Scoring Metrics, and Dealing with Imbalanced Classes In the previous notebook, we already went into some detail on how to evaluate a model and how to pick the best model. So far, we assumed that we were given a performance measure, a measure of the quality of the model. What measure one should use is not always obvious, though. The default scores in scikit-learn are accuracy for classification, which is the fraction of correctly classified samples, and r2 for regression, with is the coefficient of determination. These are reasonable default choices in many scenarious; however, depending on our task, these are not always the definitive or recommended choices. Let's take look at classification in more detail, going back to the application of classifying handwritten digits. So, how about training a classifier and walking through the different ways we can evaluate it? Scikit-learn has many helpful methods in the sklearn.metrics module that can help us with this task: End of explanation """ from sklearn.metrics import confusion_matrix confusion_matrix(y_test, y_test_pred) """ Explanation: Here, we predicted 95.3% of samples correctly. For multi-class problems, it is often interesting to know which of the classes are hard to predict, and which are easy, or which classes get confused. One way to get more information about misclassifications is the confusion_matrix, which shows for each true class, how frequent a given predicted outcome is. End of explanation """ plt.matshow(confusion_matrix(y_test, y_test_pred), cmap="Blues") plt.colorbar(shrink=0.8) plt.xticks(range(10)) plt.yticks(range(10)) plt.xlabel("Predicted label") plt.ylabel("True label"); """ Explanation: A plot is sometimes more readable: End of explanation """ from sklearn.metrics import classification_report print(classification_report(y_test, y_test_pred)) """ Explanation: We can see that most entries are on the diagonal, which means that we predicted nearly all samples correctly. The off-diagonal entries show us that many eights were classified as ones, and that nines are likely to be confused with many other classes. Another useful function is the classification_report which provides precision, recall, fscore and support for all classes. Precision is how many of the predictions for a class are actually that class. With TP, FP, TN, FN standing for "true positive", "false positive", "true negative" and "false negative" repectively: Precision = TP / (TP + FP) Recall is how many of the true positives were recovered: Recall = TP / (TP + FN) F1-score is the geometric average of precision and recall: F1 = 2 x (precision x recall) / (precision + recall) The values of all these values above are in the closed interval [0, 1], where 1 means a perfect score. End of explanation """ np.bincount(y) / y.shape[0] """ Explanation: These metrics are helpful in two particular cases that come up often in practice: 1. 
Imbalanced classes, that is one class might be much more frequent than the other. 2. Asymmetric costs, that is one kind of error is much more "costly" than the other. Let's have a look at 1. first. Say we have a class imbalance of 1:9, which is rather mild (think about ad-click-prediction where maybe 0.001% of ads might be clicked): End of explanation """ X, y = digits.data, digits.target == 3 """ Explanation: As a toy example, let's say we want to classify the digits three against all other digits: End of explanation """ from sklearn.model_selection import cross_val_score from sklearn.svm import SVC cross_val_score(SVC(), X, y) """ Explanation: Now we run cross-validation on a classifier to see how well it does: End of explanation """ from sklearn.dummy import DummyClassifier cross_val_score(DummyClassifier("most_frequent"), X, y) """ Explanation: Our classifier is 90% accurate. Is that good? Or bad? Keep in mind that 90% of the data is "not three". So let's see how well a dummy classifier does, that always predicts the most frequent class: End of explanation """ np.bincount(y) / y.shape[0] """ Explanation: Also 90% (as expected)! So one might thing that means our classifier is not very good, it doesn't to better than a simple strategy that doesn't even look at the data. That would be judging too quickly, though. Accuracy is simply not a good way to evaluate classifiers for imbalanced datasets! End of explanation """ from sklearn.metrics import roc_curve, roc_auc_score X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) for gamma in [.01, .05, 1]: plt.xlabel("False Positive Rate") plt.ylabel("True Positive Rate (recall)") svm = SVC(gamma=gamma).fit(X_train, y_train) decision_function = svm.decision_function(X_test) fpr, tpr, _ = roc_curve(y_test, decision_function) acc = svm.score(X_test, y_test) auc = roc_auc_score(y_test, svm.decision_function(X_test)) plt.plot(fpr, tpr, label="acc:%.2f auc:%.2f" % (acc, auc), linewidth=3) plt.legend(loc="best"); """ Explanation: ROC Curves A much better measure is using the so-called ROC (Receiver operating characteristics) curve. A roc-curve works with uncertainty outputs of a classifier, say the "decision_function" of the SVC we trained above. Instead of making a cut-off at zero and looking at classification outcomes, it looks at every possible cut-off and records how many true positive predictions there are, and how many false positive predictions there are. The following plot compares the roc curve of three parameter settings of our classifier on the "three vs rest" task. End of explanation """ from sklearn.model_selection import cross_val_score cross_val_score(SVC(), X, y, scoring="roc_auc") """ Explanation: With a very small decision threshold, there will be few false positives, but also few false negatives, while with a very high threshold, both true positive rate and false positive rate will be high. So in general, the curve will be from the lower left to the upper right. A diagonal line reflects chance performance, while the goal is to be as much in the top left corner as possible. This means giving a higher decision_function value to all positive samples than to any negative sample. In this sense, this curve only considers the ranking of the positive and negative samples, not the actual value. 
As you can see from the curves and the accuracy values in the legend, even though all classifiers have the same accuracy, 89%, which is even lower than the dummy classifier, one of them has a perfect roc curve, while one of them performs on chance level. For doing grid-search and cross-validation, we usually want to condense our model evaluation into a single number. A good way to do this with the roc curve is to use the area under the curve (AUC). We can simply use this in cross_val_score by specifying scoring="roc_auc": End of explanation """ from sklearn.metrics.scorer import SCORERS print(SCORERS.keys()) """ Explanation: Built-In and custom scoring functions There are many more scoring methods available, which are useful for different kinds of tasks. You can find them in the "SCORERS" dictionary. The only documentation explains all of them. End of explanation """ def my_accuracy_scoring(est, X, y): return np.mean(est.predict(X) == y) cross_val_score(SVC(), X, y, scoring=my_accuracy_scoring) """ Explanation: It is also possible to define your own scoring metric. Instead of a string, you can provide a callable to as scoring parameter, that is an object with a __call__ method or a function. It needs to take a model, a test-set features X_test and test-set labels y_test, and return a float. Higher floats are taken to mean better models. Let's reimplement the standard accuracy score: End of explanation """ y_true = np.array([0, 0, 0, 1, 1, 1, 1, 1, 2, 2]) y_pred = np.array([0, 1, 1, 0, 1, 1, 2, 2, 2, 2]) confusion_matrix(y_true, y_pred) # %load solutions/16A_avg_per_class_acc.py """ Explanation: <div class="alert alert-success"> <b>EXERCISE</b>: <ul> <li> In previous sections, we typically used the accuracy measure to evaluate the performance of our classifiers. A related measure that we haven't talked about, yet, is the average-per-class accuracy (APCA). As we remember, the accuracy is defined as $$ACC = \frac{TP+TN}{n},$$ where *n* is the total number of samples. This can be generalized to $$ACC = \frac{T}{n},$$ where *T* is the number of all correct predictions in multi-class settings. </li> </ul> ![](figures/average-per-class.png) <li> Given the following arrays of "true" class labels and predicted class labels, can you implement a function that uses the accuracy measure to compute the average-per-class accuracy as shown below? </li> </div> End of explanation """
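
# One possible sketch for the exercise above (this is not the contents of
# solutions/16A_avg_per_class_acc.py): treat each class as a binary
# "one vs. rest" problem, compute its accuracy, and average over classes.
def average_per_class_accuracy(y_true, y_pred):
    class_accuracies = []
    for cls in np.unique(y_true):
        class_accuracies.append(np.mean((y_true == cls) == (y_pred == cls)))
    return np.mean(class_accuracies)

average_per_class_accuracy(y_true, y_pred)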
GoogleCloudDataproc/cloud-dataproc
notebooks/python/3.1. Spark DataFrame & Pandas Plotting - Python.ipynb
apache-2.0
!scala -version """ Explanation: 3.1. Spark DataFrames & Pandas Plotting - Python Create Dataproc Cluster with Jupyter This notebook is designed to be run on Google Cloud Dataproc. Follow the links below for instructions on how to create a Dataproc Cluster with the Juypter component installed. Tutorial - Install and run a Jupyter notebook on a Dataproc cluster Blog post - Apache Spark and Jupyter Notebooks made easy with Dataproc component gateway Python 3 Kernel Use a Python 3 kernel (not PySpark) to allow you to configure the SparkSession in the notebook and include the spark-bigquery-connector required to use the BigQuery Storage API. Scala Version Check what version of Scala you are running so you can include the correct spark-bigquery-connector jar End of explanation """ from pyspark.sql import SparkSession spark = SparkSession.builder \ .appName('Spark DataFrames & Pandas Plotting')\ .config('spark.jars', 'gs://spark-lib/bigquery/spark-bigquery-latest.jar') \ .getOrCreate() """ Explanation: Create Spark Session Include the correct version of the spark-bigquery-connector jar Scala version 2.11 - 'gs://spark-lib/bigquery/spark-bigquery-latest.jar'. Scala version 2.12 - 'gs://spark-lib/bigquery/spark-bigquery-latest_2.12.jar'. End of explanation """ spark.conf.set("spark.sql.repl.eagerEval.enabled",True) """ Explanation: Enable repl.eagerEval This will output the results of DataFrames in each step without the new need to show df.show() and also improves the formatting of the output End of explanation """ table = "bigquery-public-data.wikipedia.pageviews_2020" df_wiki_pageviews = spark.read \ .format("bigquery") \ .option("table", table) \ .option("filter", "datehour >= '2020-03-01' AND datehour < '2020-03-02'") \ .load() df_wiki_pageviews.printSchema() """ Explanation: Read BigQuery table into Spark DataFrame Use filter() to query data from a partitioned table. 
End of explanation """ df_wiki_en = df_wiki_pageviews \ .select("datehour", "wiki", "views") \ .where("views > 1000 AND wiki in ('en', 'en.m')") \ .cache() df_wiki_en """ Explanation: Select required columns and apply a filter using where() which is an alias for filter() then cache the table End of explanation """ import pyspark.sql.functions as F df_datehour_totals = df_wiki_en \ .groupBy("datehour") \ .agg(F.sum('views').alias('total_views')) df_datehour_totals.orderBy('total_views', ascending=False) """ Explanation: Group by title and order by page views to see the top pages End of explanation """ spark.conf.set("spark.sql.execution.arrow.enabled", "true") %time pandas_datehour_totals = df_datehour_totals.toPandas() pandas_datehour_totals.set_index('datehour', inplace=True) pandas_datehour_totals.head() """ Explanation: Convert Spark DataFrame to Pandas DataFrame Convert the Spark DataFrame to Pandas DataFrame and set the datehour as the index End of explanation """ import matplotlib.pyplot as plt """ Explanation: Plotting Pandas Dataframe Import matplotlib End of explanation """ pandas_datehour_totals.plot(kind='line',figsize=(12,6)); """ Explanation: Use the Pandas plot function to create a line chart End of explanation """ import pyspark.sql.functions as F df_wiki_totals = df_wiki_en \ .groupBy("datehour") \ .pivot("wiki") \ .agg(F.sum('views').alias('total_views')) df_wiki_totals """ Explanation: Plot Multiple Columns Create a new Spark DataFrame and pivot the wiki column to create multiple rows for each wiki value End of explanation """ pandas_wiki_totals = df_wiki_totals.toPandas() pandas_wiki_totals.set_index('datehour', inplace=True) pandas_wiki_totals.head() """ Explanation: Convert to Pandas DataFrame End of explanation """ pandas_wiki_totals.plot(kind='line',figsize=(12,6)) """ Explanation: Create plot with line for each column End of explanation """ pandas_wiki_totals.plot.area(figsize=(12,6)) """ Explanation: Create stacked area plot End of explanation """
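# A small self-contained sketch (not part of the original notebook) of the same
# aggregate -> pivot -> toPandas -> plot pattern, using a tiny in-memory DataFrame so it
# can run without BigQuery access; the column names mirror the example above.
from pyspark.sql import SparkSession
import pyspark.sql.functions as F

spark = SparkSession.builder.appName("pivot-plot-sketch").getOrCreate()

rows = [
    ("2020-03-01 00:00:00", "en", 1200),
    ("2020-03-01 00:00:00", "en.m", 900),
    ("2020-03-01 01:00:00", "en", 1500),
    ("2020-03-01 01:00:00", "en.m", 1100),
]
df = spark.createDataFrame(rows, ["datehour", "wiki", "views"])

# one row per datehour, one column per wiki value
pivoted = df.groupBy("datehour").pivot("wiki").agg(F.sum("views"))

# convert to Pandas and plot a line per column (requires matplotlib)
pdf = pivoted.toPandas().set_index("datehour")
pdf.plot(kind="line", figsize=(12, 6))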
jrg365/gpytorch
examples/04_Variational_and_Approximate_GPs/Non_Gaussian_Likelihoods.ipynb
mit
import math
import torch
import gpytorch
from matplotlib import pyplot as plt

%matplotlib inline

"""
Explanation: Non-Gaussian Likelihoods
Introduction
This example is the simplest form of using an RBF kernel in an ApproximateGP module for classification. This basic model is usable when there is not much training data and no advanced techniques are required.
In this example, we're modeling a unit wave with period 1/2 centered with positive values @ x=0. We are going to classify the points as either 0 or 1.
Variational inference uses the assumption that the posterior distribution factors multiplicatively over the input variables. This assumption makes it possible to obtain a fast approximation to the posterior by minimizing a KL divergence.
For a good explanation of variational techniques, sections 4-6 of the following may be useful: https://www.cs.princeton.edu/courses/archive/fall11/cos597C/lectures/variational-inference-i.pdf
End of explanation
"""

train_x = torch.linspace(0, 1, 10)
train_y = torch.sign(torch.cos(train_x * (4 * math.pi))).add(1).div(2)

"""
Explanation: Set up training data
In the next cell, we set up the training data for this example. We'll be using 10 regularly spaced points on [0,1], which we evaluate the function on to get the training labels. The labels form a unit wave with period 1/2, centered so that values near x=0 are positive.
End of explanation
"""

from gpytorch.models import ApproximateGP
from gpytorch.variational import CholeskyVariationalDistribution
from gpytorch.variational import UnwhitenedVariationalStrategy


class GPClassificationModel(ApproximateGP):
    def __init__(self, train_x):
        variational_distribution = CholeskyVariationalDistribution(train_x.size(0))
        variational_strategy = UnwhitenedVariationalStrategy(
            self, train_x, variational_distribution, learn_inducing_locations=False
        )
        super(GPClassificationModel, self).__init__(variational_strategy)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        latent_pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
        return latent_pred


# Initialize model and likelihood
model = GPClassificationModel(train_x)
likelihood = gpytorch.likelihoods.BernoulliLikelihood()

"""
Explanation: Setting up the classification model
The next cell demonstrates the simplest way to define a classification Gaussian process model in GPyTorch. If you have already done the GP regression tutorial, you have already seen how GPyTorch model construction differs from other GP packages. In particular, the GP model expects a user to write out a forward method in a way analogous to PyTorch models. This gives the user the most possible flexibility.
Since exact inference is intractable for GP classification, GPyTorch approximates the classification posterior using variational inference. We believe that variational inference is ideal for a number of reasons. Firstly, variational inference commonly relies on gradient descent techniques, which take full advantage of PyTorch's autograd. This reduces the amount of code needed to develop complex variational models. Additionally, variational inference can be performed with stochastic gradient descent, which can be extremely scalable for large datasets.
If you are unfamiliar with variational inference, we recommend the following resources:
- Variational Inference: A Review for Statisticians by David M. Blei, Alp Kucukelbir, Jon D. McAuliffe.
- Scalable Variational Gaussian Process Classification by James Hensman, Alex Matthews, Zoubin Ghahramani.
In this example, we're using an UnwhitenedVariationalStrategy because we are using the training data as inducing points. In general, you'll probably want to use the standard VariationalStrategy class for improved optimization.
End of explanation
"""

# this is for running the notebook in our testing framework
import os
smoke_test = ('CI' in os.environ)
training_iterations = 2 if smoke_test else 50

# Find optimal model hyperparameters
model.train()
likelihood.train()

# Use the adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)

# "Loss" for GPs - the marginal log likelihood
# num_data refers to the number of training datapoints
mll = gpytorch.mlls.VariationalELBO(likelihood, model, train_y.numel())

for i in range(training_iterations):
    # Zero backpropped gradients from previous iteration
    optimizer.zero_grad()
    # Get predictive output
    output = model(train_x)
    # Calc loss and backprop gradients
    loss = -mll(output, train_y)
    loss.backward()
    print('Iter %d/%d - Loss: %.3f' % (i + 1, training_iterations, loss.item()))
    optimizer.step()

"""
Explanation: Model modes
Like most PyTorch modules, the ApproximateGP has a .train() and .eval() mode.
- .train() mode is for optimizing variational parameters and model hyperparameters.
- .eval() mode is for computing predictions through the model posterior.
Learn the variational parameters (and other hyperparameters)
In the next cell, we optimize the variational parameters of our Gaussian process. In addition, this optimization loop also performs Type-II MLE to train the hyperparameters of the Gaussian process.
End of explanation
"""

# Go into eval mode
model.eval()
likelihood.eval()

with torch.no_grad():
    # Test x are regularly spaced points in [0, 1] (spacing 0.01), endpoints inclusive
    test_x = torch.linspace(0, 1, 101)
    # Get classification predictions
    observed_pred = likelihood(model(test_x))

    # Initialize fig and axes for plot
    f, ax = plt.subplots(1, 1, figsize=(4, 3))
    ax.plot(train_x.numpy(), train_y.numpy(), 'k*')
    # Get the predicted labels (probabilities of belonging to the positive class)
    # Transform these probabilities to be 0/1 labels
    pred_labels = observed_pred.mean.ge(0.5).float()
    ax.plot(test_x.numpy(), pred_labels.numpy(), 'b')
    ax.set_ylim([-1, 2])
    ax.legend(['Observed Data', 'Mean'])

"""
Explanation: Make predictions with the model
In the next cell, we make predictions with the model. To do this, we simply put the model and likelihood in eval mode, and call both modules on the test data.
In .eval() mode, when we call model() we get the GP's latent posterior predictions. These will be MultivariateNormal distributions. But since we are performing binary classification, we want to transform these outputs to classification probabilities using our likelihood.
When we call likelihood(model()), we get a torch.distributions.Bernoulli distribution, which represents our posterior probability that the data points belong to the positive class.
```python
f_preds = model(test_x)
y_preds = likelihood(model(test_x))

f_mean = f_preds.mean
f_samples = f_preds.sample(sample_shape=torch.Size((1000,)))
```
End of explanation
"""
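# A short follow-on sketch (not in the original notebook): instead of thresholding the
# Bernoulli mean at 0.5 as above, inspect the predicted class probabilities directly and
# draw a few samples from the latent GP posterior. Reuses model, likelihood, train_x,
# train_y and plt defined earlier.
model.eval()
likelihood.eval()

with torch.no_grad():
    test_x = torch.linspace(0, 1, 101)
    f_post = model(test_x)                      # latent posterior (MultivariateNormal)
    probs = likelihood(f_post).mean             # P(y = 1 | x) for each test point
    f_samples = f_post.sample(torch.Size([5]))  # a few latent function samples

fig, ax = plt.subplots(1, 1, figsize=(4, 3))
ax.plot(train_x.numpy(), train_y.numpy(), 'k*')
ax.plot(test_x.numpy(), probs.numpy(), 'b')
ax.set_ylim([-0.1, 1.1])
ax.legend(['Observed Data', 'P(y=1)'])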
ES-DOC/esdoc-jupyterhub
notebooks/miroc/cmip6/models/sandbox-3/land.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'miroc', 'sandbox-3', 'land') """ Explanation: ES-DOC CMIP6 Model Properties - Land MIP Era: CMIP6 Institute: MIROC Source ID: SANDBOX-3 Topic: Land Sub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes. Properties: 154 (96 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-20 15:02:41 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Conservation Properties 3. Key Properties --&gt; Timestepping Framework 4. Key Properties --&gt; Software Properties 5. Grid 6. Grid --&gt; Horizontal 7. Grid --&gt; Vertical 8. Soil 9. Soil --&gt; Soil Map 10. Soil --&gt; Snow Free Albedo 11. Soil --&gt; Hydrology 12. Soil --&gt; Hydrology --&gt; Freezing 13. Soil --&gt; Hydrology --&gt; Drainage 14. Soil --&gt; Heat Treatment 15. Snow 16. Snow --&gt; Snow Albedo 17. Vegetation 18. Energy Balance 19. Carbon Cycle 20. Carbon Cycle --&gt; Vegetation 21. Carbon Cycle --&gt; Vegetation --&gt; Photosynthesis 22. Carbon Cycle --&gt; Vegetation --&gt; Autotrophic Respiration 23. Carbon Cycle --&gt; Vegetation --&gt; Allocation 24. Carbon Cycle --&gt; Vegetation --&gt; Phenology 25. Carbon Cycle --&gt; Vegetation --&gt; Mortality 26. Carbon Cycle --&gt; Litter 27. Carbon Cycle --&gt; Soil 28. Carbon Cycle --&gt; Permafrost Carbon 29. Nitrogen Cycle 30. River Routing 31. River Routing --&gt; Oceanic Discharge 32. Lakes 33. Lakes --&gt; Method 34. Lakes --&gt; Wetlands 1. Key Properties Land surface key properties 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of land surface model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of land surface model code (e.g. MOSES2.2) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.3. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General description of the processes modelled (e.g. dymanic vegation, prognostic albedo, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "water" # "energy" # "carbon" # "nitrogen" # "phospherous" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.4. Land Atmosphere Flux Exchanges Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Fluxes exchanged with the atmopshere. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.5. Atmospheric Coupling Treatment Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.land_cover') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "bare soil" # "urban" # "lake" # "land ice" # "lake ice" # "vegetated" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.6. Land Cover Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Types of land cover defined in the land surface model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.land_cover_change') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.7. Land Cover Change Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how land cover change is managed (e.g. the use of net or gross transitions) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.8. Tiling Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.conservation_properties.energy') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Conservation Properties TODO 2.1. Energy Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how energy is conserved globally and to what level (e.g. within X [units]/year) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.conservation_properties.water') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Water Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how water is conserved globally and to what level (e.g. within X [units]/year) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. 
Carbon Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how carbon is conserved globally and to what level (e.g. within X [units]/year) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Timestepping Framework TODO 3.1. Timestep Dependent On Atmosphere Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is a time step dependent on the frequency of atmosphere coupling? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overall timestep of land surface model (i.e. time between calls) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.3. Timestepping Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General description of time stepping method and associated time step(s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Software Properties Software properties of land surface code 4.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.grid.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Grid Land surface grid 5.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of the grid in the land surface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.grid.horizontal.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Grid --&gt; Horizontal The horizontal grid in the land surface 6.1. 
Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the general structure of the horizontal grid (not including any tiling) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.2. Matches Atmosphere Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the horizontal grid match the atmosphere? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.grid.vertical.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Grid --&gt; Vertical The vertical grid in the soil 7.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the general structure of the vertical grid in the soil (not including any tiling) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.grid.vertical.total_depth') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 7.2. Total Depth Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The total depth of the soil (in metres) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Soil Land surface soil 8.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of soil in the land surface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.heat_water_coupling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.2. Heat Water Coupling Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the coupling between heat and water in the soil End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.number_of_soil layers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 8.3. Number Of Soil layers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of soil layers End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.4. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List the prognostic variables of the soil scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.soil_map.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Soil --&gt; Soil Map Key properties of the land surface soil map 9.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General description of soil map End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.soil.soil_map.structure') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.2. Structure Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the soil structure map End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.soil_map.texture') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.3. Texture Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the soil texture map End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.soil_map.organic_matter') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.4. Organic Matter Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the soil organic matter map End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.soil_map.albedo') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.5. Albedo Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the soil albedo map End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.soil_map.water_table') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.6. Water Table Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the soil water table map, if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 9.7. Continuously Varying Soil Depth Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the soil properties vary continuously with depth? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.soil_map.soil_depth') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.8. Soil Depth Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the soil depth map End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 10. Soil --&gt; Snow Free Albedo TODO 10.1. Prognostic Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is snow free albedo prognostic? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.snow_free_albedo.functions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "vegetation type" # "soil humidity" # "vegetation state" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10.2. Functions Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N If prognostic, describe the dependancies on snow free albedo calculations End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "distinction between direct and diffuse albedo" # "no distinction between direct and diffuse albedo" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10.3. Direct Diffuse Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If prognostic, describe the distinction between direct and diffuse albedo End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 10.4. Number Of Wavelength Bands Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If prognostic, enter the number of wavelength bands used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11. Soil --&gt; Hydrology Key properties of the land surface soil hydrology 11.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General description of the soil hydrological model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.2. Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time step of river soil hydrology in seconds End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.3. Tiling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the soil hydrology tiling, if any. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.4. Vertical Discretisation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the typical vertical discretisation End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 11.5. Number Of Ground Water Layers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of soil layers that may contain water End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "perfect connectivity" # "Darcian flow" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.6. Lateral Connectivity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Describe the lateral connectivity between tiles End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.soil.hydrology.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Bucket" # "Force-restore" # "Choisnel" # "Explicit diffusion" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.7. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The hydrological dynamics scheme in the land surface model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 12. Soil --&gt; Hydrology --&gt; Freezing TODO 12.1. Number Of Ground Ice Layers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How many soil layers may contain ground ice End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.2. Ice Storage Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the method of ice storage End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.3. Permafrost Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the treatment of permafrost, if any, within the land surface scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.drainage.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 13. Soil --&gt; Hydrology --&gt; Drainage TODO 13.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General describe how drainage is included in the land surface scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.hydrology.drainage.types') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Gravity drainage" # "Horton mechanism" # "topmodel-based" # "Dunne mechanism" # "Lateral subsurface flow" # "Baseflow from groundwater" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.2. Types Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Different types of runoff represented by the land surface model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.heat_treatment.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14. Soil --&gt; Heat Treatment TODO 14.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General description of how heat treatment properties are defined End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.heat_treatment.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.2. Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time step of soil heat scheme in seconds End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.soil.heat_treatment.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.3. Tiling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the soil heat treatment tiling, if any. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.4. Vertical Discretisation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the typical vertical discretisation End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Force-restore" # "Explicit diffusion" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.5. Heat Storage Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specify the method of heat storage End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.soil.heat_treatment.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "soil moisture freeze-thaw" # "coupling with snow temperature" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.6. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Describe processes included in the treatment of soil heat End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Snow Land surface snow 15.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of snow in the land surface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Tiling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the snow tiling, if any. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.number_of_snow_layers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.3. Number Of Snow Layers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of snow levels used in the land surface scheme/model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.density') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "constant" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.4. Density Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Description of the treatment of snow density End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.water_equivalent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.5. 
Water Equivalent Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Description of the treatment of the snow water equivalent End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.heat_content') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.6. Heat Content Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Description of the treatment of the heat content of snow End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.temperature') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.7. Temperature Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Description of the treatment of snow temperature End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.liquid_water_content') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.8. Liquid Water Content Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Description of the treatment of snow liquid water End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.snow_cover_fractions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "ground snow fraction" # "vegetation snow fraction" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.9. Snow Cover Fractions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Specify cover fractions used in the surface snow scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "snow interception" # "snow melting" # "snow freezing" # "blowing snow" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.10. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Snow related processes in the land surface scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.11. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List the prognostic variables of the snow scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.snow.snow_albedo.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "prescribed" # "constant" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16. Snow --&gt; Snow Albedo TODO 16.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the treatment of snow-covered land albedo End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.snow.snow_albedo.functions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "vegetation type" # "snow age" # "snow density" # "snow grain type" # "aerosol deposition" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.2. Functions Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N *If prognostic, * End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17. Vegetation Land surface vegetation 17.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of vegetation in the land surface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 17.2. Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time step of vegetation scheme in seconds End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.dynamic_vegetation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 17.3. Dynamic Vegetation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there dynamic evolution of vegetation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.4. Tiling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the vegetation tiling, if any. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.vegetation_representation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "vegetation types" # "biome types" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.5. Vegetation Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Vegetation classification used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.vegetation_types') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "broadleaf tree" # "needleleaf tree" # "C3 grass" # "C4 grass" # "vegetated" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.6. Vegetation Types Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of vegetation types in the classification, if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.biome_types') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "evergreen needleleaf forest" # "evergreen broadleaf forest" # "deciduous needleleaf forest" # "deciduous broadleaf forest" # "mixed forest" # "woodland" # "wooded grassland" # "closed shrubland" # "opne shrubland" # "grassland" # "cropland" # "wetlands" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.7. 
Biome Types Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of biome types in the classification, if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.vegetation_time_variation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "fixed (not varying)" # "prescribed (varying from files)" # "dynamical (varying from simulation)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.8. Vegetation Time Variation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How the vegetation fractions in each tile are varying with time End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.vegetation_map') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.9. Vegetation Map Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If vegetation fractions are not dynamically updated , describe the vegetation map used (common name and reference, if possible) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.interception') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 17.10. Interception Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is vegetation interception of rainwater represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.phenology') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic (vegetation map)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.11. Phenology Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Treatment of vegetation phenology End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.phenology_description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.12. Phenology Description Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 General description of the treatment of vegetation phenology End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.leaf_area_index') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prescribed" # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.13. Leaf Area Index Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Treatment of vegetation leaf area index End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.leaf_area_index_description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.14. Leaf Area Index Description Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 General description of the treatment of leaf area index End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.vegetation.biomass') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.15. Biomass Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 *Treatment of vegetation biomass * End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.biomass_description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.16. Biomass Description Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 General description of the treatment of vegetation biomass End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.biogeography') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.17. Biogeography Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Treatment of vegetation biogeography End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.biogeography_description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.18. Biogeography Description Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 General description of the treatment of vegetation biogeography End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.stomatal_resistance') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "light" # "temperature" # "water availability" # "CO2" # "O3" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.19. Stomatal Resistance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Specify what the vegetation stomatal resistance depends on End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.20. Stomatal Resistance Description Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 General description of the treatment of vegetation stomatal resistance End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.vegetation.prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.21. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List the prognostic variables of the vegetation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.energy_balance.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18. Energy Balance Land surface energy balance 18.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of energy balance in land surface End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.energy_balance.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.2. Tiling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the energy balance tiling, if any. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 18.3. Number Of Surface Temperatures Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.energy_balance.evaporation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "alpha" # "beta" # "combined" # "Monteith potential evaporation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18.4. Evaporation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Specify the formulation method for land surface evaporation, from soil and vegetation End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.energy_balance.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "transpiration" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18.5. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Describe which processes are included in the energy balance scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19. Carbon Cycle Land surface carbon cycle 19.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of carbon cycle in land surface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.2. Tiling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the carbon cycle tiling, if any. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 19.3. Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time step of carbon cycle in seconds End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "grand slam protocol" # "residence time" # "decay time" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19.4. Anthropogenic Carbon Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Describe the treament of the anthropogenic carbon pool End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.5. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List the prognostic variables of the carbon scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 20. Carbon Cycle --&gt; Vegetation TODO 20.1. Number Of Carbon Pools Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Enter the number of carbon pools used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20.2. Carbon Pools Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List the carbon pools used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20.3. Forest Stand Dynamics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the treatment of forest stand dyanmics End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 21. Carbon Cycle --&gt; Vegetation --&gt; Photosynthesis TODO 21.1. Method Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, Nitrogen depencence, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22. Carbon Cycle --&gt; Vegetation --&gt; Autotrophic Respiration TODO 22.1. Maintainance Respiration Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the general method used for maintainence respiration End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.2. Growth Respiration Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the general method used for growth respiration End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23. Carbon Cycle --&gt; Vegetation --&gt; Allocation TODO 23.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the general principle behind the allocation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "leaves + stems + roots" # "leaves + stems + roots (leafy + woody)" # "leaves + fine roots + coarse roots + stems" # "whole plant (no distinction)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23.2. Allocation Bins Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specify distinct carbon bins used in allocation End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "fixed" # "function of vegetation type" # "function of plant allometry" # "explicitly calculated" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23.3. Allocation Fractions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how the fractions of allocation are calculated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 24. Carbon Cycle --&gt; Vegetation --&gt; Phenology TODO 24.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the general principle behind the phenology scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 25. Carbon Cycle --&gt; Vegetation --&gt; Mortality TODO 25.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the general principle behind the mortality scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 26. Carbon Cycle --&gt; Litter TODO 26.1. Number Of Carbon Pools Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Enter the number of carbon pools used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 26.2. Carbon Pools Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List the carbon pools used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 26.3. Decomposition Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List the decomposition methods used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.litter.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 26.4. Method Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List the general method used End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 27. Carbon Cycle --&gt; Soil TODO 27.1. Number Of Carbon Pools Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Enter the number of carbon pools used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.2. Carbon Pools Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List the carbon pools used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.3. Decomposition Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List the decomposition methods used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.soil.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.4. Method Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List the general method used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 28. Carbon Cycle --&gt; Permafrost Carbon TODO 28.1. Is Permafrost Included Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is permafrost included? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 28.2. Emitted Greenhouse Gases Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List the GHGs emitted End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 28.3. Decomposition Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List the decomposition methods used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 28.4. Impact On Soil Properties Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the impact of permafrost on soil properties End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.nitrogen_cycle.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 29. Nitrogen Cycle Land surface nitrogen cycle 29.1. 
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of the nitrogen cycle in the land surface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.nitrogen_cycle.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 29.2. Tiling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the notrogen cycle tiling, if any. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.nitrogen_cycle.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 29.3. Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time step of nitrogen cycle in seconds End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 29.4. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List the prognostic variables of the nitrogen scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 30. River Routing Land surface river routing 30.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of river routing in the land surface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.tiling') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 30.2. Tiling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the river routing, if any. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 30.3. Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time step of river routing scheme in seconds End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 30.4. Grid Inherited From Land Surface Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the grid inherited from land surface? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.grid_description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 30.5. Grid Description Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 General description of grid, if not inherited from land surface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.number_of_reservoirs') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 30.6. 
Number Of Reservoirs Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Enter the number of reservoirs End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.water_re_evaporation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "flood plains" # "irrigation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 30.7. Water Re Evaporation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N TODO End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 30.8. Coupled To Atmosphere Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Is river routing coupled to the atmosphere model component? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.coupled_to_land') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 30.9. Coupled To Land Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the coupling between land and rivers End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "heat" # "water" # "tracers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 30.10. Quantities Exchanged With Atmosphere Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N If couple to atmosphere, which quantities are exchanged between river routing and the atmosphere model components? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "present day" # "adapted for other periods" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 30.11. Basin Flow Direction Map Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 What type of basin flow direction map is being used? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.flooding') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 30.12. Flooding Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the representation of flooding, if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 30.13. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List the prognostic variables of the river routing End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "direct (large rivers)" # "diffuse" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31. 
River Routing --&gt; Oceanic Discharge TODO 31.1. Discharge Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specify how rivers are discharged to the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "heat" # "water" # "tracers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31.2. Quantities Transported Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Quantities that are exchanged from river-routing to the ocean model component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 32. Lakes Land surface lakes 32.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of lakes in the land surface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.coupling_with_rivers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 32.2. Coupling With Rivers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are lakes coupled to the river routing model component? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 32.3. Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time step of lake scheme in seconds End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "heat" # "water" # "tracers" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 32.4. Quantities Exchanged With Rivers Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N If coupling with rivers, which quantities are exchanged between the lakes and rivers End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.vertical_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 32.5. Vertical Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the vertical grid of lakes End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 32.6. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 List the prognostic variables of the lake scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.method.ice_treatment') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 33. Lakes --&gt; Method TODO 33.1. Ice Treatment Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is lake ice included? 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.method.albedo') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 33.2. Albedo Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the treatment of lake albedo End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.method.dynamics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "No lake dynamics" # "vertical" # "horizontal" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 33.3. Dynamics Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Which dynamics of lakes are treated? horizontal, vertical, etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 33.4. Dynamic Lake Extent Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is a dynamic lake extent scheme included? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.method.endorheic_basins') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 33.5. Endorheic Basins Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Basins not flowing to ocean included? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.land.lakes.wetlands.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 34. Lakes --&gt; Wetlands TODO 34.1. Description Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe the treatment of wetlands, if any End of explanation """
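# EXAMPLE ONLY (added illustration, not part of any real model description):
# in each template cell above, the contributor replaces the
# "# TODO - please enter value(s)" comment with one or more DOC.set_value(...)
# calls, using a value drawn from the listed valid choices. For instance, the
# boolean ice_treatment cell above would end with:
#
#     DOC.set_value(True)
#
# and a STRING property simply takes free text, e.g. DOC.set_value("...").
"""
Explanation: Worked example (added)
The comment cell above is an added sketch showing how the TODO placeholders in
this section are meant to be completed. The values shown are placeholders only,
not statements about any particular model.
End of explanation
"""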
mertnuhoglu/study
py/jupyter/Course-Introduction to Deep Learning-Coursera.ipynb
apache-2.0
import numpy as np A = np.array([[56.0, 0.0, 4.4, 68.0], [1.2,104.0,52.0,8.0], [1.8,135.0,99.0,0.9]]) print(A) cal = A.sum(axis=0) print(cal) percentage = 100*A/cal.reshape(1,4) print(percentage) """ Explanation: 14 Broadcasting example End of explanation """ import numpy as np a = np.random.randn(5) print(a) print(a.shape) print(a.T) print(np.dot(a,a.T)) a = np.random.randn(5,1) print(a) print(a.T) print(np.dot(a,a.T)) A = np.random.randn(4,3) B = np.sum(A, axis = 1, keepdims = True) print(B.shape) """ Explanation: 15 Note on numpy End of explanation """
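# Added sketch (not part of the original course notes): the usual recommendation
# that follows from the rank-1 array example above is to give vectors an explicit
# (n, 1) or (1, n) shape and to check it with an assertion.
a = np.random.randn(5)        # rank-1 array, shape (5,)
print(a.shape)
a = a.reshape(5, 1)           # explicit column vector, shape (5, 1)
assert(a.shape == (5, 1))
print(np.dot(a, a.T).shape)   # (5, 5) outer product, as expected
"""
Explanation: Avoiding rank-1 arrays (added note)
A short added illustration of the reshape-and-assert pattern suggested by the
rank-1 array example above.
End of explanation
"""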
rmenegaux/bqplot
examples/Wealth of Nations.ipynb
apache-2.0
# Required imports import pandas as pd from bqplot import (LogScale, LinearScale, OrdinalColorScale, ColorAxis, Axis, Scatter, CATEGORY10, Label, Figure) from bqplot.default_tooltip import Tooltip from ipywidgets import VBox, IntSlider, Button from IPython.display import display import os import numpy as np from time import sleep # The GUI starts with this year initial_year = 1800 """ Explanation: This is a bqplot recreation of Mike Bostock's Wealth of Nations. This was also done by Gapminder. It is originally based on a TED Talk by Hans Rosling. End of explanation """ data = pd.read_json(os.path.abspath('data_files/nations.json')) def clean_data(data): for column in ['income', 'lifeExpectancy', 'population']: data = data.drop(data[data[column].apply(len) <= 4].index) return data def extrap_interp(data): data = np.array(data) x_range = np.arange(1800, 2009, 1.) y_range = np.interp(x_range, data[:, 0], data[:, 1]) return y_range def extrap_data(data): for column in ['income', 'lifeExpectancy', 'population']: data[column] = data[column].apply(extrap_interp) return data data = clean_data(data) data = extrap_data(data) income_min, income_max = np.min(data['income'].apply(np.min)), np.max(data['income'].apply(np.max)) life_exp_min, life_exp_max = np.min(data['lifeExpectancy'].apply(np.min)), np.max(data['lifeExpectancy'].apply(np.max)) pop_min, pop_max = np.min(data['population'].apply(np.min)), np.max(data['population'].apply(np.max)) def get_data(year): year_index = year - 1800 income = data['income'].apply(lambda x: x[year_index]) life_exp = data['lifeExpectancy'].apply(lambda x: x[year_index]) pop = data['population'].apply(lambda x: x[year_index]) return income, life_exp, pop """ Explanation: Cleaning and Formatting JSON Data End of explanation """ tt = Tooltip(fields=['name', 'x', 'y'], labels=['Country Name', 'Income per Capita', 'Life Expectancy']) """ Explanation: Creating the Tooltip to display the required fields bqplot's native Tooltip allows us to simply display the data fields we require on a mouse-interaction. End of explanation """ year_label = Label(x=0.85, y=0.1, font_size='52px', font_weight='bolder', color='orange', text=str(initial_year), enable_move=True) """ Explanation: Creating the Label to display the year Staying true to the d3 recreation of the talk, we place a Label widget in the bottom-right of the Figure (it inherits the Figure co-ordinates when no scale is passed to it). With enable_move set to True, the Label can be dragged around. End of explanation """ x_sc = LogScale(min=income_min, max=income_max) y_sc = LinearScale(min=life_exp_min, max=life_exp_max) c_sc = OrdinalColorScale(domain=data['region'].unique().tolist(), colors=CATEGORY10[:6]) size_sc = LinearScale(min=pop_min, max=pop_max) ax_y = Axis(label='Life Expectancy', scale=y_sc, orientation='vertical', side='left') ax_x = Axis(label='Income per Capita', scale=x_sc) """ Explanation: Defining Axes and Scales The inherent skewness of the income data favors the use of a LogScale. Also, since the color coding by regions does not follow an ordering, we use the OrdinalColorScale. 
End of explanation """ # Start with the first year's data cap_income, life_exp, pop = get_data(initial_year) wealth_scat = Scatter(x=cap_income, y=life_exp, color=data['region'], size=pop, names=data['name'], display_names=False, scales={'x': x_sc, 'y': y_sc, 'color': c_sc, 'size': size_sc}, default_size=4112, tooltip=tt, animate=True, stroke='Black') """ Explanation: Creating the Scatter Mark with the appropriate size and color parameters passed To generate the appropriate graph, we need to pass the population of the country to the size attribute and its region to the color attribute. End of explanation """ fig = Figure(marks=[wealth_scat, year_label], axes=[ax_x, ax_y], title='Health and Wealth of Nations', fig_color='White', animation_duration=100) """ Explanation: Creating the Figure End of explanation """ year_slider = IntSlider(min=1800, max=2008, step=1, description='Year', value=initial_year) animate_button = Button(description='Play', background_color='MediumSeaGreen', color='Black', icon='fa-play') """ Explanation: Using a Slider to allow the user to change the year and a button for animation Here we see how we can seamlessly integrate bqplot into the jupyter widget infrastructure. End of explanation """ def year_changed(new): wealth_scat.x, wealth_scat.y, wealth_scat.size = get_data(year_slider.value) year_label.text = str(year_slider.value) year_slider.observe(year_changed, 'value') """ Explanation: On the slider value callback (a function that is triggered everytime the value of the slider is changed) we change the x, y and size co-ordinates of the Scatter. We also update the text of the Label to reflect the current year. End of explanation """ def button_clicked(value): animate_button.visible = False for i in range(1800, 2009, 1): year_slider.value = i sleep(0.05) animate_button.visible = True animate_button.on_click(button_clicked) """ Explanation: Defining the callback for the button When the Button is clicked, we trigger the animation by changing the slider value through all the years for which data is available. The change in the Slider value triggers a change in the plot. End of explanation """ display(VBox([animate_button, fig, year_slider])) """ Explanation: Displaying the GUI End of explanation """
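# Optional alternative (an added sketch, not part of the original notebook):
# drive the animation with an ipywidgets Play widget linked to the year slider,
# instead of the blocking sleep() loop above. Play and jslink are standard
# ipywidgets features, assuming a reasonably recent ipywidgets version.
from ipywidgets import Play, jslink

play = Play(min=1800, max=2008, step=1, interval=100, value=initial_year)
jslink((play, 'value'), (year_slider, 'value'))
display(VBox([play, fig, year_slider]))
"""
Explanation: Alternative animation with a Play widget (added)
Because year_changed is already observing the slider, linking a Play widget to
the slider's value animates the chart without blocking the kernel. This cell is
an added illustration; the original notebook uses the Button-based loop above.
End of explanation
"""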
JJINDAHOUSE/deep-learning
first-neural-network/Your_first_neural_network.ipynb
mit
%matplotlib inline %config InlineBackend.figure_format = 'retina' import numpy as np import pandas as pd import matplotlib.pyplot as plt """ Explanation: Your first neural network In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more. End of explanation """ data_path = 'Bike-Sharing-Dataset/hour.csv' rides = pd.read_csv(data_path) rides.head() """ Explanation: Load and prepare the data A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon! End of explanation """ rides[:24*10].plot(x='dteday', y='cnt') """ Explanation: Checking out the data This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above. Below is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model. End of explanation """ dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday'] for each in dummy_fields: dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False) rides = pd.concat([rides, dummies], axis=1) fields_to_drop = ['instant', 'dteday', 'season', 'weathersit', 'weekday', 'atemp', 'mnth', 'workingday', 'hr'] data = rides.drop(fields_to_drop, axis=1) data.head() """ Explanation: Dummy variables Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies(). End of explanation """ quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed'] # Store scalings in a dictionary so we can convert back later scaled_features = {} for each in quant_features: mean, std = data[each].mean(), data[each].std() scaled_features[each] = [mean, std] data.loc[:, each] = (data[each] - mean)/std """ Explanation: Scaling target variables To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1. The scaling factors are saved so we can go backwards when we use the network for predictions. 
End of explanation """ # Save data for approximately the last 21 days test_data = data[-21*24:] # Now remove the test data from the data set data = data[:-21*24] # Separate the data into features and targets target_fields = ['cnt', 'casual', 'registered'] features, targets = data.drop(target_fields, axis=1), data[target_fields] test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields] """ Explanation: Splitting the data into training, testing, and validation sets We'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders. End of explanation """ # Hold out the last 60 days or so of the remaining data as a validation set train_features, train_targets = features[:-60*24], targets[:-60*24] val_features, val_targets = features[-60*24:], targets[-60*24:] """ Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set). End of explanation """ class NeuralNetwork(object): def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Initialize weights self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5, (self.input_nodes, self.hidden_nodes)) self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5, (self.hidden_nodes, self.output_nodes)) self.lr = learning_rate #### TODO: Set self.activation_function to your implemented sigmoid function #### # # Note: in Python, you can define a function with a lambda expression, # as shown below. self.activation_function = lambda x : 1/(1+np.exp(-x)) # Replace 0 with your sigmoid calculation. ### If the lambda code above is not something you're familiar with, # You can uncomment out the following three lines and put your # implementation there instead. # #def sigmoid(x): # return 0 # Replace 0 with your sigmoid calculation here #self.activation_function = sigmoid def train(self, features, targets): ''' Train the network on batch of features and targets. Arguments --------- features: 2D array, each row is one data record, each column is a feature targets: 1D array of target values ''' n_records = features.shape[0] delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape) delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape) for X, y in zip(features, targets): #### Implement the forward pass here #### ### Forward pass ### # TODO: Hidden layer - Replace these values with your calculations. hidden_inputs = np.dot(X, self.weights_input_to_hidden) # signals into hidden layer hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer # TODO: Output layer - Replace these values with your calculations. final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer final_outputs = final_inputs # signals from final output layer #### Implement the backward pass here #### ### Backward pass ### # TODO: Output error - Replace this value with your calculations. error = y - final_outputs # Output layer error is the difference between desired target and actual output. 
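            # (Added note) Backpropagation reasoning: the output activation is the
            # identity f(x) = x, so its derivative is 1 and the output error term
            # is simply `error`. Each hidden unit then receives a share of this
            # error weighted by its hidden-to-output weight, scaled below by the
            # sigmoid derivative hidden_outputs * (1 - hidden_outputs).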
# TODO: Calculate the hidden layer's contribution to the error hidden_error = np.dot(self.weights_hidden_to_output, error) # TODO: Backpropagated error terms - Replace these values with your calculations. output_error_term = error hidden_error_term = hidden_error * hidden_outputs * (1 - hidden_outputs) # Weight step (input to hidden) delta_weights_i_h += hidden_error_term * X[:, None] # Weight step (hidden to output) delta_weights_h_o += output_error_term * hidden_outputs[:, None] # TODO: Update the weights - Replace these values with your calculations. self.weights_hidden_to_output += self.lr * delta_weights_h_o / n_records # update hidden-to-output weights with gradient descent step self.weights_input_to_hidden += self.lr * delta_weights_i_h / n_records # update input-to-hidden weights with gradient descent step def run(self, features): ''' Run a forward pass through the network with input features Arguments --------- features: 1D array of feature values ''' #### Implement the forward pass here #### # TODO: Hidden layer - replace these values with the appropriate calculations. hidden_inputs = np.dot(features, self.weights_input_to_hidden) # signals into hidden layer hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer # TODO: Output layer - Replace these values with the appropriate calculations. final_inputs = np.dot(hidden_outputs, self.weights_hidden_to_output) # signals into final output layer final_outputs = final_inputs # signals from final output layer return final_outputs def MSE(y, Y): return np.mean((y-Y)**2) """ Explanation: Time to build the network Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes. <img src="assets/neural_network.png" width=300px> The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation. We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation. Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$. Below, you have these tasks: 1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function. 2. Implement the forward pass in the train method. 3. Implement the backpropagation algorithm in the train method, including calculating the output error. 4. Implement the forward pass in the run method. 
End of explanation """ import unittest inputs = np.array([[0.5, -0.2, 0.1]]) targets = np.array([[0.4]]) test_w_i_h = np.array([[0.1, -0.2], [0.4, 0.5], [-0.3, 0.2]]) test_w_h_o = np.array([[0.3], [-0.1]]) class TestMethods(unittest.TestCase): ########## # Unit tests for data loading ########## def test_data_path(self): # Test that file path to dataset has been unaltered self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv') def test_data_loaded(self): # Test that data frame loaded self.assertTrue(isinstance(rides, pd.DataFrame)) ########## # Unit tests for network functionality ########## def test_activation(self): network = NeuralNetwork(3, 2, 1, 0.5) # Test that the activation function is a sigmoid self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5)))) def test_train(self): # Test that weights are updated correctly on training network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() network.train(inputs, targets) self.assertTrue(np.allclose(network.weights_hidden_to_output, np.array([[ 0.37275328], [-0.03172939]]))) self.assertTrue(np.allclose(network.weights_input_to_hidden, np.array([[ 0.10562014, -0.20185996], [0.39775194, 0.50074398], [-0.29887597, 0.19962801]]))) def test_run(self): # Test correctness of run method network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() self.assertTrue(np.allclose(network.run(inputs), 0.09998924)) suite = unittest.TestLoader().loadTestsFromModule(TestMethods()) unittest.TextTestRunner().run(suite) """ Explanation: Unit tests Run these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly befor you starting trying to train it. These tests must all be successful to pass the project. End of explanation """ import sys ### Set the hyperparameters here ### iterations = 1200 learning_rate = 0.1 hidden_nodes = 25 output_nodes = 1 N_i = train_features.shape[1] network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate) losses = {'train':[], 'validation':[]} for ii in range(iterations): # Go through a random batch of 128 records from the training data set batch = np.random.choice(train_features.index, size=128) X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt'] network.train(X, y) # Printing out the training progress train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values) val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values) sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \ + "% ... Training loss: " + str(train_loss)[:5] \ + " ... Validation loss: " + str(val_loss)[:5]) sys.stdout.flush() losses['train'].append(train_loss) losses['validation'].append(val_loss) plt.plot(losses['train'], label='Training loss') plt.plot(losses['validation'], label='Validation loss') plt.legend() _ = plt.ylim() """ Explanation: Training the network Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops. 
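# Optional extra check (an added sketch, not part of the project's graded tests):
# a finite-difference gradient check of the train() implementation. With lr=1 and
# a single record, the weight change applied by train() equals the negative
# gradient of the squared-error loss, so it can be compared against a numerical
# central-difference estimate.
def _loss(net, x, y):
    return 0.5 * (y - net.run(x)[0]) ** 2

def gradient_check(eps=1e-5, atol=1e-6):
    np.random.seed(42)
    x = np.random.randn(3)
    y = 0.7
    net = NeuralNetwork(3, 2, 1, learning_rate=1.0)
    w_ih, w_ho = net.weights_input_to_hidden.copy(), net.weights_hidden_to_output.copy()
    net.train(x[None, :], np.array([[y]]))
    # The analytic gradients implied by the update are the negative of the step taken
    grads = [-(net.weights_input_to_hidden - w_ih), -(net.weights_hidden_to_output - w_ho)]
    # Restore the original weights before estimating the gradients numerically
    net.weights_input_to_hidden, net.weights_hidden_to_output = w_ih, w_ho
    for W, grad in zip([net.weights_input_to_hidden, net.weights_hidden_to_output], grads):
        num = np.zeros_like(W)
        for idx in np.ndindex(W.shape):
            orig = W[idx]
            W[idx] = orig + eps
            plus = _loss(net, x, y)
            W[idx] = orig - eps
            minus = _loss(net, x, y)
            W[idx] = orig
            num[idx] = (plus - minus) / (2 * eps)
        assert np.allclose(num, grad, atol=atol)
    print('Gradient check passed.')

gradient_check()
"""
Explanation: Optional: numerical gradient check (added)
This added cell is a sketch of a classic sanity check that complements the unit
tests above: it verifies that the weight updates computed by train() match a
finite-difference estimate of the squared-error gradient. It assumes the class
implementation above is complete and is not required by the project.
End of explanation
"""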
You'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later. Choose the number of iterations This is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, if you use too many iterations, then the model with not generalize well to other data, this is called overfitting. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. As you start overfitting, you'll see the training loss continue to decrease while the validation loss starts to increase. Choose the learning rate This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge. Choose the number of hidden nodes The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose. End of explanation """ fig, ax = plt.subplots(figsize=(8,4)) mean, std = scaled_features['cnt'] predictions = network.run(test_features).T*std + mean ax.plot(predictions[0], label='Prediction') ax.plot((test_targets['cnt']*std + mean).values, label='Data') ax.set_xlim(right=len(predictions)) ax.legend() dates = pd.to_datetime(rides.ix[test_data.index]['dteday']) dates = dates.apply(lambda d: d.strftime('%b %d')) ax.set_xticks(np.arange(len(dates))[12::24]) _ = ax.set_xticklabels(dates[12::24], rotation=45) """ Explanation: Check out your predictions Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly. End of explanation """
GoogleCloudPlatform/bigquery-oreilly-book
09_bqml/text_embeddings.ipynb
apache-2.0
import tensorflow as tf import tensorflow_hub as tfhub model = tf.keras.Sequential() model.add(tfhub.KerasLayer("https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1", output_shape=[20], input_shape=[], dtype=tf.string)) model.summary() model.predict([""" Long years ago, we made a tryst with destiny; and now the time comes when we shall redeem our pledge, not wholly or in full measure, but very substantially. At the stroke of the midnight hour, when the world sleeps, India will awake to life and freedom. A moment comes, which comes but rarely in history, when we step out from the old to the new -- when an age ends, and when the soul of a nation, long suppressed, finds utterance. """]) """ Explanation: Document embeddings in BigQuery for document similarity and clustering tasks This notebook shows how to do use a pre-trained embedding as a vector representation of a natural language text column. Given this embedding, we can load it as a BQ-ML model and then carry out document similarity or clustering. This notebook accompanies the following Medium blog post: https://medium.com/@lakshmanok/how-to-do-text-similarity-search-and-document-clustering-in-bigquery-75eb8f45ab65 Embedding model for documents We're going to use a model that has been pretrained on Google News. Here's an example of how it works in Python. We will use it directly in BigQuery, however. End of explanation """ %%bash BUCKET=ai-analytics-solutions-kfpdemo # CHANGE AS NEEDED rm -rf tmp mkdir tmp FILE=swivel.tar.gz wget --quiet -O tmp/swivel.tar.gz https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1?tf-hub-format=compressed cd tmp tar xvfz swivel.tar.gz cd .. mv tmp swivel gsutil -m cp -R swivel gs://${BUCKET}/swivel rm -rf swivel echo "Model artifacts are now at gs://${BUCKET}/swivel/*" """ Explanation: Loading model into BigQuery The Swivel model above is already available in SavedModel format. But we need it on Google Cloud Storage before we can load it into BigQuery. End of explanation """ %%bigquery CREATE OR REPLACE MODEL advdata.swivel_text_embed OPTIONS(model_type='tensorflow', model_path='gs://ai-analytics-solutions-kfpdemo/swivel/*') """ Explanation: Let's load the model into a BigQuery dataset named advdata (create it if necessary) End of explanation """ %%bigquery SELECT output_0 FROM ML.PREDICT(MODEL advdata.swivel_text_embed,( SELECT "Long years ago, we made a tryst with destiny; and now the time comes when we shall redeem our pledge, not wholly or in full measure, but very substantially." AS sentences)) """ Explanation: From the BigQuery web console, click on "schema" tab for the newly loaded model. We see that the input is called sentences and the output is called output_0: <img src="swivel_schema.png" /> End of explanation """ %%bigquery SELECT EXTRACT(DAYOFYEAR from timestamp) AS julian_day, ST_GeogPoint(longitude, latitude) AS location, comments FROM `bigquery-public-data.noaa_preliminary_severe_storms.wind_reports` WHERE EXTRACT(YEAR from timestamp) = 2019 LIMIT 10 """ Explanation: Document search Let's use the embeddings to return similar strings. We'll use the comments field of a storm reports table from NOAA. 
End of explanation """ %%bigquery CREATE TEMPORARY FUNCTION td(a ARRAY<FLOAT64>, b ARRAY<FLOAT64>, idx INT64) AS ( (a[OFFSET(idx)] - b[OFFSET(idx)]) * (a[OFFSET(idx)] - b[OFFSET(idx)]) ); CREATE TEMPORARY FUNCTION term_distance(a ARRAY<FLOAT64>, b ARRAY<FLOAT64>) AS (( SELECT SQRT(SUM( td(a, b, idx))) FROM UNNEST(GENERATE_ARRAY(0, 19)) idx )); WITH search_term AS ( SELECT output_0 AS term_embedding FROM ML.PREDICT(MODEL advdata.swivel_text_embed,(SELECT "power line down on a home" AS sentences)) ) SELECT term_distance(term_embedding, output_0) AS termdist, comments FROM ML.PREDICT(MODEL advdata.swivel_text_embed,( SELECT comments, LOWER(comments) AS sentences FROM `bigquery-public-data.noaa_preliminary_severe_storms.wind_reports` WHERE EXTRACT(YEAR from timestamp) = 2019 )), search_term ORDER By termdist ASC LIMIT 10 """ Explanation: Let's define a distance function and then do a search for matching documents to the search string "power line down on a home". Note that the matches include "house" as a synonym for home. And not as good, but close matches all include "power line" as the more distinctive term. End of explanation """ %%bigquery CREATE TEMPORARY FUNCTION arr_to_input_20(arr ARRAY<FLOAT64>) RETURNS STRUCT<p1 FLOAT64, p2 FLOAT64, p3 FLOAT64, p4 FLOAT64, p5 FLOAT64, p6 FLOAT64, p7 FLOAT64, p8 FLOAT64, p9 FLOAT64, p10 FLOAT64, p11 FLOAT64, p12 FLOAT64, p13 FLOAT64, p14 FLOAT64, p15 FLOAT64, p16 FLOAT64, p17 FLOAT64, p18 FLOAT64, p19 FLOAT64, p20 FLOAT64> AS ( STRUCT( arr[OFFSET(0)] , arr[OFFSET(1)] , arr[OFFSET(2)] , arr[OFFSET(3)] , arr[OFFSET(4)] , arr[OFFSET(5)] , arr[OFFSET(6)] , arr[OFFSET(7)] , arr[OFFSET(8)] , arr[OFFSET(9)] , arr[OFFSET(10)] , arr[OFFSET(11)] , arr[OFFSET(12)] , arr[OFFSET(13)] , arr[OFFSET(14)] , arr[OFFSET(15)] , arr[OFFSET(16)] , arr[OFFSET(17)] , arr[OFFSET(18)] , arr[OFFSET(19)] )); CREATE OR REPLACE MODEL advdata.storm_reports_clustering OPTIONS(model_type='kmeans', NUM_CLUSTERS=10) AS SELECT arr_to_input_20(output_0) AS comments_embed, EXTRACT(DAYOFYEAR from timestamp) AS julian_day, longitude, latitude FROM ML.PREDICT(MODEL advdata.swivel_text_embed,( SELECT timestamp, longitude, latitude, LOWER(comments) AS sentences FROM `bigquery-public-data.noaa_preliminary_severe_storms.wind_reports` WHERE EXTRACT(YEAR from timestamp) = 2019 )) """ Explanation: Document clustering We can use the embeddings as input to a K-Means clustering model. To make things interesting, let's also include the day and location. K-Means at present doesn't accept arrays as input, so I'm defining a function to make it a struct with named parameters. 
End of explanation """ %%bigquery CREATE TEMPORARY FUNCTION arr_to_input_20(arr ARRAY<FLOAT64>) RETURNS STRUCT<p1 FLOAT64, p2 FLOAT64, p3 FLOAT64, p4 FLOAT64, p5 FLOAT64, p6 FLOAT64, p7 FLOAT64, p8 FLOAT64, p9 FLOAT64, p10 FLOAT64, p11 FLOAT64, p12 FLOAT64, p13 FLOAT64, p14 FLOAT64, p15 FLOAT64, p16 FLOAT64, p17 FLOAT64, p18 FLOAT64, p19 FLOAT64, p20 FLOAT64> AS ( STRUCT( arr[OFFSET(0)] , arr[OFFSET(1)] , arr[OFFSET(2)] , arr[OFFSET(3)] , arr[OFFSET(4)] , arr[OFFSET(5)] , arr[OFFSET(6)] , arr[OFFSET(7)] , arr[OFFSET(8)] , arr[OFFSET(9)] , arr[OFFSET(10)] , arr[OFFSET(11)] , arr[OFFSET(12)] , arr[OFFSET(13)] , arr[OFFSET(14)] , arr[OFFSET(15)] , arr[OFFSET(16)] , arr[OFFSET(17)] , arr[OFFSET(18)] , arr[OFFSET(19)] )); SELECT sentences FROM ML.PREDICT(MODEL `ai-analytics-solutions.advdata.storm_reports_clustering`, ( SELECT sentences, arr_to_input_20(output_0) AS comments_embed, EXTRACT(DAYOFYEAR from timestamp) AS julian_day, longitude, latitude FROM ML.PREDICT(MODEL advdata.swivel_text_embed,( SELECT timestamp, longitude, latitude, LOWER(comments) AS sentences FROM `bigquery-public-data.noaa_preliminary_severe_storms.wind_reports` WHERE EXTRACT(YEAR from timestamp) = 2019 )))) WHERE centroid_id = 1 LIMIT 10 """ Explanation: The resulting clusters look like this <img src="storm_reports_clusters.png"/> Show a few of the comments from cluster #1 End of explanation """ %%bigquery CREATE TEMPORARY FUNCTION arr_to_input_20(arr ARRAY<FLOAT64>) RETURNS STRUCT<p1 FLOAT64, p2 FLOAT64, p3 FLOAT64, p4 FLOAT64, p5 FLOAT64, p6 FLOAT64, p7 FLOAT64, p8 FLOAT64, p9 FLOAT64, p10 FLOAT64, p11 FLOAT64, p12 FLOAT64, p13 FLOAT64, p14 FLOAT64, p15 FLOAT64, p16 FLOAT64, p17 FLOAT64, p18 FLOAT64, p19 FLOAT64, p20 FLOAT64> AS ( STRUCT( arr[OFFSET(0)] , arr[OFFSET(1)] , arr[OFFSET(2)] , arr[OFFSET(3)] , arr[OFFSET(4)] , arr[OFFSET(5)] , arr[OFFSET(6)] , arr[OFFSET(7)] , arr[OFFSET(8)] , arr[OFFSET(9)] , arr[OFFSET(10)] , arr[OFFSET(11)] , arr[OFFSET(12)] , arr[OFFSET(13)] , arr[OFFSET(14)] , arr[OFFSET(15)] , arr[OFFSET(16)] , arr[OFFSET(17)] , arr[OFFSET(18)] , arr[OFFSET(19)] )); SELECT sentences FROM ML.PREDICT(MODEL `ai-analytics-solutions.advdata.storm_reports_clustering`, ( SELECT sentences, arr_to_input_20(output_0) AS comments_embed, EXTRACT(DAYOFYEAR from timestamp) AS julian_day, longitude, latitude FROM ML.PREDICT(MODEL advdata.swivel_text_embed,( SELECT timestamp, longitude, latitude, LOWER(comments) AS sentences FROM `bigquery-public-data.noaa_preliminary_severe_storms.wind_reports` WHERE EXTRACT(YEAR from timestamp) = 2019 )))) WHERE centroid_id = 3 """ Explanation: As you can see, these are basically uninformative comments. How about centroid #3? End of explanation """
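# Added sanity-check sketch (not part of the original notebook): reproduce the
# Euclidean embedding distance locally with the tf.keras/tfhub Swivel model that
# was loaded at the top of this notebook. The two sentences are arbitrary examples.
import numpy as np

emb = model.predict(["power line down on a home",
                     "power lines down on a house"])
print(np.linalg.norm(emb[0] - emb[1]))
"""
Explanation: Cross-checking a distance locally (added)
The term_distance function defined in SQL above is a plain Euclidean distance
over the 20-dimensional Swivel embeddings, so it can be spot-checked in Python
with the same pre-trained model. This cell is an added illustration only.
End of explanation
"""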
jhillairet/scikit-rf
doc/source/examples/networktheory/Transmission Line Losses.ipynb
bsd-3-clause
%matplotlib inline import matplotlib.pyplot as plt import numpy as np import skrf as rf rf.stylely() """ Explanation: Transmission Line Losses on a Loaded Lossy Line When dealing with RF power, for instance in radio, industry or scientific applications, a recurrent problem is to handle the inevitable RF losses correctly to avoid overheating of cables and components. Matched Load In this example, we will use scikit-rf to evaluate the losses in a 50 Ohm, 20 meters long, RG-8 cable (VF=0.84) charged with a resistive load $R_L=50\Omega$ at 13.56 MHz. The cable losses are estimated to 1.483 dB/100 meters and the source power to 400W. First, the usual Python imports: End of explanation """ Pin = 400 # W z0 = 50 # Ohm freq = rf.Frequency(13.56, npoints=1, unit='MHz') VF = 0.84 RL = 50 # Ohm L = 20 # m """ Explanation: Let's define the problem constants: End of explanation """ alpha = rf.db_2_np(1.483/100) # Np/m beta = freq.w/rf.c/VF gamma = alpha + 1j*beta """ Explanation: The propagation constant of the transmission line $\gamma=\alpha+j\beta$ is: End of explanation """ a = np.exp(2*alpha*L) # also simply 2.84/100*20 print('Matched line loss: a=', rf.mag_2_db10(a), 'dB') """ Explanation: The matched line loss (or power attenuation), $a=e^{2\alpha L}$, is: End of explanation """ print('(Forward) Power delivered to the load:', Pin/a, 'W') print('Power lost in the cable:', Pin *( 1 - 1/a), 'W') """ Explanation: If the line is charged with a matched load, ie. $R_L=50\Omega$, then the total line loss is $a$. The power lost in the cable will thus be: End of explanation """ a_skrf = rf.zl_2_total_loss(z0, zl=RL, theta=gamma*L) print('Power lost in the cable:', Pin * (1 - 1/a_skrf), 'W') """ Explanation: Which can also be checked by the scikit-rf transmission line function zl_2_total_loss: End of explanation """ # reflection coefficient and input impedance Gamma_in = rf.zl_2_Gamma_in(z0, RL, theta=gamma*L) Z_in = rf.zl_2_zin(z0, RL, theta=gamma*L) # voltage and current at the line input as a function of source power V_in = np.sqrt(2*z0*Pin)*(1 + Gamma_in) I_in = V_in/Z_in # voltage and current at z=L V,I = rf.voltage_current_propagation(V_in, I_in, z0, gamma*L) P_delivered = 1/2 * np.real(V * np.conj(I)) print('Power delivered to the load: ', P_delivered, 'W') print('Power dissipated in the cable: ',Pin - P_delivered, 'W') """ Explanation: Another way to evaluate the total power dissipated in the circuit is to evaluate the power expression: $$ P_{delivered} = \frac{1}{2} \Re \left[ V I^* \right] $$ where $V$ and $I$ are the (peak) total voltage and current. They can be evaluate using the transmission line function voltage_current_propagation: End of explanation """ z0 = 50 ZL = 200 - 30j Gamma_load = rf.zl_2_Gamma0(z0, ZL) print('|Gamma_load|=', np.abs(Gamma_load)) """ Explanation: Non Matched load However, if the load is not perfectly matched to the line characteristic impedance $z_0$, for example with $R_L=200 + 30j\Omega$, additional losses are induced by the reflected wave. 
The reflection coefficient $\Gamma_{load}$ induced by this load is:
End of explanation
"""
Gamma_in = rf.zl_2_Gamma_in(z0, ZL, theta=gamma*L)
SWR = rf.Gamma0_2_swr(rf.zl_2_Gamma_in(z0, ZL, theta=gamma*L))

print('|Gamma_in|=', np.abs(Gamma_in), '(SWR=', SWR,')')
"""
Explanation: while the reflection coefficient seen at the input of the transmission line $\Gamma_{in}$ is:
End of explanation
"""
10*np.log10(a) + 10*np.log10((1 - np.abs(Gamma_in)**2)/(1 - np.abs(Gamma_load)**2))
"""
Explanation: The total loss in dB due to SWR is often stated as:
$$ a_{[dB]} + 10 \log_{10} \frac{1 - |\Gamma_{in}|^2}{1 - |\Gamma_{load}|^2} $$
End of explanation
"""
a = rf.zl_2_total_loss(z0, zl=ZL, theta=gamma * L)
print('Total power loss: ', rf.mag_2_db10(a), 'dB' )
print('Delivered power:', Pin/a, 'W')
print('The total power loss in the cable:', Pin*(1 - 1/a), 'W')

# reflection coefficient and input impedance
Gamma_in = rf.zl_2_Gamma_in(z0, ZL, theta=gamma*L)
Z_in = rf.zl_2_zin(z0, ZL, theta=gamma*L)

# voltage and current at the line input as a function of source power
V_in = np.sqrt(2*z0*Pin)*(1 + Gamma_in)
I_in = V_in/Z_in

# voltage and current at z=L
V,I = rf.voltage_current_propagation(V_in, I_in, z0, gamma*L)

P_delivered = 1/2 * np.real(V * np.conj(I))
print('Power delivered to the load: ', P_delivered, 'W')
print('Power dissipated in the cable: ',Pin - P_delivered, 'W')

rf.Gamma0_2_swr(Gamma_in)

10*np.log10(P_delivered/Pin)
"""
Explanation: However, this expression is only correct if at least one of the following properties is verified:
- (i) the characteristic impedance of the line is real (distortionless line)
- (ii) the reflection coefficient is real (i.e. a real $Z_L/Z_0$) [1].
The first condition is met here; however, it will not be the case in the next section.
In any case, the scikit-rf transmission line function zl_2_total_loss is correct in all conditions:
End of explanation
"""
Z_L = 4.5 - 1673j
R_0 = 402.75
freq = rf.Frequency(1.83, npoints=1, unit='MHz')
VF = 0.915
L = rf.feet_2_meter(100)
"""
Explanation: A more advanced example
This example reproduces the example presented in reference [1]. Let's assume a coaxial line (Wireman #551, 450-Ohm) loaded with a complex load $Z_L=R_L + jX_L$, with the following parameters:
- line length: 100 feet
- frequency: 1.83 MHz
- attenuation constant: $\alpha=$ 0.095 dB/100 feet
- coaxial relative permittivity: $\epsilon_r=1.194$ (Velocity Factor VF=0.915)
- real part of the characteristic impedance: $R_0 = \Re \left[Z_0\right]$=402.75 Ohm
- Load resistance: $R_L$ = 4.5 Ohm
- Load reactance: $X_L$ = -1673 Ohm
End of explanation
"""
alpha = rf.db_2_np(rf.db_per_100feet_2_db_per_100meter(0.095)/100)
beta = freq.w/rf.c/VF
gamma = alpha + 1j*beta
print(gamma)
"""
Explanation: First, we can derive the propagation constant $\gamma=\alpha+j\beta$ with $\beta=\frac{\omega}{c}\sqrt{\epsilon_r}$ from the problem parameters:
End of explanation
"""
X_0 = -alpha/beta*R_0
Z_0 = R_0 + 1j*X_0
print('X_0=', X_0)
"""
Explanation: However, the transmission line characteristic reactance is not given in the problem parameters and must be determined.
It can be approximated from a high-frequency, low-loss approximation [1]: $$ Z_0 = R_0 + j X_0 \approx R_0 - j \frac{\alpha}{\beta}R_0 $$ ie, $$ X_0 \approx - \frac{\alpha}{\beta}R_0 $$ End of explanation """ print('Gamma at load:', np.abs(rf.zl_2_Gamma0(Z_0, Z_L))) print('Gamma at input:', np.abs(rf.zl_2_Gamma_in(Z_0, Z_L, theta=gamma*L))) print('SWR at load:', rf.Gamma0_2_swr(rf.zl_2_Gamma0(Z_0, Z_L))) print('SWR at input:', rf.Gamma0_2_swr(rf.zl_2_Gamma_in(Z_0, Z_L, theta=gamma*L))) print('Input impedance:', rf.input_impedance_at_theta(Z_0, Z_L, theta=gamma*L ), 'Ohm') total_loss_db = rf.mag_2_db10(np.abs(rf.zl_2_total_loss(z0=Z_0, zl=Z_L, theta=gamma*L))) print('Total loss:', total_loss_db, 'dB') """ Explanation: Now that we have both the characteristic impedance and the propagation constant of the line, the reflection coefficients, input impedance and total loss can be deduced: End of explanation """
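# Added illustration (a sketch, not part of the original notebook): compare the
# common "SWR loss" formula quoted earlier with the full zl_2_total_loss result
# for this complex-Z_0 case, to make the caveat about its validity concrete.
matched_loss_db = rf.mag_2_db10(np.exp(2*alpha*L))
Gamma_l = rf.zl_2_Gamma0(Z_0, Z_L)
Gamma_i = rf.zl_2_Gamma_in(Z_0, Z_L, theta=gamma*L)
approx_db = matched_loss_db + 10*np.log10((1 - np.abs(Gamma_i)**2)/(1 - np.abs(Gamma_l)**2))
print('Approximate SWR-loss formula:', approx_db, 'dB')
print('zl_2_total_loss result: ', total_loss_db, 'dB')
"""
Explanation: Comparing the approximate formula with the full calculation (added)
With a complex characteristic impedance, neither of the two validity conditions
stated earlier holds, so the popular SWR-based loss formula and zl_2_total_loss
are not expected to agree here. This cell is an added numerical illustration of
that point.
End of explanation
"""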
MTG/sms-tools
notebooks/E4-STFT.ipynb
agpl-3.0
import os import sys import numpy as np from scipy.signal import get_window from scipy.fftpack import fft, fftshift import math import matplotlib.pyplot as plt %matplotlib notebook eps = np.finfo(float).eps sys.path.append('../software/models/') import stft import utilFunctions as UF # E4 - 1.1: Complete function extract_main_lob() def extract_main_lobe(window, M, N): """Extract the main lobe of the magnitude spectrum of a window, given a window type and its length. Args: window (str): Window type to be used (either rectangular ('boxcar'), 'hamming' or 'blackmanharris') M (int): length of the window to be used N (int): size of FFT Results: np.array: an array containing the main lobe of the magnitude spectrum of the window in decibels (dB). """ w = get_window(window, M) # get the window ### Your code here """ Explanation: Exercise 4: Short-time Fourier Transform Doing this exercise you will learn about the concept of the main lobe width of the spectrum of a window and you will better understand the short-time Fourier transform (STFT). You will also use the STFT to extract basic rhythm related information from an audio signal, implementing an onset detection function, which is one of the rhythm descriptors often used in music information retrieval to detect onsets of acoustic events. There are four parts in this exercise. 1) Extracting the main lobe of the spectrum of a window, 2) Measuring noise in the reconstructed signal using the STFT model, 3) Computing band-wise energy envelopes of a signal, 4) Computing an onset detection function. Relevant Concepts Main lobe of the spectrum of a window: The width of the main lobe of the magnitude spectrum of a window is an important characteristic used in deciding which window type is best for the analysis of an audio excerpt. There exists a tradeoff between the main lobe width and the side lobe attenuation. Typically for windows with a narrower main lobe, the side lobes are less attenuated. An interesting fact is that changing the length of a window $M$ doesn't affect the main lobe width of the spectrum of the window in samples. Note that if you use zero-padding for computing the spectrum of a window, the main lobe width will be multiplied by the zero-padding factor. Fast Fourier Transform (FFT): An efficient way to compute the discrete Fourier transform of a signal is the fast Fourier transform, FFT. The FFT algorithm factorizes the DFT matrix in order to exploit the symmetries in the DFT equation. FFT computation is specially very efficient when the FFT size is a power of 2. Therefore, whenever possible we use an FFT size that is a power of 2. Energy of a signal: The energy of a signal $x[n]$ of length $N$ can be computed in the discrete time domain as follows: \begin{equation} E=\overset{N-1}{\underset{n=0}{\sum}}\left|x\left[n\right]\right|^{2} \end{equation} Energy in a frequency band: Given the DFT spectrum of the signal $X[k]$, the energy $E$ in a specific frequency band spanning the bin index $k_1$ to $k_2$ can be computed as: \begin{equation} E=\overset{k_{2}}{\underset{k=k_{1}}{\sum}}\left|X\left[k\right]\right|^{2} \end{equation} Note that in this computation the $X[k]$ values are not in decibels (dB). The stftAnal() function returns magnitude spectra in dB scale, which should be converted to linear scale before the energy computation. 
Once the energy is computed it can be converted back to the dB scale as: \begin{equation} E_{dB}= 10\,\log_{10}(E) \end{equation} Signal to noise ratio (SNR): Signal to noise ratio (SNR) is a frequently used measure to quantify the amount of noise present/added in a signal. In the context of this exercise it can be computed in decibels (dB) as: \begin{equation} \mathrm{SNR} = 10\,\log_{10}\left(\frac{E_{\mathrm{signal}}}{E_{\mathrm{noise}}}\right) \end{equation} where, $E_{\mathrm{signal}}$ and $E_{\mathrm{noise}}$ are the energy of the signal and noise respectively. For our case of analysis and synthesis using a specific model (for example, STFT) noise can be thought of as the difference between the input signal and the output signal of the model. Onset detection function: An onset detection function (ODF) refers to a continuous function (one value per audio frame) often used for detecting acoustic events in an audio stream. In music information retrieval (MIR), ODFs are typically used for detecting onsets of musical notes and percussion strokes. An ODF generally has high values at the onsets of acoustic events. A simple ODF can be computed by taking the difference between the energy values of consecutive frames, as shown below: \begin{equation} O(l) = E(l) - E(l-1) ,\,\,\,\,\, l \geq 1 \end{equation} where, $O(l)$ is the ODF computed at frame index $l$ and $E$ is the energy of the signal in a particular frequency band in decibels (dB). Often, multiple ODFs are computed with different frequency bands across the spectrum. In order to detect only the onsets of the events and not the offsets, it is a common practice to half wave rectify the ODF and obtain $\bar{O}(l)$. Half wave rectification of the ODF is given by: \begin{equation} \bar{O}(l)=\begin{cases} \begin{array}{c} O(l), \, \mathrm{if} \,\,O(l)>0\ \,\,\,\,0, \,\,\,\,\, \mathrm{if} \,\, O(l)\leq0 \end{array}\end{cases} \end{equation} Part 1 - Extracting the main lobe of the spectrum of a window The function extract_main_lobe() should extract the main lobe of the magnitude spectrum of a window given a window type, window, and its length, M. The function should return the samples corresponding to the main lobe in decibels (dB). To compute the spectrum you should take the FFT size (N) to be 8 times the window length (N = 8*M) (For this part, N does not need to be a power of 2). The input arguments to the function are the window type (window) and the length of the window (M). The function should return a numpy array containing the samples corresponding to the main lobe of the window. In the returned numpy array you should include the samples corresponding to both the local minimas across the main lobe. The possible window types that you can expect as input are rectangular ('boxcar'), 'hamming' or 'blackmanharris'. NOTE: You can approach this task by writing code to find the indices of the local minimas across the main lobe. Tip: log10(0) is not well defined, so its a common practice to add a small value such as eps = 1e-16 to the magnitude spectrum before computing it in dB. This is optional and will not affect your answers. If you find it difficult to concatenate the two halves of the main lobe, you can first center the spectrum using fftshift() and then compute the indexes of the minimas around the main lobe. End of explanation """ # E4 - 1.2: Call extract_main_lobe() with the 3 test cases mentioned and plot the magnitude spectra of the windows with # the location of the main-lobe boundaries. 
Compute and explain the normalized main-lobe width (dividing by the # zero-padding factor), comparing it with the values presented in the theory. ### Your code here """ Explanation: Test cases for the function extract_main_lobe(): Test case 1: If you run your code using window = 'blackmanharris', M = 100 and N = 800, the output numpy array should contain 65 samples. Test case 2: If you run your code using window = 'boxcar', M = 120 and N = 960, the output numpy array should contain 17 samples. Test case 3: If you run your code using window = 'hamming, M = 256, and N = 2048, the output numpy array should contain 33 samples. To understand better the result you can plot the magnitude spectrum of the window and mark the boundaries of the main lobe. End of explanation """ # E4 - 2.1: Complete function compute_snr() def compute_snr(input_file, window, M, N, H): """Measure the amount of distortion introduced during the analysis and synthesis of a signal using the STFT model. Args: input_file (str): wav file name including the path window (str): analysis window type (rectangular, triangular, hanning, hamming, blackman, or blackmanharris) M (int): analysis window length (odd positive integer) N (int): fft size (power of two, > M) H (int): hop size for the stft computation Result: tuple with the signal to noise ratio over the whole sound and of the sound without the begining and end. """ ### your code here """ Explanation: Part 2 - Measuring noise in the reconstructed signal using the STFT model The function compute_snr() should measure the amount of distortion introduced during the analysis and synthesis of a signal using the STFT model. Use SNR (signal to noise ratio) in dB to quantify the amount of noise. Use the stft() function in stft.py to do an analysis followed by a synthesis of the input signal. Use the time domain energy definition to compute the SNR. With the input signal and the obtained output, compute two different SNR values for the following cases: SNR1: Over the entire length of the input and the output signals. SNR2: For the segment of the signals left after discarding M samples from both the start and the end, where M is the analysis window length. Note that the computations are done after STFT analysis and synthesis. The input arguments to the function are the wav file name including the path (input_file), window type (window), window length (M), FFT size (N), and hop size (H). The function should return a python tuple of both the SNR values in decibels. Both SNR1 and SNR2 are float values. End of explanation """ # E4 - 2.2: Call the function compute_snr() for the 3 test cases mentioned, explain the results ### Your code here """ Explanation: Test cases for the function compute_snr(): Test case 1: If you run your code using piano.wav file with 'blackman' window, M = 513, N = 2048 and H = 128, the output SNR values should be around: (67.57748352378475, 86.35716169253175). Test case 2: If you run your code using sax-phrase-short.wav file with 'hamming' window, M = 512, N = 1024 and H = 64, the output SNR values should be around: (89.510506656299285, 306.18696700251388). Test case 3: If you run your code using rain.wav file with 'hann' window, M = 1024, N = 2048 and H = 128, the output SNR values should be around: (74.631476225366825, 304.26918192997738). Due to precision differences on different machines/hardware, compared to the expected SNR values, your output values can differ substantially. 
End of explanation """ # E4 - 3.1: Complete function compute_eng_env() def compute_eng_env(input_file, window, M, N, H): """Compute band-wise energy envelopes of a given audio signal using the STFT. Args: input_file (string): input sound file (monophonic with sampling rate of 44100) window (string): analysis window type (choice of rectangular, triangular, hanning, hamming, blackman, blackmanharris) M (integer): analysis window size (odd positive integer) N (integer): FFT size (power of 2, such that N > M) H (integer): hop size for the stft computation Result: np.array: magnitude spectra of sound (2D array) np.array: 2D numpy array with energy envelope of band 0 < f < 3000 Hz (in dB) in first column, [:,0] np.array: energy envelope of band 3000 < f < 10000 Hz (in dB) in second column [:,1] """ ### your code here """ Explanation: Part 3 - Computing band-wise energy envelopes of a signal The function compute_eng_env() should compute band-wise energy envelopes of a given audio signal using the STFT. Consider two frequency bands, low and high. The low frequency band is the set of frequencies between 0 and 3000 Hz and the high frequency band is the set of frequencies between 3000 and 10000 Hz (excluding the boundary frequencies in both the cases). At a given frame, the value of the energy envelope of a band can be computed as the sum of squared values of all the frequency coefficients in that band. Then you should compute the energy envelopes in decibels. The input arguments are the wav file name including the path (input_file), window type (window), window length (M), FFT size (N) and hop size (H). The function should return a numpy array with two columns, where the first column is the energy envelope of the low frequency band and the second column is that of the high frequency band. Use stftAnal() function from stft.py module to obtain the magnitude spectra for all the audio frames of the input sound. Then compute the energy values for each frequency band specified. While calculating frequency bins for each frequency band, consider only the bins that are within the specified frequency range. For example, for the low frequency band consider only the bins with frequency > 0 Hz and < 3000 Hz (you can use np.where() to find those bin indexes). This way you also remove the DC offset of the signal in energy envelope computation. The frequency corresponding to the bin index k can be computed as k*fs/N, where fs is the sampling rate of the signal. End of explanation """ # E4 - 3.2: Call the function compute_eng_env() and plot the envelopes together with the # spectrogram of the signal ### Your code here """ Explanation: Test cases for the function compute_eng_env(): Test case 1: Use piano.wav file with window = 'blackman', M = 513, N = 1024 and H = 128 as input. The bin indexes of the low frequency band span from 1 to 69 (69 samples) and of the high frequency band span from 70 to 232 (163 samples). Test case 2: Use piano.wav file with window = 'blackman', M = 2047, N = 4096 and H = 128 as input. The bin indexes of the low frequency band span from 1 to 278 (278 samples) and of the high frequency band span from 279 to 928 (650 samples). Test case 3: Use sax-phrase-short.wav file with window = 'hamming', M = 513, N = 2048 and H = 256 as input. The bin indexes of the low frequency band span from 1 to 139 (139 samples) and of the high frequency band span from 140 to 464 (325 samples). 
To get a better understanding of the band-wise energy envelope and its characteristics you can plot the envelopes together with the spectrogram of the signal. You can use matplotlib plotting library for this purpose. To visualize the spectrogram of a signal, a good option is to use the function colormesh() (you can reuse the code in lectures/4-STFT/plots-code/spectrogram.py). Either overlay the envelopes on the spectrogram or plot them in a different subplot. Make sure you use the same range of the x-axis for both the spectrogram and the energy envelopes. You can clearly notice the sharp attacks and decay of the piano notes for test case 1. You can compare this with the output from test case 2 that uses a larger window. You can infer the influence of window size on sharpness of the note attacks. End of explanation """ # E4 4.1: Complete function compute_odf() def compute_odf(input_file, window, M, N, H): """Compute a simple onset detection function (ODF) using the STFT. Args: input_file (str): input sound file (monophonic with sampling rate of 44100) window (str): analysis window type (rectangular, triangular, hanning, hamming, blackman, or blackmanharris) M (int): analysis window size (odd integer value) N (int): fft size (power of two, bigger or equal than than M) H (int): hop size for the STFT computation Result: np.array: magnitude spectra of sound (2D array) np.array: D numpy array with ODF computed in band 0 < f < 3000 Hz (in dB) in first column, [:,0] np.array: ODF computed of band 3000 < f < 10000 Hz (in dB) in second column [:,1] """ ### your code here """ Explanation: Part 4 - Computing onset detection function The function compute_odf() should compute a simple onset detection function (ODF) using the STFT. It should compute two ODFs one for each of the frequency bands, low and high. The low frequency band is the set of all the frequencies between 0 and 3000 Hz and the high frequency band is the set of all the frequencies between 3000 and 10000 Hz (excluding the boundary frequencies in both the cases). Start with an initial condition of ODF(0) = 0 in order to make the length of the ODF same as that of the energy envelope. Remember to apply a half wave rectification on the ODF. The input arguments to the function are the wav file name including the path (input_file), window type (window), window length (M), FFT size (N), and hop size (H). The function should return a numpy array with two columns, where the first column is the ODF computed on the low frequency band and the second column is the ODF computed on the high frequency band. Use stftAnal() to obtain the magnitude spectra for all the audio frames. Then compute the band-wise energy envelope for each frequency band specified. Finally compute the half wave rectify version of ODF from each energy envelope. End of explanation """ # E4 - 4.2: Call the function compute_odf() for the 3 test cases and plot the ODF functions together with the # spectrogram of the signal. ### Your code here # E4 - 4.3: Explain the results of Part 4 ''' ''' """ Explanation: Test cases for the function compute_odf(): Test case 1: Use piano.wav file with window = 'blackman', M = 513, N = 1024 and H = 128 as input. The bin indexes of the low frequency band span from 1 to 69 (69 samples) and of the high frequency band span from 70 to 232 (163 samples). Test case 2: Use piano.wav file with window = 'blackman', M = 2047, N = 4096 and H = 128 as input. 
The bin indexes of the low frequency band span from 1 to 278 (278 samples) and of the high frequency band span from 279 to 928 (650 samples). Test case 3: Use sax-phrase-short.wav file with window = 'hamming', M = 513, N = 2048 and H = 256 as input. The bin indexes of the low frequency band span from 1 to 139 (139 samples) and of the high frequency band span from 140 to 464 (325 samples). To get a better understanding of the ODFs and their characteristics you can plot the ODF functions together with the spectrogram of the signal. Use the same plotting than in the previos part. In order to identify the actual onsets of a signal you would need to find the peaks, local maxima, of the ODF functions using a magnitude threshold. Which of the 2 ODFs (low or high frequencies) would be most useful? What would be the right threshold for all 3 cases? Could you find a single threshold to be used for all 3 cases? For test case 1, you should clearly see that the ODFs have sharp peaks at the onset of the piano notes. You should notice 5 peaks that are above 10dB value in the ODF computed on the high frequency band. End of explanation """
GoogleCloudPlatform/mlops-on-gcp
immersion/guided_projects/guided_project_3_nlp_starter/tfx_starter.ipynb
apache-2.0
import absl import os import tempfile import time import pandas as pd import tensorflow as tf import tensorflow_data_validation as tfdv import tensorflow_model_analysis as tfma import tensorflow_transform as tft import tfx from pprint import pprint from tensorflow_metadata.proto.v0 import schema_pb2, statistics_pb2, anomalies_pb2 from tensorflow_transform.tf_metadata import schema_utils from tfx.components import CsvExampleGen from tfx.components import Evaluator from tfx.components import ExampleValidator from tfx.components import InfraValidator from tfx.components import Pusher from tfx.components import ResolverNode from tfx.components import SchemaGen from tfx.components import StatisticsGen from tfx.components import Trainer from tfx.components import Transform from tfx.components import Tuner from tfx.dsl.components.base import executor_spec from tfx.components.common_nodes.importer_node import ImporterNode from tfx.components.trainer import executor as trainer_executor from tfx.dsl.experimental import latest_blessed_model_resolver from tfx.orchestration import metadata from tfx.orchestration import pipeline from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext from tfx.proto import evaluator_pb2 from tfx.proto import example_gen_pb2 from tfx.proto import infra_validator_pb2 from tfx.proto import pusher_pb2 from tfx.proto import trainer_pb2 from tfx.proto.evaluator_pb2 import SingleSlicingSpec from tfx.types import Channel from tfx.types.standard_artifacts import Model from tfx.types.standard_artifacts import HyperParameters from tfx.types.standard_artifacts import ModelBlessing from tfx.types.standard_artifacts import InfraBlessing """ Explanation: Text Classification TFX Pipeline Starter Objective: In this notebook, we show you how to put a text classification model implemented in model.py, preprocessing.py, and config.py into an interactive TFX pipeline. Using these files and the code snippets in this notebook, you'll configure a TFX pipeline generated by the tfx template tool as in the previous guided project so that the text classification can be run on a CAIP Pipelines Kubeflow cluster. The dataset itself consists of article titles along with their source, and the goal is to predict the source from the title. (This dataset can be re-generated by running either the keras_for_text_classification.ipynb notebook or the reusable_embeddings.ipynb notebook, which contain different models to solve this problem.) The solution we propose here is fairly simple and you can build on it by inspecting these notebooks. End of explanation """ print("Tensorflow Version:", tf.__version__) print("TFX Version:", tfx.__version__) print("TFDV Version:", tfdv.__version__) print("TFMA Version:", tfma.VERSION_STRING) absl.logging.set_verbosity(absl.logging.INFO) """ Explanation: Note: this lab was developed and tested with the following TF ecosystem package versions: Tensorflow Version: 2.3.1 TFX Version: 0.25.0 TFDV Version: 0.25.0 TFMA Version: 0.25.0 If you encounter errors with the above imports (e.g. TFX component not found), check your package versions in the cell below. End of explanation """ os.environ['PATH'] += os.pathsep + '/home/jupyter/.local/bin' """ Explanation: If the versions above do not match, update your packages in the current Jupyter kernel below. The default %pip package installation location is not on your system installation PATH; use the command below to append the local installation path to pick up the latest package versions. 
Note that you may also need to restart your notebook kernel to pick up the specified package versions and re-run the imports cell above before proceeding with the lab. End of explanation """ ARTIFACT_STORE = os.path.join(os.sep, 'home', 'jupyter', 'artifact-store') SERVING_MODEL_DIR=os.path.join(os.sep, 'home', 'jupyter', 'serving_model') DATA_ROOT = './data' DATA_ROOT = f'{ARTIFACT_STORE}/data' !mkdir -p $DATA_ROOT """ Explanation: Configure lab settings Set constants, location paths and other environment settings. End of explanation """ data = pd.read_csv('./data/titles_sample.csv') data.head() LABEL_MAPPING = { 'github': 0, 'nytimes': 1, 'techcrunch': 2 } data['source'] = data['source'].apply(lambda label: LABEL_MAPPING[label]) data.head() data.to_csv(f'{DATA_ROOT}/dataset.csv', index=None) !head $DATA_ROOT/*.csv """ Explanation: Preparing the dataset End of explanation """ PIPELINE_NAME = 'tfx-title-classifier' PIPELINE_ROOT = os.path.join(ARTIFACT_STORE, PIPELINE_NAME, time.strftime("%Y%m%d_%H%M%S")) os.makedirs(PIPELINE_ROOT, exist_ok=True) context = InteractiveContext( pipeline_name=PIPELINE_NAME, pipeline_root=PIPELINE_ROOT, metadata_connection_config=None) """ Explanation: Interactive Context TFX Interactive Context allows you to create and run TFX Components in an interactive mode. It is designed to support experimentation and development in a Jupyter Notebook environment. It is an experimental feature and major changes to interface and functionality are expected. When creating the interactive context you can specifiy the following parameters: - pipeline_name - Optional name of the pipeline for ML Metadata tracking purposes. If not specified, a name will be generated for you. - pipeline_root - Optional path to the root of the pipeline's outputs. If not specified, an ephemeral temporary directory will be created and used. - metadata_connection_config - Optional metadata_store_pb2.ConnectionConfig instance used to configure connection to a ML Metadata connection. If not specified, an ephemeral SQLite MLMD connection contained in the pipeline_root directory with file name "metadata.sqlite" will be used. End of explanation """ output_config = example_gen_pb2.Output( split_config=example_gen_pb2.SplitConfig(splits=[ example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=4), example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=1) ])) example_gen = tfx.components.CsvExampleGen( input_base=DATA_ROOT, output_config=output_config) context.run(example_gen) """ Explanation: Ingesting data using ExampleGen In any ML development process the first step is to ingest the training and test datasets. The ExampleGen component ingests data into a TFX pipeline. It consumes external files/services to generate a set file files in the TFRecord format, which will be used by other TFX components. It can also shuffle the data and split into an arbitrary number of partitions. 
<img src=https://github.com/GoogleCloudPlatform/mlops-on-gcp/raw/master/images/ExampleGen.png width="300"> Configure and run CsvExampleGen End of explanation """ examples_uri = example_gen.outputs['examples'].get()[0].uri tfrecord_filenames = [ os.path.join(examples_uri, 'train', name) for name in os.listdir(os.path.join(examples_uri, 'train')) ] dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP") for tfrecord in dataset.take(2): example = tf.train.Example() example.ParseFromString(tfrecord.numpy()) for name, feature in example.features.feature.items(): if feature.HasField('bytes_list'): value = feature.bytes_list.value if feature.HasField('float_list'): value = feature.float_list.value if feature.HasField('int64_list'): value = feature.int64_list.value print('{}: {}'.format(name, value)) print('******') """ Explanation: Examine the ingested data End of explanation """ statistics_gen = tfx.components.StatisticsGen( examples=example_gen.outputs['examples']) context.run(statistics_gen) """ Explanation: Generating statistics using StatisticsGen The StatisticsGen component generates data statistics that can be used by other TFX components. StatisticsGen uses TensorFlow Data Validation. StatisticsGen generate statistics for each split in the ExampleGen component's output. In our case there two splits: train and eval. <img src=https://github.com/GoogleCloudPlatform/mlops-on-gcp/raw/master/images/StatisticsGen.png width="200"> Configure and run the StatisticsGen component End of explanation """ context.show(statistics_gen.outputs['statistics']) """ Explanation: Visualize statistics The generated statistics can be visualized using the tfdv.visualize_statistics() function from the TensorFlow Data Validation library or using a utility method of the InteractiveContext object. In fact, most of the artifacts generated by the TFX components can be visualized using InteractiveContext. End of explanation """ schema_gen = SchemaGen( statistics=statistics_gen.outputs['statistics'], infer_feature_shape=False) context.run(schema_gen) """ Explanation: Infering data schema using SchemaGen Some TFX components use a description input data called a schema. The schema is an instance of schema.proto. It can specify data types for feature values, whether a feature has to be present in all examples, allowed value ranges, and other properties. SchemaGen automatically generates the schema by inferring types, categories, and ranges from data statistics. The auto-generated schema is best-effort and only tries to infer basic properties of the data. It is expected that developers review and modify it as needed. SchemaGen uses TensorFlow Data Validation. The SchemaGen component generates the schema using the statistics for the train split. The statistics for other splits are ignored. <img src=https://github.com/GoogleCloudPlatform/mlops-on-gcp/raw/master/images/SchemaGen.png width="200"> Configure and run the SchemaGen components End of explanation """ context.show(schema_gen.outputs['schema']) """ Explanation: Visualize the inferred schema End of explanation """ schema_proto_path = '{}/{}'.format(schema_gen.outputs['schema'].get()[0].uri, 'schema.pbtxt') schema = tfdv.load_schema_text(schema_proto_path) """ Explanation: Updating the auto-generated schema In most cases the auto-generated schemas must be fine-tuned manually using insights from data exploration and/or domain knowledge about the data. 
For example, you know that in the covertype dataset there are seven types of forest cover (coded using 1-7 range) and that the value of the Slope feature should be in the 0-90 range. You can manually add these constraints to the auto-generated schema by setting the feature domain. Load the auto-generated schema proto file End of explanation """ schema_dir = os.path.join(ARTIFACT_STORE, 'schema') tf.io.gfile.makedirs(schema_dir) schema_file = os.path.join(schema_dir, 'schema.pbtxt') tfdv.write_schema_text(schema, schema_file) !cat {schema_file} """ Explanation: Modify the schema You can use the protocol buffer APIs to modify the schema using tfdv.set_somain. Review the TFDV library API documentation on setting a feature's domain. You can use the protocol buffer APIs to modify the schema. Review the Tensorflow Metadata proto definition for configuration options. Save the updated schema End of explanation """ schema_importer = ImporterNode( instance_name='Schema_Importer', source_uri=schema_dir, artifact_type=tfx.types.standard_artifacts.Schema, reimport=False) context.run(schema_importer) """ Explanation: Importing the updated schema using ImporterNode The ImporterNode component allows you to import an external artifact, including the schema file, so it can be used by other TFX components in your workflow. Configure and run the ImporterNode component End of explanation """ context.show(schema_importer.outputs['result']) """ Explanation: Visualize the imported schema End of explanation """ example_validator = ExampleValidator( instance_name="Data_Validation", statistics=statistics_gen.outputs['statistics'], schema=schema_importer.outputs['result']) context.run(example_validator) """ Explanation: Validating data with ExampleValidator The ExampleValidator component identifies anomalies in data. It identifies anomalies by comparing data statistics computed by the StatisticsGen component against a schema generated by SchemaGen or imported by ImporterNode. ExampleValidator can detect different classes of anomalies. For example it can: perform validity checks by comparing data statistics against a schema detect training-serving skew by comparing training and serving data. detect data drift by looking at a series of data. The ExampleValidator component validates the data in the eval split only. Other splits are ignored. <img src=https://github.com/GoogleCloudPlatform/mlops-on-gcp/raw/master/images/ExampleValidator.png width="350"> Configure and run the ExampleValidator component End of explanation """ train_uri = example_validator.outputs['anomalies'].get()[0].uri train_anomalies_filename = os.path.join(train_uri, "train/anomalies.pbtxt") !cat $train_anomalies_filename """ Explanation: Examine the output of ExampleValidator The output artifact of the ExampleValidator is the anomalies.pbtxt file describing an anomalies_pb2.Anomalies protobuf. End of explanation """ context.show(example_validator.outputs['output']) """ Explanation: Visualize validation results The file anomalies.pbtxt can be visualized using context.show. 
End of explanation """ %%writefile config.py FEATURE_KEY = 'title' LABEL_KEY = 'source' N_CLASSES = 3 HUB_URL = "https://tfhub.dev/google/nnlm-en-dim50/2" HUB_DIM = 50 N_NEURONS = 16 TRAIN_BATCH_SIZE = 5 EVAL_BATCH_SIZE = 5 MODEL_NAME = 'tfx_title_classifier' def transformed_name(key): return key + '_xf' %%writefile preprocessing.py import tensorflow as tf from config import ( LABEL_KEY, N_CLASSES, FEATURE_KEY, transformed_name ) def _fill_in_missing(x): default_value = '' if x.dtype == tf.string else 0 return tf.squeeze( tf.sparse.to_dense( tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]), default_value), axis=1) def preprocessing_fn(inputs): features = _fill_in_missing(inputs[FEATURE_KEY]) labels = _fill_in_missing(inputs[LABEL_KEY]) return { transformed_name(FEATURE_KEY): features, transformed_name(LABEL_KEY): labels } TRANSFORM_MODULE = 'preprocessing.py' """ Explanation: In our case no anomalies were detected in the eval split. For a detailed deep dive into data validation and schema generation refer to the lab-31-tfdv-structured-data lab. Preprocessing data with Transform The Transform component performs data transformation and feature engineering. The Transform component consumes tf.Examples emitted from the ExampleGen component and emits the transformed feature data and the SavedModel graph that was used to process the data. The emitted SavedModel can then be used by serving components to make sure that the same data pre-processing logic is applied at training and serving. The Transform component requires more code than many other components because of the arbitrary complexity of the feature engineering that you may need for the data and/or model that you're working with. It requires code files to be available which define the processing needed. <img src=https://github.com/GoogleCloudPlatform/mlops-on-gcp/raw/master/images/Transform.png width="400"> Define the pre-processing module To configure Transform, you need to encapsulate your pre-processing code in the Python preprocessing_fn function and save it to a python module that is then provided to the Transform component as an input. This module will be loaded by transform and the preprocessing_fn function will be called when the Transform component runs. In most cases, your implementation of the preprocessing_fn makes extensive use of TensorFlow Transform for performing feature engineering on your dataset. End of explanation """ transform = Transform( examples=example_gen.outputs['examples'], schema=schema_importer.outputs['result'], module_file=TRANSFORM_MODULE) context.run(transform) """ Explanation: Configure and run the Transform component. End of explanation """ os.listdir(transform.outputs['transform_graph'].get()[0].uri) """ Explanation: Examine the Transform component's outputs The Transform component has 2 outputs: transform_graph - contains the graph that can perform the preprocessing operations (this graph will be included in the serving and evaluation models). transformed_examples - contains the preprocessed training and evaluation data. 
Take a peek at the transform_graph artifact: it points to a directory containing 3 subdirectories: End of explanation """ os.listdir(transform.outputs['transformed_examples'].get()[0].uri) transform_uri = transform.outputs['transformed_examples'].get()[0].uri tfrecord_filenames = [ os.path.join(transform_uri, 'train', name) for name in os.listdir(os.path.join(transform_uri, 'train')) ] dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP") for tfrecord in dataset.take(4): example = tf.train.Example() example.ParseFromString(tfrecord.numpy()) for name, feature in example.features.feature.items(): if feature.HasField('bytes_list'): value = feature.bytes_list.value if feature.HasField('float_list'): value = feature.float_list.value if feature.HasField('int64_list'): value = feature.int64_list.value print('{}: {}'.format(name, value)) print('******') """ Explanation: And the transform.examples artifact End of explanation """ %%writefile model.py import tensorflow as tf import tensorflow_transform as tft from tensorflow.keras.callbacks import TensorBoard from tensorflow_hub import KerasLayer from tensorflow.keras.layers import Dense from tensorflow.keras.models import Sequential from tfx_bsl.tfxio import dataset_options from config import ( HUB_URL, HUB_DIM, N_NEURONS, N_CLASSES, LABEL_KEY, TRAIN_BATCH_SIZE, EVAL_BATCH_SIZE, MODEL_NAME, transformed_name ) def _get_serve_tf_examples_fn(model, tf_transform_output): model.tft_layer = tf_transform_output.transform_features_layer() @tf.function def serve_tf_examples_fn(serialized_tf_examples): """Returns the output to be used in the serving signature.""" feature_spec = tf_transform_output.raw_feature_spec() feature_spec.pop(LABEL_KEY) parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec) transformed_features = model.tft_layer(parsed_features) return model(transformed_features) return serve_tf_examples_fn def _input_fn(file_pattern, data_accessor, tf_transform_output, batch_size=200): return data_accessor.tf_dataset_factory( file_pattern, dataset_options.TensorFlowDatasetOptions( batch_size=batch_size, label_key=transformed_name(LABEL_KEY)), tf_transform_output.transformed_metadata.schema ) def _load_hub_module_layer(): hub_module = KerasLayer( HUB_URL, output_shape=[HUB_DIM], input_shape=[], dtype=tf.string, trainable=True) return hub_module def _build_keras_model(): hub_module = _load_hub_module_layer() model = Sequential([ hub_module, Dense(N_NEURONS, activation='relu'), Dense(N_CLASSES, activation='softmax') ]) model.compile( optimizer='adam', loss='sparse_categorical_crossentropy', metrics=[tf.keras.metrics.SparseCategoricalAccuracy()] ) return model def run_fn(fn_args): tf_transform_output = tft.TFTransformOutput(fn_args.transform_output) train_dataset = _input_fn(fn_args.train_files, fn_args.data_accessor, tf_transform_output, TRAIN_BATCH_SIZE) eval_dataset = _input_fn(fn_args.eval_files, fn_args.data_accessor, tf_transform_output, EVAL_BATCH_SIZE) mirrored_strategy = tf.distribute.MirroredStrategy() with mirrored_strategy.scope(): model = _build_keras_model() tensorboard_callback = tf.keras.callbacks.TensorBoard( log_dir=fn_args.model_run_dir, update_freq='batch') model.fit( train_dataset, steps_per_epoch=fn_args.train_steps, validation_data=eval_dataset, validation_steps=fn_args.eval_steps, callbacks=[tensorboard_callback]) signatures = { 'serving_default': _get_serve_tf_examples_fn(model, tf_transform_output).get_concrete_function( tf.TensorSpec( shape=[None], dtype=tf.string, name='examples')), 
} model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures) TRAINER_MODULE_FILE = 'model.py' """ Explanation: Train your TensorFlow model with the Trainer component The Trainer component trains a model using TensorFlow. Trainer takes: tf.Examples used for training and eval. A user provided module file that defines the trainer logic. A data schema created by SchemaGen or imported by ImporterNode. A proto definition of train args and eval args. An optional transform graph produced by upstream Transform component. An optional base models used for scenarios such as warmstarting training. <img src=https://github.com/GoogleCloudPlatform/mlops-on-gcp/raw/master/images/Trainer.png width="400"> Define the trainer module To configure Trainer, you need to encapsulate your training code in a Python module that is then provided to the Trainer as an input. End of explanation """ trainer = Trainer( custom_executor_spec=executor_spec.ExecutorClassSpec(trainer_executor.GenericExecutor), module_file=TRAINER_MODULE_FILE, transformed_examples=transform.outputs.transformed_examples, schema=schema_importer.outputs.result, transform_graph=transform.outputs.transform_graph, train_args=trainer_pb2.TrainArgs(splits=['train'], num_steps=20), eval_args=trainer_pb2.EvalArgs(splits=['eval'], num_steps=5)) context.run(trainer) """ Explanation: Create and run the Trainer component As of the 0.25.0 release of TFX, the Trainer component only supports passing a single field - num_steps - through the train_args and eval_args arguments. End of explanation """ logs_path = trainer.outputs['model_run'].get()[0].uri print(logs_path) """ Explanation: Analyzing training runs with TensorBoard In this step you will analyze the training run with TensorBoard.dev. TensorBoard.dev is a managed service that enables you to easily host, track and share your ML experiments. Retrieve the location of TensorBoard logs Each model run's train and eval metric logs are written to the model_run directory by the Tensorboard callback defined in model.py. End of explanation """ model_resolver = ResolverNode( instance_name='latest_blessed_model_resolver', resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver, model=Channel(type=Model), model_blessing=Channel(type=ModelBlessing)) context.run(model_resolver) """ Explanation: Upload the logs and start TensorBoard.dev Open a new JupyterLab terminal window From the terminal window, execute the following command tensorboard dev upload --logdir [YOUR_LOGDIR] Where [YOUR_LOGDIR] is an URI retrieved by the previous cell. You will be asked to authorize TensorBoard.dev using your Google account. If you don't have a Google account or you don't want to authorize TensorBoard.dev you can skip this exercise. After the authorization process completes, follow the link provided to view your experiment. Evaluating trained models with Evaluator The Evaluator component analyzes model performance using the TensorFlow Model Analysis library. It runs inference requests on particular subsets of the test dataset, based on which slices are defined by the developer. Knowing which slices should be analyzed requires domain knowledge of what is important in this particular use case or domain. The Evaluator can also optionally validate a newly trained model against a previous model. In this lab, you only train one model, so the Evaluator automatically will label the model as "blessed". 
<img src=https://github.com/GoogleCloudPlatform/mlops-on-gcp/raw/master/images/Evaluator.png width="400"> Configure and run the Evaluator component Use the ResolverNode to pick the previous model to compare against. The model resolver is only required if performing model validation in addition to evaluation. In this case we validate against the latest blessed model. If no model has been blessed before (as in this case) the evaluator will make our candidate the first blessed model. End of explanation """ accuracy_threshold = tfma.MetricThreshold( value_threshold=tfma.GenericValueThreshold( lower_bound={'value': 0.30}, upper_bound={'value': 0.99}) ) metrics_specs = tfma.MetricsSpec( metrics = [ tfma.MetricConfig(class_name='SparseCategoricalAccuracy', threshold=accuracy_threshold), tfma.MetricConfig(class_name='ExampleCount')]) eval_config = tfma.EvalConfig( model_specs=[ tfma.ModelSpec(label_key='source') ], metrics_specs=[metrics_specs] ) eval_config model_analyzer = Evaluator( examples=example_gen.outputs.examples, model=trainer.outputs.model, baseline_model=model_resolver.outputs.model, eval_config=eval_config ) context.run(model_analyzer, enable_cache=False) """ Explanation: Configure evaluation metrics and slices. End of explanation """ model_blessing_uri = model_analyzer.outputs.blessing.get()[0].uri !ls -l {model_blessing_uri} """ Explanation: Check the model performance validation status End of explanation """ trainer.outputs['model'] pusher = Pusher( model=trainer.outputs['model'], model_blessing=model_analyzer.outputs['blessing'], push_destination=pusher_pb2.PushDestination( filesystem=pusher_pb2.PushDestination.Filesystem( base_directory=SERVING_MODEL_DIR))) context.run(pusher) """ Explanation: Deploying models with Pusher The Pusher component checks whether a model has been "blessed", and if so, deploys it by pushing the model to a well known file destination. <img src=https://github.com/GoogleCloudPlatform/mlops-on-gcp/raw/master/images/Pusher.png width="400"> Configure and run the Pusher component End of explanation """ pusher.outputs # Set `PATH` to include a directory containing `saved_model_cli. PATH=%env PATH %env PATH=/opt/conda/envs/tfx/bin:{PATH} latest_pushed_model = os.path.join(SERVING_MODEL_DIR, max(os.listdir(SERVING_MODEL_DIR))) !saved_model_cli show --dir {latest_pushed_model} --all """ Explanation: Examine the output of Pusher End of explanation """
ceos-seo/data_cube_notebooks
notebooks/Data_Challenge/Weather.ipynb
apache-2.0
# Supress Warnings import warnings warnings.filterwarnings('ignore') # Import common GIS tools import numpy as np import xarray as xr import matplotlib.pyplot as plt import rasterio.features import folium import math # Import Planetary Computer tools import pystac_client import planetary_computer """ Explanation: 2022 EY Challenge - Weather Data This notebook is used to create a weather dataset from <b>"TerraClimate"</b>. These data parameters, or their variability, can be used as "predictor variables" to relate to species samples. TerraClimate is a dataset of monthly climate and climatic water balance for global terrestrial surfaces from 1958-2019. These data provide important inputs for ecological and hydrological studies at global scales that require high spatial resolution and time-varying data. All data have monthly temporal resolution and a 4-km (0.036 degree) spatial resolution. The data can be found in the MS Planetary Computer catalog: https://planetarycomputer.microsoft.com/dataset/terraclimate End of explanation """ # Define the bounding box using corners min_lon, min_lat = (150.62, -33.69) # Lower-left corner (longitude, latitude) max_lon, max_lat = (150.83, -33.48) # Upper-right corner (longitude, latitude) bbox = (min_lon, min_lat, max_lon, max_lat) latitude = (min_lat, max_lat) longitude = (min_lon, max_lon) def _degree_to_zoom_level(l1, l2, margin = 0.0): degree = abs(l1 - l2) * (1 + margin) zoom_level_int = 0 if degree != 0: zoom_level_float = math.log(360/degree)/math.log(2) zoom_level_int = int(zoom_level_float) else: zoom_level_int = 18 return zoom_level_int def display_map(latitude = None, longitude = None): margin = -0.5 zoom_bias = 0 lat_zoom_level = _degree_to_zoom_level(margin = margin, *latitude ) + zoom_bias lon_zoom_level = _degree_to_zoom_level(margin = margin, *longitude) + zoom_bias zoom_level = min(lat_zoom_level, lon_zoom_level) center = [np.mean(latitude), np.mean(longitude)] map_hybrid = folium.Map(location=center,zoom_start=zoom_level, tiles=" http://mt1.google.com/vt/lyrs=y&z={z}&x={x}&y={y}",attr="Google") line_segments = [(latitude[0],longitude[0]),(latitude[0],longitude[1]), (latitude[1],longitude[1]),(latitude[1],longitude[0]), (latitude[0],longitude[0])] map_hybrid.add_child(folium.features.PolyLine(locations=line_segments,color='red',opacity=0.8)) map_hybrid.add_child(folium.features.LatLngPopup()) return map_hybrid # Plot bounding box on a map f = folium.Figure(width=600, height=600) m = display_map(latitude,longitude) f.add_child(m) """ Explanation: Define the analysis region and view on a map First, we define our area of interest using latitude and longitude coordinates. Our test region is near Richmond, NSW, Australia. The first line defines the lower-left corner of the bounding box and the second line defines the upper-right corner of the bounding box. GeoJSON format uses a specific order: (longitude, latitude), so be careful when entering the coordinates. 
End of explanation """ import pystac collection = pystac.read_file("https://planetarycomputer.microsoft.com/api/stac/v1/collections/terraclimate") asset = collection.assets["zarr-https"] import fsspec import xarray as xr store = fsspec.get_mapper(asset.href) data = xr.open_zarr(store, **asset.extra_fields["xarray:open_kwargs"]) # View the dimensions, coordinates and variables of the dataset # Click on the "data variables" arrow to expand the output and see all of the possible variables data """ Explanation: Discover and load the data for analysis Using <b>pystac</b>, we will load the collection with links to all of the metadata about this dataset. The collection contains assets, which are links to the root of a Zarr store, which can be opened with xarray. Once loaded and opened, we can select the parameters we desire for further analyses. Overall, there are 18 variables for consideration. You can click the "Data Variables" output "arrow" for a list of those parameters. End of explanation """ clipped_data = data.sel(lon=slice(min_lon,max_lon),lat=slice(max_lat,min_lat),time=slice('2015-01-01','2019-12-31')) parsed_data = clipped_data[['tmax', 'tmin', 'ppt', 'soil']] # View the dimensions: time is in months, lat/lon is in pixels # View the coordinates: lat, lon and time # View the data variables we have parsed from the overall dataset parsed_data """ Explanation: Parsing the dataset Assuming we do not need all 18 variables and global coverage, we will "clip" the data to our region and select a time window that is representative of weather over this area (e.g. 5 years). Finally, we will select the variables we think are most likely to be "predictor variables" for frog species habitats. These are:<br> <br>maximum air temperature (tmax) - 2 meters above surface <br>minimum air temperature (tmin) - 2 meters above surface <br>accumulated precipitation (ppt) - accumulated monthly in millimeters <br>soil moisture (soil) - soil moisture in millimeters at end of month End of explanation """ fig = plt.figure(figsize=(12, 6)) parsed_data["tmax"].mean(dim=["lat","lon"]).plot(marker='o',markersize=4,linewidth=1) parsed_data["tmin"].mean(dim=["lat","lon"]).plot(marker='o',markersize=4,linewidth=1) plt.title("Mean Monthly Maximum and Minimum Air Temperature") plt.show() # Precipitation precip = (parsed_data["ppt"]).mean(dim=["lat","lon"]) precip.plot(figsize=(12, 6),marker='o',markersize=4,linewidth=1) plt.title("Mean Accumulated Precipitation (mm)") plt.show() # Soil Moisture moisture = (parsed_data["soil"]).mean(dim=["lat","lon"]) moisture.plot(figsize=(12, 6),marker='o',markersize=4,linewidth=1) plt.title("Mean Soil Moisture (mm)") plt.show() """ Explanation: Data Analysis Now we will look closer at some of this data using plotting and statistics. 
End of explanation """ val_maxtemp = parsed_data.tmax.mean(dim=["time"]).sel(lon=150.71, lat=-33.51, method="nearest").values print("This is the mean maximum air temperature for the closest pixel: ", np.round(val_maxtemp,1)) val_allmaxtemp = parsed_data.tmax.max(dim=["time"]).sel(lon=150.71, lat=-33.51, method="nearest").values print("This is the overall maximum air temperature for the closest pixel: ", np.round(val_allmaxtemp,1)) val_mintemp = parsed_data.tmin.mean(dim=["time"]).sel(lon=150.71, lat=-33.51, method="nearest").values print("This is the mean minimum air temperature for the closest pixel: ", np.round(val_mintemp,1)) val_allmintemp = parsed_data.tmin.min(dim=["time"]).sel(lon=150.71, lat=-33.51, method="nearest").values print("This is the overall minimum air temperature for the closest pixel: ", np.round(val_allmintemp,1)) val_precip = parsed_data.ppt.mean(dim=["time"]).sel(lon=150.71, lat=-33.51, method="nearest").values print("This is the mean accumulated precipitation for the closest pixel: ", np.round(val_precip,1)) val_soil = parsed_data.soil.mean(dim=["time"]).sel(lon=150.71, lat=-33.51, method="nearest").values print("This is the mean soil moisture for the closest pixel: ", np.round(val_soil,1)) """ Explanation: How will the participants use this data? The GeoTIFF file will contain the Lat-Lon coordinates of each pixel and will also contain the selected parameters for each pixel as separate data layers. Since the FrogID data is also Lat-Lon position, it is possible to find the closest pixel using code similar to what is demonstrated below. Once this pixel is found, then the corresponding parameter data values can be used for modeling species distribution. One example is to use the long-term mean values of a given parameter for a particular location and use that value for species modeling. Below are some sample results for a random Lat-Lon location. End of explanation """ filename = "Soil_Moisture_sample.csv" parsed_data["soil"].sel(lon=150.71, lat=-33.51, method="nearest").to_dataframe().to_csv(filename) # Show the location and size of the new output file !ls *.csv -lah """ Explanation: Saving the time series data in CSV format Now, what if you want to save the time series data for a particular parameter and Lat-Lon location? We can use the example above to specify a Lat-Lon position and then save the time series soil moisture data for that location. Below is the code that will create this CSV file. The file will contain columns for time (month), latitude and longitude (for the nearest pixel) and soil moisture. End of explanation """
poldrack/fmri-analysis-vm
analysis/machinelearning/MachineLearningBasics.ipynb
mit
import numpy,pandas %matplotlib inline import matplotlib.pyplot as plt import scipy.stats from sklearn.model_selection import LeaveOneOut,KFold from sklearn.preprocessing import PolynomialFeatures,scale from sklearn.linear_model import LinearRegression,LassoCV,Ridge import seaborn as sns import statsmodels.formula.api as sm from statsmodels.tools.tools import add_constant recreate=True if recreate: seed=20698 else: seed=numpy.ceil(numpy.random.rand()*100000).astype('int') print(seed) numpy.random.seed(seed) def make_continuous_data(mean=[45,100],var=[10,10],cor=-0.6,N=100): """ generate a synthetic data set with two variables """ cor=numpy.array([[1.,cor],[cor,1.]]) var=numpy.array([[var[0],0],[0,var[1]]]) cov=var.dot(cor).dot(var) return numpy.random.multivariate_normal(mean,cov,N) n=50 d=make_continuous_data(N=n) y=d[:,1] plt.scatter(d[:,0],d[:,1]) plt.xlabel('age') plt.ylabel('processing speed') print('data R-squared: %f'%numpy.corrcoef(d.T)[0,1]**2) """ Explanation: This notebook provides an introduction to some of the basic concepts of machine learning. Let's start by generating some data to work with. Let's say that we have a dataset that has tested people on two continuous measures (processing speed and age) and one discrete measure (diagnosis with any psychiatric disorder). First let's create the continuous data assuming that there is a relationship between these two variables. We will make a function to generate a new dataset, since we will need to do this multiple times. End of explanation """ def loglike(y,yhat,s2=None,verbose=True): N = len(y) SSR = numpy.sum((y-yhat)**2) if s2 is None: # use observed stdev s2 = SSR / float(N) logLike = -(n/2.)*numpy.log(s2) - (n/2.)*numpy.log(2*numpy.pi) - SSR/(2*s2) if verbose: print('SSR:',SSR) print('s2:',s2) print('logLike:',logLike) return logLike logLike_null=loglike(y,numpy.zeros(len(y)),s2=1) """ Explanation: What is the simplest story that we could tell about processing speed these data? Well, we could simply say that the variable is normal with a mean of zero and a standard deviation of 1. Let's see how likely the observed processing speed values are given that set of parameters. First, let's create a function that returns the normal log-likelihood of the data given a set of predicted values. End of explanation """ mean=numpy.mean(y) print('mean:',mean) pred=numpy.ones(len(y))*mean logLike_mean=loglike(y,pred,s2=1) """ Explanation: We are pretty sure that the mean of our variables is not zero, so let's compute the mean and see if the likelihood of the data is higher. End of explanation """ var=numpy.var(y) print('variance',var) pred=numpy.ones(len(y))*mean logLike_mean_std=loglike(y,pred) """ Explanation: What about using the observed variance as well? 
End of explanation """ X=d[:,0] X=add_constant(X) result = sm.OLS( y, X ).fit() print(result.summary()) intercept=result.params[0] slope=result.params[1] pred=result.predict(X) logLike_ols=loglike(y,pred) plt.scatter(y,pred) print('processing speed = %f + %f*age'%(intercept,slope)) print('p =%f'%result.pvalues[1]) def get_RMSE(y,pred): return numpy.sqrt(numpy.mean((y - pred)**2)) def get_R2(y,pred): """ compute r-squared""" return numpy.corrcoef(y,pred)[0,1]**2 ax=plt.scatter(d[:,0],d[:,1]) plt.xlabel('age') plt.ylabel('processing speed') plt.plot(d[:,0], slope * d[:,0] + intercept, color='red') # plot residual lines d_predicted=slope*d[:,0] + intercept for i in range(d.shape[0]): x=d[i,0] y=d[i,1] plt.plot([x,x],[d_predicted[i],y],color='blue') RMSE=get_RMSE(d[:,1],d_predicted) rsq=get_R2(d[:,1],d_predicted) print('rsquared=%f'%rsq) """ Explanation: Is there a relation between processing speed and age? Compute the linear regression equation to find out. End of explanation """ d_new=make_continuous_data(N=n) d_new_predicted=intercept + slope*d_new[:,0] RMSE_new=get_RMSE(d_new[:,1],d_new_predicted) rsq_new=get_R2(d_new[:,1],d_new_predicted) print('R2 for new data: %f'%rsq_new) ax=plt.scatter(d_new[:,0],d_new[:,1]) plt.xlabel('age') plt.ylabel('processing speed') plt.plot(d_new[:,0], slope * d_new[:,0] + intercept, color='red') """ Explanation: This shows us that linear regression can provide a simple description of a complex dataset - we can describe the entire dataset in 2 numbers. Now let's ask how good this description is for a new dataset generated by the same process: End of explanation """ nruns=100 slopes=numpy.zeros(nruns) intercepts=numpy.zeros(nruns) rsquared=numpy.zeros(nruns) fig = plt.figure() ax = fig.gca() for i in range(nruns): data=make_continuous_data(N=n) slopes[i],intercepts[i],_,_,_=scipy.stats.linregress(data[:,0],data[:,1]) ax.plot(data[:,0], slopes[i] * data[:,0] + intercepts[i], color='red', alpha=0.05) pred_orig=intercept + slope*data[:,0] rsquared[i]=get_R2(data[:,1],pred_orig) print('Original R2: %f'%rsq) print('Mean R2 for new datasets on original model: %f'%numpy.mean(rsquared)) """ Explanation: Now let's do this 100 times and look at how variable the fits are. End of explanation """ # initialize the sklearn leave-one-out operator loo=LeaveOneOut() for train,test in loo.split(range(10)): print('train:',train,'test:',test) """ Explanation: Cross-validation The results above show that the fit of the model to the observed data overestimates our ability to predict on new data. In many cases we would like to be able to quantify how well our model generalizes to new data, but it's often not possible to collect additional data. The concept of cross-validation provides us with a way to measure how well a model generalizes. The idea is to iteratively train the model on subsets of the data and then test the model on the left-out portion. Let's first see what cross-validation looks like. Perhaps the simplest version to understand is "leave-one-out" crossvalidation, so let's look at that. 
Here is what the training and test datasets would look like for a dataset with 10 observations; in reality this is way too few observations, but we will use it as an example
End of explanation
"""

# initialize the sklearn leave-one-out operator
loo=LeaveOneOut()

for train,test in loo.split(range(10)):
    print('train:',train,'test:',test)

"""
Explanation: It is often more common to use larger test folds, both to speed up performance (since LOO can require lots of model fitting when there are a large number of observations) and because LOO error estimates can have high variance due to the fact that the models are so highly correlated. This is referred to as K-fold cross-validation; generally we want to choose K somewhere around 5-10. It's generally a good idea to shuffle the order of the observations so that the folds are grouped randomly.
End of explanation
"""

# initialize the sklearn k-fold cross-validation operator
kf=KFold(n_splits=5,shuffle=True)

for train,test in kf.split(range(10)):
    print('train:',train,'test:',test)

"""
Explanation: Now let's perform leave-one-out cross-validation on our original dataset, so that we can compare it to the performance on new datasets. We expect that the correlation between LOO estimates and actual data should be very similar to the Mean R2 for new datasets. We can also plot a histogram of the estimates, to see how they vary across folds.
End of explanation
"""

loo=LeaveOneOut()
slopes_loo=numpy.zeros(n)
intercepts_loo=numpy.zeros(n)
pred=numpy.zeros(n)
ctr=0
for train,test in loo.split(range(n)):
    slopes_loo[ctr],intercepts_loo[ctr],_,_,_=scipy.stats.linregress(d[train,0],d[train,1])
    pred[ctr]=intercepts_loo[ctr] + slopes_loo[ctr]*d[test,0]
    ctr+=1

print('R2 for leave-one-out prediction: %f'%get_R2(pred,d[:,1]))

plt.figure(figsize=(12,5))
plt.subplot(1,2,1)
_=plt.hist(slopes_loo,20)
plt.xlabel('slope estimate')
plt.ylabel('frequency')
plt.subplot(1,2,2)
_=plt.hist(intercepts_loo,20)
plt.xlabel('intercept estimate')
plt.ylabel('frequency')

"""
Explanation: Now let's look at the effect of outliers on in-sample correlation and out-of-sample prediction.
End of explanation """ # from https://gist.github.com/iizukak/1287876 def gram_schmidt_columns(X): Q, R = numpy.linalg.qr(X) return Q def make_continuous_data_poly(mean=0,var=1,betaval=5,order=1,N=100): """ generate a synthetic data set with two variables allowing polynomial functions up to 5-th order """ x=numpy.random.randn(N) x=x-numpy.mean(x) pf=PolynomialFeatures(5,include_bias=False) x_poly=gram_schmidt_columns(pf.fit_transform(x[:,numpy.newaxis])) betas=numpy.zeros(5) betas[0]=mean for i in range(order): betas[i]=betaval func=x_poly.dot(betas)+numpy.random.randn(N)*var d=numpy.vstack((x,func)).T return d,x_poly n=25 trueorder=2 data,x_poly=make_continuous_data_poly(N=n,order=trueorder) # fit models of increasing complexity npolyorders=7 plt.figure() plt.scatter(data[:,0],data[:,1]) plt.title('fitted data') xp=numpy.linspace(numpy.min(data[:,0]),numpy.max(data[:,0]),100) for i in range(npolyorders): f = numpy.polyfit(data[:,0], data[:,1], i) p=numpy.poly1d(f) plt.plot(xp,p(xp)) plt.legend(['%d'%i for i in range(npolyorders)]) # compute in-sample and out-of-sample error using LOO loo=LeaveOneOut() pred=numpy.zeros((n,npolyorders)) mean_trainerr=numpy.zeros(npolyorders) prederr=numpy.zeros(npolyorders) for i in range(npolyorders): ctr=0 trainerr=numpy.zeros(n) for train,test in loo.split(range(data.shape[0])): f = numpy.polyfit(data[train,0], data[train,1], i) p=numpy.poly1d(f) trainerr[ctr]=numpy.sqrt(numpy.mean((data[train,1]-p(data[train,0]))**2)) pred[test,i]=p(data[test,0]) ctr+=1 mean_trainerr[i]=numpy.mean(trainerr) prederr[i]=numpy.sqrt(numpy.mean((data[:,1]-pred[:,i])**2)) plt.plot(range(npolyorders),mean_trainerr) plt.plot(range(npolyorders),prederr,color='red') plt.xlabel('Polynomial order') plt.ylabel('root mean squared error') plt.legend(['training error','test error'],loc=9) plt.plot([numpy.argmin(prederr),numpy.argmin(prederr)], [numpy.min(mean_trainerr),numpy.max(prederr)],'k--') plt.text(0.5,numpy.max(mean_trainerr),'underfitting') plt.text(4.5,numpy.max(mean_trainerr),'overfitting') print('True order:',trueorder) print('Order estimated by cross validation:',numpy.argmin(prederr)) """ Explanation: Model selection Often when we are fitting models to data we have to make decisions about the complexity of the model; after all, if the model has as many parameters as there are data points then we can fit the data exactly, but as we saw above, this model will not generalize very well to other datasets. To see how we can use cross-validation to select our model complexity, let's generate some data with a certain polynomial order, and see whether crossvalidation can find the right model order. End of explanation """ def make_larger_dataset(beta,n,sd=1): X=numpy.random.randn(n,len(beta)) # design matrix beta=numpy.array(beta) y=X.dot(beta)+numpy.random.randn(n)*sd return(y-numpy.mean(y),X) """ Explanation: Bias-variance tradeoffs Another way to think about model complexity is in terms of bias-variance tradeoffs. Bias is the average distance between the prediction of our model and the correct value, whereas variance is the average distance between different predictions from the model. In standard statistics classes it is often taken as a given that an unbiased estimate is always best, but within machine learning we will often see that a bit of bias can go a long way towards reducing variance, and that some kinds of bias make particular sense. Let's start with an example using linear regression. 
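Before the regression example, here is a tiny, self-contained illustration of the trade-off (a sketch; the shrinkage factor of 0.7 and the other numbers are arbitrary choices, not part of the analysis that follows):
End of explanation
"""

# hedged illustration: estimate a population mean of 0.5 from samples of size 10,
# using either the plain sample mean (unbiased) or a mean shrunk towards zero
# (biased, but lower variance). The shrunken estimator ends up with lower
# overall squared error even though it is biased.
true_mean=0.5
nsim=10000
samples=numpy.random.randn(nsim,10)+true_mean
plain=numpy.mean(samples,axis=1)      # unbiased estimator
shrunk=0.7*plain                      # biased estimator, shrunk towards zero
print('MSE of plain sample mean: %f'%numpy.mean((plain-true_mean)**2))
print('MSE of shrunken mean: %f'%numpy.mean((shrunk-true_mean)**2))

"""
Explanation: The same trade-off shows up in regression, which is what we look at next.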
First, we will generate a dataset with 20 variables and 100 observations, but only two of the variables are actually related to the outcome (the rest are simply random noise). End of explanation """ def compare_lr_lasso(n=100,nvars=20,n_splits=8,sd=1): beta=numpy.zeros(nvars) beta[0]=1 beta[1]=-1 y,X=make_larger_dataset(beta,100,sd=1) kf=KFold(n_splits=n_splits,shuffle=True) pred_lr=numpy.zeros(X.shape[0]) coefs_lr=numpy.zeros((n_splits,X.shape[1])) pred_lasso=numpy.zeros(X.shape[0]) coefs_lasso=numpy.zeros((n_splits,X.shape[1])) lr=LinearRegression() lasso=LassoCV() ctr=0 for train,test in kf.split(X): Xtrain=X[train,:] Ytrain=y[train] lr.fit(Xtrain,Ytrain) lasso.fit(Xtrain,Ytrain) pred_lr[test]=lr.predict(X[test,:]) coefs_lr[ctr,:]=lr.coef_ pred_lasso[test]=lasso.predict(X[test,:]) coefs_lasso[ctr,:]=lasso.coef_ ctr+=1 prederr_lr=numpy.sum((pred_lr-y)**2) prederr_lasso=numpy.sum((pred_lasso-y)**2) return [prederr_lr,prederr_lasso],numpy.mean(coefs_lr,0),numpy.mean(coefs_lasso,0),beta """ Explanation: Now let's fit two different models to the data that we will generate. First, we will fit a standard linear regression model, using ordinary least squares. This is the best linear unbiased estimator for the regression model. We will also fit a model that uses regularization, which places some constraints on the parameter estimates. In this case, we use the Lasso model, which minimizes the sum of squares while also constraining (or penalizing) the sum of the absolute parameter estimates (known as an L1 penalty). The parameter estimates of this model will be biased towards zero, and will be sparse, meaning that most of the estimates will be exactly zero. One complication of the Lasso model is that we need to select a value for the alpha parameter, which determines how much penalty there will be. We will use crossvalidation within the training data set to do this; the sklearn LassoCV() function does it for us automatically. Let's generate a function that can run both standard regression and Lasso regression. End of explanation """ nsims=100 prederr=numpy.zeros((nsims,2)) lrcoef=numpy.zeros((nsims,20)) lassocoef=numpy.zeros((nsims,20)) for i in range(nsims): prederr[i,:],lrcoef[i,:],lassocoef[i,:],beta=compare_lr_lasso() print('mean sum of squared error:') print('linear regression:',numpy.mean(prederr,0)[0]) print('lasso:',numpy.mean(prederr,0)[1]) """ Explanation: Let's run the simulation 100 times and look at the average parameter estimates. End of explanation """ coefs_df=pandas.DataFrame({'True value':beta,'Regression (mean)':numpy.mean(lrcoef,0),'Lasso (mean)':numpy.mean(lassocoef,0), 'Regression(stdev)':numpy.std(lrcoef,0),'Lasso(stdev)':numpy.std(lassocoef,0)}) coefs_df """ Explanation: The prediction error for the Lasso model is substantially less than the error for the linear regression model. What about the parameters? Let's display the mean parameter estimates and their variabilty across runs. End of explanation """ nsims=100 prederr=numpy.zeros((nsims,2)) lrcoef=numpy.zeros((nsims,1000)) lassocoef=numpy.zeros((nsims,1000)) for i in range(nsims): prederr[i,:],lrcoef[i,:],lassocoef[i,:],beta=compare_lr_lasso(nvars=1000) print('mean sum of squared error:') print('linear regression:',numpy.mean(prederr,0)[0]) print('lasso:',numpy.mean(prederr,0)[1]) """ Explanation: Another place where regularization is essential is when your data are wider than they are tall - that is, when you have more variables than observations. 
This is almost always the case for brain imaging data, where the number of voxels far exceeds the number of subjects or events. In this case, the ordinary least squares solution is ill-posed, meaning that it has an infinite number of possible solutions. The sklearn LinearRegression() estimator will return an estimate even in this case, but the parameter estimates will be highly variable. However, we can use a regularized regression technique to find more robust estimates.
Let's run the same simulation, but now put 1000 variables instead of 20. This will take a few minutes to execute.
End of explanation
"""

nsims=100
prederr=numpy.zeros((nsims,2))
lrcoef=numpy.zeros((nsims,1000))
lassocoef=numpy.zeros((nsims,1000))

for i in range(nsims):
    prederr[i,:],lrcoef[i,:],lassocoef[i,:],beta=compare_lr_lasso(nvars=1000)

print('mean sum of squared error:')
print('linear regression:',numpy.mean(prederr,0)[0])
print('lasso:',numpy.mean(prederr,0)[1])

"""
Explanation: End of explanation
"""
nkundiushuti/pydata2017bcn
TensorBoardDemo.ipynb
gpl-3.0
from keras.models import Model from keras.layers import Convolution2D, BatchNormalization, MaxPooling2D, Flatten, Dense from keras.layers import Input, Dropout from keras.layers.advanced_activations import ELU from keras.regularizers import l2 from keras.optimizers import SGD import tensorflow as tf from settings import * import numpy as np import os import dataset from dataset import MyDataset db=MyDataset(feature_dir=os.path.join('./IRMAS-Sample', 'features', 'Training'), batch_size=8, time_context=128, step=50, suffix_in='_mel_',suffix_out='_label_',floatX=np.float32,train_percent=0.8) val_data = db() """ Explanation: Training Visualization In this part we’ll see how to create a simple but wrong model with Keras, and, gradually, how it can be improved with step-by-step debugging and understanding with TensorBoard. Please, download log files first. Let's start from importing all necessary components/layers for the future CNN. End of explanation """ def build_model(n_classes): input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1) channel_axis = 3 melgram_input = Input(shape=input_shape) m_size = 70 n_size = 3 n_filters = 64 maxpool_const = 4 x = Convolution2D(n_filters, (m_size, n_size), padding='same', kernel_initializer='zeros', kernel_regularizer=l2(1e-5))(melgram_input) x = BatchNormalization(axis=channel_axis)(x) x = ELU()(x) x = MaxPooling2D(pool_size=(N_MEL_BANDS, SEGMENT_DUR/maxpool_const))(x) x = Flatten()(x) x = Dropout(0.5)(x) x = Dense(n_classes, kernel_initializer='zeros', kernel_regularizer=l2(1e-5), activation='softmax', name='prediction')(x) model = Model(melgram_input, x) return model model = build_model(IRMAS_N_CLASSES) """ Explanation: Toy convolutional model for classification First, we create a skeleton for a model with one convolutional and one dense layer End of explanation """ init_lr = 0.001 optimizer = SGD(lr=init_lr, momentum=0.9, nesterov=True) """ Explanation: We can train the model on IRMAS data using the training procedure below. First, we have to define the optimizer. We're using Stochastic Gradient Descent with Momentum End of explanation """ model.summary() model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) """ Explanation: Now we can check the model structure, specify which metrics we would like to keep eye on and compile the model. End of explanation """ model.fit_generator(db, steps_per_epoch=4, epochs=4, verbose=2, validation_data=val_data, class_weight=None, workers=1) """ Explanation: From the previous part, we have two generators which can provide us training samples and validation samples. We will use them during the training. We also specify the number of steps per epoch, the total number of epoch and the log verbosity level End of explanation """ from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping, TensorBoard early_stopping = EarlyStopping(monitor='val_loss', patience=EARLY_STOPPING_EPOCH) save_clb = ModelCheckpoint("{weights_basepath}/".format(weights_basepath=MODEL_WEIGHT_BASEPATH) + "epoch.{epoch:02d}-val_loss.{val_loss:.3f}", monitor='val_loss', save_best_only=True) """ Explanation: As we can see, neither validation nor the training metrics have improved, so we need to explore that's wrong with the model. Keras Callbacks will help us in this. Keras Callbacks The Callback in Keras is a set of functions to be applied to a certain event during the training process. 
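Writing one is straightforward: subclass Callback and override only the hooks you need. A minimal, hypothetical sketch (the class name LossLogger is made up for illustration and is not used elsewhere in this demo):
End of explanation
"""

from keras.callbacks import Callback

class LossLogger(Callback):
    """Toy callback: print the training loss at the end of every epoch."""
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        print('epoch %d finished, loss = %s' % (epoch, logs.get('loss')))

"""
Explanation: Every callback hooks into one or more of the training events.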
The typical triggers for events are: * on_epoch_begin * on_epoch_end * on_batch_begin * on_batch_end * on_train_begin * on_train_end There are some useful callbacks: End of explanation """ tb = TensorBoard(log_dir='./example_1', write_graph=True, write_grads=True, write_images=True, histogram_freq=1) # if we want to compute activations and weight histogram, we need to specify the validation data for that. tb.validation_data = val_data """ Explanation: Let's get acquainted with the TensorBoard Callback. The parameters are: * log_dir - where to store the logs, metadata, and events of the model training process * write_graph - whether or not to write the graph of data and control dependencies * write_grads - whether or not to save the parameters of the model for visualisation * histogram_freq - how often to save the parameters of the model * write_images - whether or not to save the weight and visualise them as images End of explanation """ model.fit_generator(db, steps_per_epoch=1, # change to STEPS_PER_EPOCH epochs=1, # change to MAX_EPOCH_NUM verbose=2, validation_data=val_data, callbacks=[save_clb, early_stopping, tb], class_weight=None, workers=1) """ Explanation: Now we can add the callbacks to the training process and observe the corresponding events and obtain the corresponding logs. End of explanation """ def build_model(n_classes): input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1) channel_axis = 3 melgram_input = Input(shape=input_shape) m_size = 70 n_size = 3 n_filters = 64 maxpool_const = 4 x = Convolution2D(n_filters, (m_size, n_size), padding='same', kernel_initializer='he_normal', kernel_regularizer=l2(1e-5))(melgram_input) x = BatchNormalization(axis=channel_axis)(x) x = ELU()(x) x = MaxPooling2D(pool_size=(N_MEL_BANDS, SEGMENT_DUR/maxpool_const))(x) x = Flatten()(x) x = Dropout(0.5)(x) x = Dense(n_classes, kernel_initializer='he_normal', kernel_regularizer=l2(1e-5), activation='softmax', name='prediction')(x) model = Model(melgram_input, x) return model model = build_model(IRMAS_N_CLASSES) """ Explanation: You can download the event files for all runs from here. Now create the ./logs directory and launch TensorBoard bash tar -xvzf logs.tar.gz cd logs tensorboard --logdir ./example_1 and navigate to http://0.0.0.0:6006 We can notice, that it's almost impossible to see anything on the Graphs tab but we can see vividly that the metrics on the Scalar tab are not improving and the gradients values on the Histograms tab are zero. Our problem is in the weights initialization kernel_initializer='zeros' so now we can fix it and define new model. 
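A small numpy-only sketch of why all-zero weights are so harmful (the shapes here are arbitrary and chosen only for illustration):
End of explanation
"""

# toy backprop step: the gradient flowing back through a layer is multiplied by
# that layer's weights, so zero-initialised weights pass exactly zero gradient
# to every layer before them - the earlier layers never get a learning signal
import numpy as np
x = np.random.rand(4)              # incoming activations
w = np.zeros((4, 3))               # the offending zero-initialised weights
upstream_grad = np.random.rand(3)  # gradient arriving from the layer above
grad_wrt_x = w.dot(upstream_grad)  # gradient passed back to earlier layers
print(grad_wrt_x)                  # all zeros

"""
Explanation: With that in mind, we can rebuild the model with a proper initializer.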
End of explanation """ global_namescope = 'train' def build_model(n_classes): with tf.name_scope('input'): input_shape = (N_MEL_BANDS, SEGMENT_DUR, 1) channel_axis = 3 melgram_input = Input(shape=input_shape) m_size = [5, 5] n_size = [5, 5] n_filters = 64 maxpool_const = 8 with tf.name_scope('conv1'): x = Convolution2D(n_filters, (m_size[0], n_size[0]), padding='same', kernel_initializer='he_uniform')(melgram_input) x = BatchNormalization(axis=channel_axis)(x) x = ELU()(x) x = MaxPooling2D(pool_size=(maxpool_const, maxpool_const))(x) with tf.name_scope('conv2'): x = Convolution2D(n_filters*2, (m_size[1], n_size[1]), padding='same', kernel_initializer='he_uniform')(x) x = BatchNormalization(axis=channel_axis)(x) x = ELU()(x) x = MaxPooling2D(pool_size=(maxpool_const, maxpool_const))(x) x = Flatten()(x) with tf.name_scope('dense1'): x = Dropout(0.5)(x) x = Dense(n_filters, kernel_initializer='he_uniform', name='hidden')(x) x = ELU()(x) with tf.name_scope('dense2'): x = Dropout(0.5)(x) x = Dense(n_classes, kernel_initializer='he_uniform', activation='softmax', name='prediction')(x) model = Model(melgram_input, x) return model model = build_model(IRMAS_N_CLASSES) with tf.name_scope('optimizer'): optimizer = SGD(lr=init_lr, momentum=0.9, nesterov=True) with tf.name_scope('model'): model = build_model(IRMAS_N_CLASSES) # for the sake of memory, only graphs now with tf.name_scope('callbacks'): # The TensorBoard developers are strongly encourage us to use different directories for every run tb = TensorBoard(log_dir='./example_3', write_graph=True) # yes, we need to recompile the model every time with tf.name_scope('compile'): model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) # and preudo-train the model with tf.name_scope(global_namescope): model.fit_generator(db, steps_per_epoch=1, # just one step epochs=1, # one epoch to save the graphs verbose=2, validation_data=val_data, callbacks=[tb], workers=1) """ Explanation: If you will repeat the training process, you may notice that classification performance improved significantly. Have a look at a new log file in the ./example_2 directory and restart TensorBoard to explore new data. bash cd logs tensorboard --logdir ./example_2 --port=6002 TensorFlow name scopes You might have noticed the hell on the Graphs tab. That's because TensorBoard can't connect all the data nodes in the model and operations in the training process together, it's smart enough to group the nodes with similar structure but don't expect too much. In order to make the better graph visualisation, we need to define the name scopes for each logical layer and each operation we want to see as an individual element. 
We can do it just by adding with tf.name_scope(name_scope) clause: End of explanation """ from keras import backend as K if K.backend() == 'tensorflow': import tensorflow as tf from tensorflow.contrib.tensorboard.plugins import projector class TensorBoardHiddenOutputVis(Callback): """Tensorboard Intermediate Outputs visualization callback.""" def __init__(self, log_dir='./logs_embed', batch_size=32, freq=0, layer_names=None, metadata=None, sprite=None, sprite_shape=None): super(TensorBoardHiddenOutputVis, self).__init__() self.log_dir = log_dir self.freq = freq self.layer_names = layer_names # Notice, that only one file is supported in the present callback self.metadata = metadata self.sprite = sprite self.sprite_shape = sprite_shape self.batch_size = batch_size def set_model(self, model): self.model = model self.sess = K.get_session() self.summary_writer = tf.summary.FileWriter(self.log_dir) self.outputs_ckpt_path = os.path.join(self.log_dir, 'keras_outputs.ckpt') if self.freq and self.validation_data: # define tensors to compute outputs on outputs_layers = [layer for layer in self.model.layers if layer.name in self.layer_names] self.output_tensors = [tf.get_default_graph().get_tensor_by_name(layer.get_output_at(0).name) for layer in outputs_layers] # create configuration for visualisation in the same manner as for embeddings config = projector.ProjectorConfig() for i in range(len(self.output_tensors)): embedding = config.embeddings.add() embedding.tensor_name = '{ns}/hidden_{i}'.format(ns=global_namescope, i=i) # Simpliest metadata handler, a single file for all embeddings if self.metadata: embedding.metadata_path = self.metadata # Sprite image handler if self.sprite and self.sprite_shape: embedding.sprite.image_path = self.sprite embedding.sprite.single_image_dim.extend(self.sprite_shape) # define TF variables to store the hidden outputs during the training # Notice, that only 1D outputs are supported self.hidden_vars = [tf.Variable(np.zeros((len(self.validation_data[0]), self.output_tensors[i].shape[1]), dtype='float32'), name='hidden_{}'.format(i)) for i in range(len(self.output_tensors))] # add TF variables into computational graph for hidden_var in self.hidden_vars: self.sess.run(hidden_var.initializer) # save the config and setup TF saver for hidden variables projector.visualize_embeddings(self.summary_writer, config) self.saver = tf.train.Saver(self.hidden_vars) def on_epoch_end(self, epoch, logs=None): if self.validation_data and self.freq: if epoch % self.freq == 0: val_data = self.validation_data tensors = (self.model.inputs + self.model.targets + self.model.sample_weights) all_outputs = [[]]*len(self.output_tensors) if self.model.uses_learning_phase: tensors += [K.learning_phase()] assert len(val_data) == len(tensors) val_size = val_data[0].shape[0] i = 0 # compute outputs batch by batch on validation data while i < val_size: step = min(self.batch_size, val_size - i) batch_val = [] batch_val.append(val_data[0][i:i + step]) batch_val.append(val_data[1][i:i + step]) batch_val.append(val_data[2][i:i + step]) if self.model.uses_learning_phase: batch_val.append(val_data[3]) feed_dict = dict(zip(tensors, batch_val)) tensor_outputs = self.sess.run(self.output_tensors, feed_dict=feed_dict) for output_idx, tensor_output in enumerate(tensor_outputs): all_outputs[output_idx].extend(tensor_output) i += self.batch_size # rewrite the current state of hidden outputs with new values for idx, embed in enumerate(self.hidden_vars): embed.assign(np.array(all_outputs[idx])).eval(session=self.sess) 
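                # checkpoint the updated hidden-output variables so the TensorBoard
                # projector can load the embeddings saved for this epoch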
self.saver.save(self.sess, self.outputs_ckpt_path, epoch) self.summary_writer.flush() def on_train_end(self, _): self.summary_writer.close() """ Explanation: Have a look at a new log file in the ./example_3 directory and restart TensorBoard to explore new data. bash cd logs tensorboard --logdir ./example_3 Embeddings and Hidden Layers Output Visualisation With TensorBoard we can also visualise the embeddings of the model. In order to do it, you can add Embedding layer to you model. To visualize the outputs of intermediate layers, we can write our custom callback and use it to store the outputs on validation data during the training process. We will follow the notation from TensorBoard callback, but add some functionality: layer_names - a list of names of layers to keep eye on metadata - a path to a TSV file with associated meta information (labels, notes, etc.), format and details sprite - a path to a sprite image, format and details sprite_shape - a list with values [M, N], the dimensionality of a single image, format and details End of explanation """ layers_to_monitor = ['hidden'] # find the files precomputed in ./logs_embed directory metadata_file_name = 'metadata.tsv' sprite_file_name = 'sprite.png' sprite_shape = [N_MEL_BANDS, SEGMENT_DUR] with tf.name_scope('callbacks'): tbe = TensorBoardHiddenOutputVis(log_dir='./logs_embed', freq=1, layer_names=layers_to_monitor, metadata=metadata_file_name, sprite=sprite_file_name, sprite_shape=sprite_shape) tbe.validation_data = val_data with tf.name_scope('compile'): model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) with tf.name_scope(global_namescope): model.fit_generator(db, steps_per_epoch=1, # change to STEPS_PER_EPOCH epochs=1, # change to MAX_EPOCH_NUM verbose=2, callbacks=[tbe], validation_data=val_data, class_weight=None, workers=1) """ Explanation: Now we can add the new callback, recompile and retrain the model. End of explanation """
dboonz/aspp2015
Advanced NumPy Patterns.ipynb
bsd-3-clause
gene0 = [100, 200, 50, 400] gene1 = [50, 0, 0, 100] gene2 = [350, 100, 50, 200] expression_data = [gene0, gene1, gene2] """ Explanation: Intro Juan Nunez-Iglesias Victorian Life Sciences Computation Initiative (VLSCI) University of Melbourne Quick example: gene expression, without numpy | | Cell type A | Cell type B | Cell type C | Cell type D | |--------|-------------|-------------|-------------|-------------| | Gene 0 | 100 | 200 | 50 | 400 | | Gene 1 | 50 | 0 | 0 | 100 | | Gene 2 | 350 | 100 | 50 | 200 | End of explanation """ import numpy as np a = np.array(expression_data) print(a) """ Explanation: Why is this a bad idea? Now with NumPy End of explanation """ def print_info(a): print('number of elements:', a.size) print('number of dimensions:', a.ndim) print('shape:', a.shape) print('data type:', a.dtype) print('strides:', a.strides) print('flags:') print(a.flags) print_info(a) print(a.data) print(a.flatten()) print(a.ravel()) bbytes = a.flatten().view(dtype=np.uint8) # performance issues with C_alignment vs Fortran alignment big_3d_image = np.random.rand(250,250,250) print_info(big_3d_image) %%timeit big_3d_image = np.random.rand(250,250,250) #big_3d_image *=5 #for plane in big_3d_image: # plane *= 5 # fortran contiguous for i in range(big_3d_image.shape[1]): big_3d_image[:,:,i] *=5 #brighter_3d_image = big_3d_image*5.1 bbytes # a.ravel gives a flattened version of the array. Difference with flatten: abytes = a.ravel().view(dtype=np.uint8) print_info(abytes) print(abytes[:24]) """ Explanation: We are going to: Obtain an RPKM expression matrix Quantile normalize the data RKPM: Reads per kilobase per million reads Blabla about gene expression using the awesome power of NumPy Inside a numpy ndarray End of explanation """ print_info(a) print_info(a.T) """ Explanation: Example: take the transpose of a End of explanation """ print_info(a.T) print_info(a.T[::2]) print_info(a.T[::2, ::2]) """ Explanation: Example: skipping rows and columns with slicing End of explanation """ b = a print(b) a[0, 0] = 5 print(b) a[0, 0] = 100 """ Explanation: Getting a copy End of explanation """ expr = np.load('expr.npy') print_info(expr) """ Explanation: Advanced operations: axis-wise evaluation End of explanation """ lib_size = np.sum(expr, axis=0) """ Explanation: This has the raw read count data. 20500 genes are read for 375 patients. However, each sample gets a different number of reads, so we want to normalize by the library size, which is the total number of reads across a column. (The total number of reads of one person) The np.sum function returns the sum of all the elements of an array. With the axis argument, you can take the sum along the given axis. End of explanation """ x = np.random.rand(10,3) def find_closest_value_per_column(x, value=0.75): xacc = abs(x - value) return np.argmin(xacc, axis=1) find_closest_value_per_column(x) np.argmax(x) %pylab inline plot? x = np.random.rand(100,100) x[20:30,20:30] = 3 x[25,25] = 3.5 imshow(x, cmap='gray') xm, ym =unravel_index(max_idx, x.shape) max_idx = np.argmax(x) unravel_index(max_idx, x.shape) plot(ym, xm, 'ro') """ Explanation: Exercise Generate a 10 x 3 array of random numbers. From each row, pick the number closest to 0.75. Make use of np.abs and np.argmax to find the column j which contains the closest element in each row. 
End of explanation """ print(expr.shape) print(lib_size.shape) print(lib_size[np.newaxis, :].shape) """ Explanation: Advanced operations: broadcasting In order to normalize every column by its corresponding library size, we have to align the two arrays' axes: each dimension must be either the same size, or one of the arrays must have size 1. Use np.newaxis to match the dimensions. End of explanation """ np.all(expr / lib_size == expr / lib_size[np.newaxis, :]) expr_lib = expr / lib_size """ Explanation: However, NumPy will automatically prepend singleton dimensions until the array shapes match or there is an error: End of explanation """ expr_lib *= 1e6 imshow(expr_lib) """ Explanation: We also multiply by $10^6$ in order to keep the numbers on a readable scale (reads per million reads). End of explanation """ gene_len = np.load('gene-lens.npy') print(gene_len.shape) """ Explanation: Finally, longer genes are more likely to produce reads. So we normalize by the gene length (in kb) to produce a measure of expression called Reads Per Kilobase per Million reads (RPKM). End of explanation """ gene_len.shape lib_size.shape #rpkm[i,j] = 10**11 * expr[i,j] / (lib_size[j]*gene_size[i]) # option 1: Use np.newaxis %timeit 10**3 * expr_lib / gene_len[:, np.newaxis] # probably better rpkm = 10**3 * expr_lib / gene_len[:, np.newaxis] # have the expr_lib matrix dancing around untill it works rpkm2 = (10**3* expr_lib.T / gene_len).T rpkm1 - rpkm2 %timeit (10**11* (expr_lib / lib_size ).T / gene_len).T from matplotlib import pyplot as plt from scipy import stats def plot_col_density(data, xlim=None, *args, **kwargs): # Use gaussian smoothing to estimate the density density_per_col = [stats.kde.gaussian_kde(col) for col in data.T] if xlim is not None: m, M = xlim else: m, M = np.min(data), np.max(data) x = np.linspace(m, M, 100) plt.figure() for density in density_per_col: plt.plot(x, density(x), *args, **kwargs) plt.xlabel('log-counts') plt.ylabel('frequency') if xlim is not None: plt.xlim(xlim) plt.show() %matplotlib inline plt.style.use('ggplot') plot_col_density(np.log(expr+1)) plot_col_density(np.log(rpkm + 1), xlim=(0, 20)) """ Explanation: Exercise: broadcast expr_lib and gene_len together to produce RPKM End of explanation """ x = np.random.rand(3, 5) y = np.random.randint(10, size=8) z = x[..., newaxis] + y#[:,newaxis, newaxis] print(x.shape) print(y.shape) # To use broadcasting efficiently, the dimensions need to either match or be one. # This is the explicit way. x[newaxis,:,:] + y[:,newaxis, newaxis] """ Explanation: Exercise: 3D broadcasting Below, produce the array containing the sum of every element in x with every element in y End of explanation """ def repeat(arr, n): return np.lib.stride_tricks.as_strided(arr, shape=(n,) + arr.shape, strides=(0,) + arr.strides) a = repeat(np.random.rand(5), 4) a print_info(a) """ Explanation: Exercise: explicit broadcasting and stride tricks Use np.broadcast_arrays to get the same-shape arrays that numpy adds together. Then use print_info on the output. Notice anything weird? Stride tricks By manipulating the shape and strides of an array, we can produce a "virtual" array much bigger than its memory usage: End of explanation """ def sliding_window(arr, size=2): """Produce an array of sliding window views of `arr` Parameters ---------- arr : 1D array, shape (N,) The input array. size : int, optional The size of the sliding window. Returns ------- arr_slide : 2D array, shape (N - size + 1, size) The sliding windows of size `size` of `arr`. 
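    Notes
    -----
    The implementation below relies on ``np.lib.stride_tricks.as_strided``, so
    the returned windows are overlapping views into ``arr`` (no data is copied)
    and no bounds checking is performed; it is safest to treat the result as
    read-only.
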
Examples -------- >>> a = np.array([0, 1, 2, 3]) >>> sliding_window(a, 2) array([[0, 1], [1, 2], [2, 3]]) """ N = len(arr) desired_shape = (N - size + 1, size) s = arr.strides[0] # like this it will be independent on the datatype return np.lib.stride_tricks.as_strided(arr, shape=desired_shape, strides=(s, s)) #return arr # fix this # test your code here %timeit sliding_window(np.random.rand(1e5), 3).sum(axis=1)/3. """ Explanation: Exercise: np.lib.stride_tricks.as_strided Use as_strided to produce a sliding-window view of a 1D array. End of explanation """ values = np.array([0, 5, 99]) selector = np.random.randint(0, 3, size=(3, 4)) print(selector) print(values[selector]) """ Explanation: Fancy indexing You can index arrays with slicing, but also with boolean arrays (including broadcasting!), integer arrays, and individual indices along multiple dimensions. End of explanation """ np.mgrid[0:4,0:3][1] repeat(np.arange(0,3),4) testlist = np.array([1.1,2.1, 2.1, 2.1, 3.1, 6.1, 4.1, 5.1]) indices = np.argsort(testlist) unique_items,number_of_occurences = np.unique(testlist, return_counts=True) indices, unique_items, number_of_occurences index = 0 while index < len(testlist): if number_of_occurences[i] == 1: pass else: indices[index+1:index+number_of_occurences[i]] -= np.arange(number_of_occurences[i]-1) indices[index+number_of_occurences[i]:] -= number_of_occurences[i]-1 index += 1 indices index np.unique(testdata) for r in testdata.T: print (np.unique(r, return_counts=True)) np.argsort? testdata = np.loadtxt('./testdata.dat', delimiter=',') print(testdata) # convert into an array with rank values according to column rank_idx = np.argsort(np.argsort(testdata,axis=0), axis=0) #np.argsort(rank_idx, axis=0) # TODO SOLVE FOR 2 values of 4 # sort the input data sorted_testdata = np.sort(testdata, axis=0) # determine the ranks by computing the mean for each row ranks = np.mean(sorted_testdata, axis=1) ranks[rank_idx] #print(rank_idx) #testdata[rank_idx, np.mgrid[0:4,0:3][1]] np.sort(testdata, axis=0) def qnorm(x): """Quantile normalize an input matrix. Parameters ---------- x : 2D array of float, shape (M, N) The input data, with each column being a distribution to normalize. Returns ------- xn : 2D array of float, shape (M, N) The normalized data. """ # convert into an array with rank values according to column rank_idx = np.argsort(np.argsort(x,axis=0), axis=0) # sort the input data sorted_x = np.sort(x, axis=0) # determine the ranks by computing the mean for each row ranks = np.mean(sorted_x, axis=1) xn = ranks[rank_idx] return xn logexpr = np.log(expr + 1) logrpkm = np.log(rpkm + 1) logexprn = qnorm(logexpr) logrpkmn = qnorm(logrpkm) plot_col_density(logexprn) plot_col_density(logrpkmn, xlim=(0, 0.25)) """ Explanation: Exercise: quantile normalization Quantile Normalization(https://en.wikipedia.org/wiki/Quantile_normalization) is a method to align distributions. Implement it using NumPy axis-wise operations and fancy indexing. Hint: look for documentation for scipy.mstats.rankdata, np.sort, and np.argsort. End of explanation """ # "data" ni, nj, nk = (10, 15, 20) amplitude = np.random.rand(ni, nj, nk) horizon = np.random.randint(5, 15, size=(ni, nj)) """ Explanation: Advanced exercise Jack's dilemma (If time permits.) email Date: Wed, 16 Jul 2008 16:45:37 -0500 From: Jack Cook To: &lt;numpy-discussion@scipy.org&gt; Subject: Numpy Advanced Indexing Question Greetings, I have an I,J,K 3D volume of amplitude values at regularly sampled time intervals. 
I have an I,J 2D slice which contains a time (K) value at each I, J location. What I would like to do is extract a subvolume at a constant +/- K window around the slice. Is there an easy way to do this using advanced indexing or some other method? Thanks in advanced for your help. -- Jack End of explanation """ import numpy as np from mutable_str import MutableString s = MutableString('abcde') # --- EDIT THIS SECTION --- # Create an array interface to this foreign object s.__array_interface__ = {'data' : (XXX, False), # (ptr, is read_only?) 'shape' : XXX, 'typestr' : '|u1', # typecode unsigned character } # --- EDIT THIS SECTION --- print 'String before converting to array:', s sa = np.asarray(s) print 'String after converting to array:', sa sa += 2 print 'String after adding "2" to array:', s """ Explanation: Even more advanced: NumPy Array Interface An author of a foreign package (included with the exercizes as problems/mutable_str.py) provides a string class that allocates its own memory: ipython In [1]: from mutable_str import MutableString In [2]: s = MutableString('abcde') In [3]: print s abcde You'd like to view these mutable (mutable means the ability to modify in place) strings as ndarrays, in order to manipulate the underlying memory. Add an array_interface dictionary attribute to s, then convert s to an ndarray. Numerically add "2" to the array (use the in-place operator +=). Then print the original string to ensure that its value was modified. Hint: Documentation for NumPy's __array_interface__ may be found in the online docs. Here's a skeleton outline: End of explanation """
Applied-Groundwater-Modeling-2nd-Ed/Chapter_4_problems-1
P4.5_Flopy_Hubbertville_areal_model_BCs.ipynb
gpl-2.0
%matplotlib inline import sys import os import shutil import numpy as np from subprocess import check_output # Import flopy import flopy """ Explanation: <img src="AW&H2015.tiff" style="float: left"> <img src="flopylogo.png" style="float: center"> Problem P4.5 Hubbertville Areal Model Perimeter Boundary Conditions In Problem P4.3 in Anderson, Woessner and Hunt (2015), we are asked to construct an areal 2D model to assess impacts of pumping. The town of Hubbertville is planning to expand its water supply by constructing a pumping well in an unconfined gravel aquifer (Fig. P4.3). The well is designed to pump constantly at a rate of 20,000 m3/day. Well construction was halted by the State Fish and Game Service who manage the Green Swamp Conservation area. The agency claimed that pumping would “significantly reduce” groundwater discharge to the swamp and damage waterfowl habitat. The town claimed the fully penetrating river boundary to the north and the groundwater divide located near the center of the valley would prevent any change in flow to the swamp. Building on P4.3, in Problem P4.5 on page 175 we are asked to replace the constant head boundaries at both the river and swamp in Problem P4.3a with specified flow boundaries. Use the water balance results from Problem P4.3a to calculate the boundary fluxes at the river and swamp. Note that all the boundaries of the model are now specified flow boundaries, including the zero flow lateral boundary conditions. Part a. Run the model with specified flow boundaries using starting heads of 1000 m and then a second time with starting heads of 2000 m. (Note: Some GUIs will warn that using all specified flow boundaries can create errors, or will not permit the model to execute under these conditions). Compare the results with those in P4.3a and explain the differences. In this notebook, we will again work through the problem using MODFLOW and the Python tool set Flopy. Notice how much code is reused from P4.1 because the variable names remained the same. <img src="P4.3_figure.tiff" style="float: center"> Below is an iPython Notebook that builds upon the Python Notebook created in Problem P4.3. See the Github wiki associated with this Chapter for information on one suggested installation and setup configuration for Python and iPython Notebook. [Acknowledgements: This tutorial was created by Randy Hunt and all failings are mine. The exercise here has benefited greatly from the online Flopy tutorial and example notebooks developed by Chris Langevin and Joe Hughes for the USGS Spring 2015 Python Training course GW1774] Creating the Model Using the same steps as in P4.3, we will create a simple groundwater flow model by following the tutorial included on the Flopy website. We will make a few small changes so that the tutorial works with our file structure. Visit the tutorial website here. Setup the Notebook Environment and Import Flopy Load a few standard libraries, and then load flopy. End of explanation """ # Set the name of the path to the model working directory dirname = "P4-5_Hubbertville" datapath = os.getcwd() modelpath = os.path.join(datapath, dirname) print 'Name of model path: ', modelpath # Now let's check if this directory exists. If not, then we will create it. if os.path.exists(modelpath): print 'Model working directory already exists.' else: print 'Creating model working directory.' 
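    # create the working directory so flopy has somewhere to write the MODFLOW input and output files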
os.mkdir(modelpath) """ Explanation: Setup a New Directory and Change Paths For this tutorial, we will work in a new subdirectory underneath the directory where the notebook is located. We can use some fancy Python tools to help us manage the directory creation. Note that if you encounter path problems with this workbook, you can stop and then restart the kernel and the paths will be reset. End of explanation """ # model domain and grid definition # for clarity, user entered variables are all caps; python syntax are lower case or mixed case # In a contrast to P4.1 and P4.2, this is an areal 2D model LX = 4500. LY = 11000. # note that there is an added 500m on the top and bottom to represent the boundary conditions,that leaves an aqufier lenght of 10000 m ZTOP = 1030. # the system is unconfined so set the top above land surface so that the water table never > layer top ZBOT = 980. NLAY = 1 NROW = 22 NCOL = 9 DELR = LX / NCOL # recall that MODFLOW convention is DELR is along a row, thus has items = NCOL; see page XXX in AW&H (2015) DELC = LY / NROW # recall that MODFLOW convention is DELC is along a column, thus has items = NROW; see page XXX in AW&H (2015) DELV = (ZTOP - ZBOT) / NLAY BOTM = np.linspace(ZTOP, ZBOT, NLAY + 1) HK = 50. VKA = 1. RCH = 0.001 WELLQ = 0. #recall MODFLOW convention, negative means pumped out of the model domain (=aquifer) print "DELR =", DELR, " DELC =", DELC, ' DELV =', DELV print "BOTM =", BOTM print "Recharge =", RCH print "Pumping well rate =", WELLQ """ Explanation: Define the Model Extent, Grid Resolution, and Characteristics It is normally good practice to group things that you might want to change into a single code block. This makes it easier to make changes and rerun the code. End of explanation """ # Assign name and create modflow model object modelname = 'P4-5' #exe_name = os.path.join(datapath, 'mfnwt.exe') # for Windows OS exe_name = os.path.join(datapath, 'mfnwt') # for Mac OS print 'Model executable: ', exe_name MF = flopy.modflow.Modflow(modelname, exe_name=exe_name, model_ws=modelpath) """ Explanation: Create the MODFLOW Model Object Create a flopy MODFLOW object: flopy.modflow.Modflow. End of explanation """ # Create the discretization object TOP = ZTOP * np.ones((NROW, NCOL),dtype=np.float) DIS_PACKAGE = flopy.modflow.ModflowDis(MF, NLAY, NROW, NCOL, delr=DELR, delc=DELC, top=TOP, botm=BOTM[1:], laycbd=0) # print DIS_PACKAGE #uncomment this on far left to see information about the flopy object """ Explanation: Discretization Package Create a flopy discretization package object: flopy.modflow.ModflowDis. End of explanation """ # Variables for the BAS package IBOUND = np.ones((NLAY, NROW, NCOL), dtype=np.int32) # all nodes are active (IBOUND = 1) # In this problem we have no specified heads print IBOUND STRT = 1000 * np.ones((NLAY, NROW, NCOL), dtype=np.float32) # P4.5a starting head =1000 m throughout model domain print STRT BAS_PACKAGE = flopy.modflow.ModflowBas(MF, ibound=IBOUND, strt=STRT) # print BAS_PACKAGE # uncomment this at far left to see the information about the flopy BAS object """ Explanation: Basic Package Create a flopy basic package object: flopy.modflow.ModflowBas. 
End of explanation """ LPF_PACKAGE = flopy.modflow.ModflowUpw(MF, laytyp=1, hk=HK, vka=VKA) # we defined the K and anisotropy at top of file # print LPF_PACKAGE # uncomment this at far left to see the information about the flopy LPF object """ Explanation: Upstream Weighting Package (like Layer Property Flow Package) Create a flopy layer property flow package object: flopy.modflow.ModflowUpw. End of explanation """ STRESS_PERIOD_DATA = [ [0, 0, 0, -2500], #layer, row, column, Specified flux Q from P4.3 northern BC [0, 0, 1, -2500], #remember Python indexing is zero based [0, 0, 2, -2500], [0, 0, 3, -2500], [0, 0, 4, -2500], [0, 0, 5, -2500], [0, 0, 6, -2500], [0, 0, 7, -2500], [0, 0, 8, -2500], [0, 21, 0, -2500], #layer, row, column, Specified flux Q from P4.3 southern BC [0, 21, 1, -2500], #remember Python indexing is zero based [0, 21, 2, -2500], [0, 21, 3, -2500], [0, 21, 4, -2500], [0, 21, 5, -2500], [0, 21, 6, -2500], [0, 21, 7, -2500], [0, 21, 8, -2500]] WEL_PACKAGE = flopy.modflow.ModflowWel(MF, stress_period_data=STRESS_PERIOD_DATA) # remember python 0 index, layer 0 = layer 1 in MF #print WEL_PACKAGE # uncomment this at far left to see the information about the flopy WEL object """ Explanation: Well Package Create a flopy output control object: flopy.modflow.ModflowWel. End of explanation """ OC_PACKAGE = flopy.modflow.ModflowOc(MF) # we'll use the defaults for the model output # print OC_PACKAGE # uncomment this at far left to see the information about the flopy OC object """ Explanation: Output Control Create a flopy output control object: flopy.modflow.ModflowOc. End of explanation """ NWT_PACKAGE = flopy.modflow.ModflowNwt(MF, headtol=0.001, fluxtol=5000, maxiterout=900) # print NWT_PACKAGE # uncomment this at far left to see the information about the flopy PCG object """ Explanation: Newton-Raphson Solver Create a flopy pcg package object: flopy.modflow.ModflowNwt. End of explanation """ #In Problem P4.3 the River and Swamp were specified head BCs. MODFLOW does not include recharge added to a #a specified head node in its water budget. Now that we've changed them to active nodes (IBOUND=1) these would receive #recharge, and our model would be out of balance if we simply plugged in the BC fluxes. Try it, you should see that #perimeter boundary fluxes calculated from P4.3 < recharge specified in P4.5). Therefore, we'll adjust the recharge #array so that the perimeter BC nodes are given a recharge rate = 0. RCH_ARRAY = RCH * np.ones((NROW, NCOL), dtype=np.float32) # set the variable RCH specifed above for model domain RCH_ARRAY[0, :] = 0 #don't forget arrays are zero-based! RCH_ARRAY[-1, :] = 0 #-1 is Python for last in array print RCH_ARRAY RCH_PACKAGE = flopy.modflow.ModflowRch(MF, rech=RCH_ARRAY) # print RCH_PACKAGE # uncomment this at far left to see the information about the flopy RCH object """ Explanation: Recharge Package Create a flopy pcg package object: flopy.modflow.ModflowRch. 
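Because every boundary in this version of the problem is a specified-flow boundary, the model can only reach steady state if the water budget balances exactly. A quick arithmetic check (a sketch using only quantities defined above) shows that the recharge left after zeroing the first and last rows matches the specified perimeter fluxes:
End of explanation
"""

#sanity check: recharge over the 20 interior rows should equal the 18 specified-flow
#boundary cells pumping 2500 m3/day each
recharge_in = RCH * DELR * DELC * (NROW - 2) * NCOL
boundary_out = 2 * NCOL * 2500.
print 'recharge in (m3/day) =', recharge_in
print 'boundary outflow (m3/day) =', boundary_out

"""
Explanation: With the recharge array consistent with the boundary fluxes, we can move on and write the MODFLOW input files.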
End of explanation """ #Before writing input, destroy all files in folder to prevent reusing old files #Here's the working directory print modelpath #Here's what's currently in the working directory modelfiles = os.listdir(modelpath) print modelfiles #delete these files to prevent us from reading old results modelfiles = os.listdir(modelpath) for filename in modelfiles: f = os.path.join(modelpath, filename) if modelname in f: try: os.remove(f) print 'Deleted: ', filename except: print 'Unable to delete: ', filename #Now write the model input files MF.write_input() """ Explanation: Writing the MODFLOW Input Files Before we create the model input datasets, we can do some directory cleanup to make sure that we don't accidently use old files. End of explanation """ # return current working directory print "You can check the newly created files in", modelpath """ Explanation: Yup. It's that simple, the model datasets are written using a single command (mf.write_input). Check in the model working directory and verify that the input files have been created. Or if you might just add another cell, right after this one, that prints a list of all the files in our model directory. The path we are working in is returned from this next block. End of explanation """ silent = False #Print model output to screen? pause = False #Require user to hit enter? Doesn't mean much in Ipython notebook report = True #Store the output from the model in buff success, buff = MF.run_model(silent=silent, pause=pause, report=report) """ Explanation: Running the Model Flopy has several methods attached to the model object that can be used to run the model. They are run_model, run_model2, and run_model3. Here we use run_model3, which will write output to the notebook. End of explanation """ #imports for plotting and reading the MODFLOW binary output file import matplotlib.pyplot as plt import flopy.utils.binaryfile as bf #Create the headfile object and grab the results for last time. headfile = os.path.join(modelpath, modelname + '.hds') headfileobj = bf.HeadFile(headfile) #Get a list of times that are contained in the model times = headfileobj.get_times() print 'Headfile (' + modelname + '.hds' + ') contains the following list of times: ', times #Get a numpy array of heads for totim = 1.0 #The get_data method will extract head data from the binary file. HEAD = headfileobj.get_data(totim=1.0) #Print statistics on the head print 'Head statistics' print ' min: ', HEAD.min() print ' max: ', HEAD.max() print ' std: ', HEAD.std() #Create a contour plot of heads FIG = plt.figure(figsize=(15,13)) #setup contour levels and plot extent LEVELS = np.arange(989., 1005., 0.5) EXTENT = (DELR/2., LX - DELR/2., DELC/2., LY - DELC/2.) 
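#EXTENT runs between the centers of the outermost cells (half a cell in from each edge of the grid)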
print 'Contour Levels: ', LEVELS print 'Extent of domain: ', EXTENT #Make a contour plot on the first axis AX1 = FIG.add_subplot(1, 2, 1, aspect='equal') AX1.set_xlabel("x") AX1.set_ylabel("y") YTICKS = np.arange(0, 11000, 500) AX1.set_yticks(YTICKS) AX1.set_title("Hubbertville contour map") AX1.text(2000, 10500, r"River", fontsize=10, color="blue") AX1.text(1800, 340, r"Green Swamp", fontsize=10, color="green") AX1.contour(np.flipud(HEAD[0, :, :]), levels=LEVELS, extent=EXTENT) #Make a color flood on the second axis AX2 = FIG.add_subplot(1, 2, 2, aspect='equal') AX2.set_xlabel("x") AX2.set_ylabel("y") AX2.set_yticks(YTICKS) AX2.set_title("Hubbertville color flood") AX2.text(2000, 10500, r"River", fontsize=10, color="black") AX2.text(1800, 340, r"Green Swamp", fontsize=10, color="black") cax = AX2.imshow(HEAD[0, :, :], extent=EXTENT, interpolation='nearest') cbar = FIG.colorbar(cax, orientation='vertical', shrink=0.45) """ Explanation: Post Processing the Results To read heads from the MODFLOW binary output file, we can use the flopy.utils.binaryfile module. Specifically, we can use the HeadFile object from that module to extract head data arrays. End of explanation """ #look at the head in column = 4 from headobj, and then plot it #print HEAD along a column; COL is a variable that allows us to change this easily COL = 4 print HEAD[0,:,COL] # we see this is what we want, but is flipped because MODFLOW's array does not = Python, so we reverse the order (flip them) and call it Y = np.flipud(HEAD[0,:,COL]) print Y #for our cross section create X-coordinates to match with heads XCOORD = np.arange(0, 11000, 500) + 250 print XCOORD fig = plt.figure(figsize=(10, 5)) ax = fig.add_subplot(1, 1, 1) TITLE = 'cross section of head along Column = ({0})'.format(COL) ax.set_title(TITLE) ax.set_xlabel('y') ax.set_ylabel('head') ax.set_xlim(0, 11000.) ax.set_ylim(980.,1020.) ax.text(10480, 998, r"River", fontsize=10, color="blue",rotation='vertical') ax.text(300, 998, r"Green Swamp", fontsize=10, color="green",rotation='vertical') ax.text(5300,1009., r"Groundwater Divide", fontsize=10, color="black",rotation='vertical') ax.plot(XCOORD, Y) """ Explanation: Look at the bottom of the MODFLOW output file (ending with a *.list) and note the water balance reported. End of explanation """ #in order to keep the problem unconfined we'll have to raise the top above what was set in P4.3 (=1030 m) ZTOP = 2030. STRT = 2000 * np.ones((NLAY, NROW, NCOL), dtype=np.float32) # P4.5a starting head=2000 m throughout model domain print STRT #we have to update the MODFLOW's BAS Package with the new IBOUND array BAS_PACKAGE = flopy.modflow.ModflowBas(MF, ibound=IBOUND, strt=STRT) #in order to keep the problem unconfined we'll have to raise the top above what was set in P4.3 (=1030 m) TOP = 2030. #in order to keep the same transmissivity we need to raise the base so the resulting saturated thickness is the same BOT = 1980. 
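#rebuild the discretization object so the shifted top and bottom elevations are passed to MODFLOW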
DIS_PACKAGE = flopy.modflow.ModflowDis(MF, NLAY, NROW, NCOL, delr=DELR, delc=DELC, top=TOP, botm=BOT, laycbd=0) # print DIS_PACKAGE #uncomment this on far left to see information about the flopy object #delete these files to prevent us from reading old results modelfiles = os.listdir(modelpath) for filename in modelfiles: f = os.path.join(modelpath, filename) if modelname in f: try: os.remove(f) print 'Deleted: ', filename except: print 'Unable to delete: ', filename #Now write the model input files MF.write_input() print "New MODFLOW input files = ", modelfiles print "You can check the newly created files in", modelpath #rerun MODFLOW-NWT silent = False #Print model output to screen? pause = False #Require user to hit enter? Doesn't mean much in Ipython notebook report = True #Store the output from the model in buff success, buff = MF.run_model(silent=silent, pause=pause, report=report) #As before, let's look at the results and compare to P4-3 Part a. #imports for plotting and reading the MODFLOW binary output file import matplotlib.pyplot as plt import flopy.utils.binaryfile as bf #Create the headfile object and grab the results for last time. headfile = os.path.join(modelpath, modelname + '.hds') headfileobj = bf.HeadFile(headfile) #Get a list of times that are contained in the model times = headfileobj.get_times() print 'Headfile (' + modelname + '.hds' + ') contains the following list of times: ', times #Get a numpy array of heads for totim = 1.0 #The get_data method will extract head data from the binary file. HEAD = headfileobj.get_data(totim=1.0) #Print statistics on the head print 'Head statistics' print ' min: ', HEAD.min() print ' max: ', HEAD.max() print ' std: ', HEAD.std() #-999.99 is the Inactive node flag so we'll use our previous contour settings #Create a contour plot of heads FIG = plt.figure(figsize=(15,13)) #setup contour levels and plot extent LEVELS = np.arange(1989., 2005., 0.5) EXTENT = (DELR/2., LX - DELR/2., DELC/2., LY - DELC/2.) print 'Contour Levels: ', LEVELS print 'Extent of domain: ', EXTENT #Make a contour plot on the first axis AX1 = FIG.add_subplot(1, 2, 1, aspect='equal') AX1.set_xlabel("x") AX1.set_ylabel("y") YTICKS = np.arange(0, 11000, 500) AX1.set_yticks(YTICKS) AX1.set_title("Hubbertville contour map") AX1.text(2000, 10500, r"River", fontsize=10, color="blue") AX1.text(1800, 340, r"Green Swamp", fontsize=10, color="green") AX1.contour(np.flipud(HEAD[0, :, :]), levels=LEVELS, extent=EXTENT) #Make a color flood on the second axis AX2 = FIG.add_subplot(1, 2, 2, aspect='equal') AX2.set_xlabel("x") AX2.set_ylabel("y") AX2.set_yticks(YTICKS) AX2.set_title("Hubbertville color flood") AX2.text(2000, 10500, r"River", fontsize=10, color="black") AX2.text(1800, 340, r"Green Swamp", fontsize=10, color="black") cax = AX2.imshow(HEAD[0, :, :], extent=EXTENT, interpolation='nearest', vmin=1989) cbar = FIG.colorbar(cax, orientation='vertical', shrink=0.45) COL = 4 # recall we need to flip because MODFLOW's array does not = Python, so we reverse the order (flip them) and call it Y = np.flipud(HEAD[0,:,COL]) print Y #for our cross section create X-coordinates to match with heads XCOORD = np.arange(0, 11000, 500) + 250 print XCOORD fig = plt.figure(figsize=(10, 5)) ax = fig.add_subplot(1, 1, 1) TITLE = 'cross section of head along Column = ({0})'.format(COL) ax.set_title(TITLE) ax.set_xlabel('y') ax.set_ylabel('head') ax.set_xlim(0, 11000.) ax.set_ylim(1980.,2020.) 
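#annotate the river, the swamp, and the groundwater divide so the cross section is easier to read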
ax.text(10480, 1998, r"River", fontsize=10, color="blue",rotation='vertical') ax.text(300, 1998, r"Green Swamp", fontsize=10, color="green",rotation='vertical') ax.text(5400,2006., r"Groundwater Divide", fontsize=10, color="black",rotation='vertical') ax.plot(XCOORD, Y) """ Explanation: Now Change Starting Heads to 2000 m End of explanation """ # let's use the first model of Part a so that the starting head of 1000 m is not below the bottom of the aquifer TOP = 1030. BOT = 980. #we'll set the cell in the first row and first column to a specified head (changing it's IBOUND value to -1) IBOUND[:, 0, 0] = -1 print IBOUND #change the starting head to equal 1000 m STRT = 1000 * np.ones((NLAY, NROW, NCOL), dtype=np.float32) print STRT #we have to update MODFLOW's BAS Package with the new IBOUND and STRT heads BAS_PACKAGE = flopy.modflow.ModflowBas(MF, ibound=IBOUND, strt=STRT) print 'aquifer top =', TOP print 'aquifer bottom =', BOT #we have to update MODFLOW's DIS Package for the new tops and bottoms DIS_PACKAGE = flopy.modflow.ModflowDis(MF, NLAY, NROW, NCOL, delr=DELR, delc=DELC, top=TOP, botm=BOT, laycbd=0) #delete old files to prevent us from reading old results modelfiles = os.listdir(modelpath) for filename in modelfiles: f = os.path.join(modelpath, filename) if modelname in f: try: os.remove(f) print 'Deleted: ', filename except: print 'Unable to delete: ', filename #Now write the model input files MF.write_input() print "New MODFLOW input files = ", modelfiles print "You can check the newly created files in", modelpath #rerun MODFLOW-2005 silent = False #Print model output to screen? pause = False #Require user to hit enter? Doesn't mean much in Ipython notebook report = True #Store the output from the model in buff success, buff = MF.run_model(silent=silent, pause=pause, report=report) #As before, let's look at the results and compare to P4-3 Part a. #imports for plotting and reading the MODFLOW binary output file import matplotlib.pyplot as plt import flopy.utils.binaryfile as bf #Create the headfile object and grab the results for last time. headfile = os.path.join(modelpath, modelname + '.hds') headfileobj = bf.HeadFile(headfile) #Get a list of times that are contained in the model times = headfileobj.get_times() print 'Headfile (' + modelname + '.hds' + ') contains the following list of times: ', times #Get a numpy array of heads for totim = 1.0 #The get_data method will extract head data from the binary file. HEAD = headfileobj.get_data(totim=1.0) #Print statistics on the head print 'Head statistics' print ' min: ', HEAD.min() print ' max: ', HEAD.max() print ' std: ', HEAD.std() #Create a contour plot of heads FIG = plt.figure(figsize=(15,13)) #setup contour levels and plot extent LEVELS = np.arange(1000., 1011., 0.5) EXTENT = (DELR/2., LX - DELR/2., DELC/2., LY - DELC/2.) 
#Make a contour plot on the first axis AX1 = FIG.add_subplot(1, 2, 1, aspect='equal') AX1.set_xlabel("x") AX1.set_ylabel("y") YTICKS = np.arange(0, 11000, 500) AX1.set_yticks(YTICKS) AX1.set_title("Hubbertville contour map") AX1.text(2000, 10500, r"River", fontsize=10, color="blue") AX1.text(1800, 340, r"Green Swamp", fontsize=10, color="green") AX1.contour(np.flipud(HEAD[0, :, :]), levels=LEVELS, extent=EXTENT) #Make a color flood on the second axis AX2 = FIG.add_subplot(1, 2, 2, aspect='equal') AX2.set_xlabel("x") AX2.set_ylabel("y") AX2.set_yticks(YTICKS) AX2.set_title("Hubbertville color flood") AX2.text(2000, 10500, r"River", fontsize=10, color="black") AX2.text(1800, 340, r"Green Swamp", fontsize=10, color="black") cax = AX2.imshow(HEAD[0, :, :], extent=EXTENT, interpolation='nearest', vmin=998.2) cbar = FIG.colorbar(cax, orientation='vertical', shrink=0.45) #as before let's plot a north-south cross section COL = 4 # recall we need to flip because MODFLOW's array does not = Python, so we reverse the order (flip them) and call it Y = np.flipud(HEAD[0,:,COL]) #for our cross section create X-coordinates to match with heads XCOORD = np.arange(0, 11000, 500) + 250 fig = plt.figure(figsize=(10, 5)) ax = fig.add_subplot(1, 1, 1) TITLE = 'cross section of head along Column = ({0})'.format(COL) ax.set_title(TITLE) ax.set_xlabel('y') ax.set_ylabel('head') ax.set_xlim(0, 11000.) ax.set_ylim(980.,1020.) ax.text(10480, 998, r"River", fontsize=10, color="blue",rotation='vertical') ax.text(300, 998, r"Green Swamp", fontsize=10, color="green",rotation='vertical') ax.text(5400,1007., r"Groundwater Divide", fontsize=10, color="black",rotation='vertical') ax.plot(XCOORD, Y) """ Explanation: How do the two versions of starting head compare? What does this conceptualization need to keep this problem from being a relative result? Part b. Take one of the models you designed in part (a) and replace one constant flux node on either the river or swamp boundary with a specified head node equal to 1000 m. Run the model to steady state. Compare the results with those in part (a) and with P4.3a. Explain the differences. End of explanation """
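The cells above repeat the same bookkeeping several times: delete stale output files, rewrite the MODFLOW input, run the model, then read the binary head file. As a hedged, supplementary sketch only (not part of the original exercise), that sequence can be collected into one Python 3 helper; the parameters are the flopy model object, workspace path and model name already defined in this notebook, and the flopy calls (write_input, run_model, HeadFile) are the same ones used above.

import os
import flopy.utils.binaryfile as bf

def rerun_and_get_heads(mf, modelpath, modelname, totim=1.0):
    """Delete stale output, rewrite input files, run the model and return heads."""
    # remove any files left over from a previous run
    for filename in os.listdir(modelpath):
        if modelname in filename:
            try:
                os.remove(os.path.join(modelpath, filename))
                print('Deleted:', filename)
            except OSError:
                print('Unable to delete:', filename)

    # write the MODFLOW input files and run the executable
    mf.write_input()
    success, buff = mf.run_model(silent=True, pause=False, report=True)
    if not success:
        raise RuntimeError('MODFLOW did not terminate normally')

    # read the binary head file and return the head array for the requested time
    headfileobj = bf.HeadFile(os.path.join(modelpath, modelname + '.hds'))
    return headfileobj.get_data(totim=totim)

# example use with the objects defined earlier in this notebook:
# HEAD = rerun_and_get_heads(MF, modelpath, modelname)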
jmschrei/pomegranate
examples/naivebayes_simple_male_female.ipynb
mit
from pomegranate import * import seaborn seaborn.set_style('whitegrid') %pylab inline """ Explanation: Naive Bayes Simple Male or Female author: Nicholas Farn [<a href="sendto:nicholasfarn@gmail.com">nicholasfarn@gmail.com</a>] This example shows how to create a simple Gaussian Naive Bayes Classifier using pomegranate. In this example we will be given a set of data measuring a person's height (feet) and try to classify them as male or female. This example is a simplification drawn from the example in the Wikipedia <a href="https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Examples">article</a> on Naive Bayes Classifiers. End of explanation """ male = NormalDistribution.from_samples([ 6.0, 5.92, 5.58, 5.92, 6.08, 5.83 ]) female = NormalDistribution.from_samples([ 5.0, 5.5, 5.42, 5.75, 5.17, 5.0 ]) """ Explanation: First we'll create the distributions for our model. In this case we'll assume that height, weight, and foot size are normally distributed. We'll fit our distribution to a set of data for males and females. End of explanation """ male.plot( n=100000, edgecolor='c', color='c', bins=50, label='Male' ) female.plot( n=100000, edgecolor='g', color='g', bins=50, label='Female' ) plt.legend( fontsize=14 ) plt.ylabel('Count') plt.xlabel('Height (ft)') plt.show() print("Male distribution has mu = {:.3} and sigma = {:.3}".format( *male.parameters )) print("Female distribution has mu = {:.3} and sigma = {:.3}".format( *female.parameters )) """ Explanation: Let's check on the parameters for our male and female height distributions. End of explanation """ clf = NaiveBayes([ male, female ]) """ Explanation: Everything seems to look good so let's create our Naive Bayes Classifier. End of explanation """ data = np.array([[5.0], [6.0], [4.92], [5.5]]) for sample, probability in zip( data, clf.predict_proba(data) ): print("Height {}, {} chance male and {} chance female".format( sample, 100*probability[0], 100*probability[1])) for sample, result in zip( data, clf.predict( data )): print("Person with height {} is {}.".format( sample, "female" if result else "male" )) """ Explanation: Let's take a look at how our classifier calls people of various heights. We can either look at a probabilistic measurement of the sample being male or female, or a hard call prediction. Lets take a look at both. End of explanation """ X = np.array([[180], [190], [170], [165], [100], [150], [130], [150]]) y = np.array([ 0, 0, 0, 0, 1, 1, 1, 1 ]) clf.fit( X, y ) """ Explanation: These results look good. We can also train a our classifier with a set of data. This is done by creating a set of observations along with a set with the corresponding correct classification. End of explanation """ data = np.array([[130], [200], [100], [162], [145]]) """ Explanation: In this case we fitted the normal distributions to fit a set of data with male an female weights (lbs). Let's check the results with the following data set. End of explanation """ for sample, result in zip( data, clf.predict( data )): print("Person with weight {} is {}.".format( sample, "female" if result else "male" )) """ Explanation: Now let's enter it into our classifier. End of explanation """
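As a supplementary, hedged sketch (not part of the original example): the posterior probabilities returned by predict_proba above can be roughly reproduced by hand with Bayes' rule, which makes the mechanics of the Gaussian Naive Bayes classifier explicit. Only numpy is used; the height samples and the implicit equal priors are taken from the cells above.

import numpy as np

# Gaussian parameters fit to the same height samples used above
male_heights = np.array([6.0, 5.92, 5.58, 5.92, 6.08, 5.83])
female_heights = np.array([5.0, 5.5, 5.42, 5.75, 5.17, 5.0])
mu_m, sigma_m = male_heights.mean(), male_heights.std()
mu_f, sigma_f = female_heights.mean(), female_heights.std()

def gaussian_pdf(x, mu, sigma):
    # normal probability density evaluated at x
    return np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))

def posterior_male(height, prior_male=0.5):
    # Bayes' rule with two classes and Gaussian likelihoods
    weighted_m = gaussian_pdf(height, mu_m, sigma_m) * prior_male
    weighted_f = gaussian_pdf(height, mu_f, sigma_f) * (1.0 - prior_male)
    return weighted_m / (weighted_m + weighted_f)

for h in [5.0, 6.0, 4.92, 5.5]:
    p = posterior_male(h)
    print("Height {}: {:.1f}% male, {:.1f}% female".format(h, 100 * p, 100 * (1 - p)))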
aylward/ITKTubeTK
examples/archive/VesselExtractionUsingCTA_TrainVascularModel/VesselExtractionUsingCTA_TrainVascularModel.ipynb
apache-2.0
import os import sys import numpy # Path for TubeTK libs and bin #Values takend from TubeTK launcher #sys.path.append("C:/src/TubeTK_Python_ITK/SlicerExecutionModel-build/GenerateCLP/") #sys.path.append("C:/src/TubeTK_Python_ITK/SlicerExecutionModel-build/GenerateCLP/Release") #sys.path.append("C:/src/TubeTK_Python_ITK/ITK-build/bin/") #sys.path.append("C:/src/TubeTK_Python_ITK/ITK-build/bin/Release") #sys.path.append("C:/src/TubeTK_Python_ITK/TubeTK-build/bin/") #sys.path.append("C:/src/TubeTK_Python_ITK/TubeTK-build/bin/Release") sys.path.append("C:/src/TubeTK_Python_ITK/TubeTK-build/lib/") sys.path.append("C:/src/TubeTK_Python_ITK/TubeTK-build/lib/Release") #sys.path.append("C:/src/TubeTK_Python_ITK/VTK-build/bin/") #sys.path.append("C:/src/TubeTK_Python_ITK/VTK-build/bin/Release") # Setting TubeTK Build Directory TubeTK_BUILD_DIR=None if 'TubeTK_BUILD_DIR' in os.environ: TubeTK_BUILD_DIR = os.environ['TubeTK_BUILD_DIR'] else: print('TubeTK_BUILD_DIR not found!') print(' Set environment variable') os.environ["TubeTK_BUILD_DIR"] = "C:/src/TubeTK_Python_ITK/TubeTK-build" TubeTK_BUILD_DIR = os.environ["TubeTK_BUILD_DIR"] #sys.exit( 1 ) if not os.path.exists(TubeTK_BUILD_DIR): print('TubeTK_BUILD_DIR set by directory not found!') print(' TubeTK_BUILD_DIR = ' + TubeTK_BUILD_DIR ) sys.exit(1) # Setting ITK Build Directory and importing ITK try: import itk except: ITK_BUILD_DIR = None if 'ITK_BUILD_DIR' in os.environ: ITK_BUILD_DIR = os.environ['ITK_BUILD_DIR'] else: print('ITK_BUILD_DIR not found!') print(' Set environment variable') os.environ["ITK_BUILD_DIR"] = "C:/src/TubeTK_Python_R/ITK-build" ITK_BUILD_DIR = os.environ["ITK_BUILD_DIR"] #sys.exit( 1 ) if not os.path.exists(ITK_BUILD_DIR): print('ITK_BUILD_DIR set by directory not found!') print(' ITK_BUIDL_DIR = ' + ITK_BUILD_DIR ) sys.exit(1) # Append ITK libs sys.path.append("C:/src/TubeTK_Python_ITK/ITK-build/Wrapping/Generators/Python/Release") sys.path.append("C:/src/TubeTK_Python_ITK/ITK-build/lib/Release") sys.path.append("C:/src/TubeTK_Python_ITK/ITK-build/lib") # Append TubeTK libs sys.path.append("C:/src/TubeTK_Python_ITK/TubeTK-build/ITKModules/TubeTKITK-build/Wrapping/Generators/Python/Release") import itk from itk import TubeTKITK as itktube """ Explanation: Train Vascular Model Notebook This notebook contains an example of how to train a vascular model for automatic vessel segmentation from a CTA. For this example we are using CTA of the head. This example requires an expert mask as an input. Expert mask is a binary image volume, where vessels are marked 1 and rest is 0. It also requires one more mask, which serves as mask of the brain region with in the head CTA. If ITK is not installed in your python environment, you need to define the environment variable ITK_BUILD_DIR that contains the path to where ITK was built. We need to find the directory in which TubeTK was build. This is required to find the path to the testing data, and may be also required to find the TubeTK library paths if your python environment does not include it. The environment variable TubeTK_BUILD_DIR needs to be defined. 
End of explanation """ Dimension = 3 PixelType = itk.F CTImageFileName = os.path.join(TubeTK_BUILD_DIR, 'MIDAS_Data\inputCTA.mha') ExpertMaskImageFileName = os.path.join(TubeTK_BUILD_DIR, 'MIDAS_Data\inputExpertMask.mha') MaskImageFileName = os.path.join(TubeTK_BUILD_DIR, 'MIDAS_Data\inputMask.mha') SpatialObjectType = itk.SpatialObject[Dimension] """ Explanation: Initialization End of explanation """ ImageType = itk.Image[PixelType, Dimension] ImageReaderType = itk.ImageFileReader[ImageType] imageReader1 = ImageReaderType.New() imageReader1.SetFileName(CTImageFileName) imageReader1.Update() CTImage = imageReader1.GetOutput() imageReader2 = ImageReaderType.New() imageReader2.SetFileName(ExpertMaskImageFileName) imageReader2.Update() ExpertMaskImage = imageReader2.GetOutput() imageReader3 = ImageReaderType.New() imageReader3.SetFileName(MaskImageFileName) imageReader3.Update() MaskImage = imageReader3.GetOutput() """ Explanation: Read the input images End of explanation """ boundary = itk.Index[3]() boundary.Fill(10) #Create the crop image filter CropImageFilterType = itktube.CropImage[ImageType, ImageType] cropImageFilter = CropImageFilterType.New() cropImageFilter.SetBoundary(boundary) #cropImageFilter.SetMatchVolume(MaskImage) #Giving error #Crop Input CTA cropImageFilter.SetInput(CTImage) cropImageFilter.Update() croppedCTImage = cropImageFilter.GetOutput() #Crop Expert Mask cropImageFilter.SetInput(ExpertMaskImage) cropImageFilter.Update() croppedExpertMaskImage = cropImageFilter.GetOutput() #Crop Mask cropImageFilter.SetInput(MaskImage) cropImageFilter.Update() croppedMaskImage = cropImageFilter.GetOutput() """ Explanation: STEP 1: Crop the input volumes to make them of same size as MaskImage End of explanation """ boundary = itk.Index[3]() boundary.Fill(10) #Create the resample image filter ResampleImageFilterType = itktube.ResampleImage[ImageType, ImageType] #Resample Input CTA resampleImageFilter1 = ResampleImageFilterType.New() resampleImageFilter1.SetInput(croppedCTImage) resampleImageFilter1.SetMakeIsotropic(True) resampleImageFilter1.SetInterpolator("Sinc") resampleCTImage = resampleImageFilter1.GetOutput() #Resample Expert Mask resampleImageFilter2 = ResampleImageFilterType.New() resampleImageFilter2.SetInput(croppedExpertMaskImage) resampleImageFilter2.SetMakeIsotropic(True) resampleImageFilter2.SetInterpolator("NearestNeighbor") resampleExpertMaskImage = resampleImageFilter2.GetOutput() #Resample Mask resampleImageFilter3 = ResampleImageFilterType.New() resampleImageFilter3.SetInput(croppedMaskImage) resampleImageFilter3.SetMakeIsotropic(True) resampleImageFilter3.SetInterpolator("NearestNeighbor") resampleMaskImage = resampleImageFilter3.GetOutput() """ Explanation: STEP 2: Resample the cropped images. End of explanation """ # create resampleMaskImage -> erodedResampleMaskImage # resampleExpertMaskImage -> erodedResampleExpertMaskImage # resampleCTImage -> maskedResampleCTImage """ Explanation: STEP 3: Create Mask-only images. this step required Image Math End of explanation """ # Create image to save not-vessel mask. 
ShortImageType = itk.Image[itk.S, Dimension] notVesselMaskImage = ShortImageType.New() #Create Compute Training Mask Filter ComputeTrainingMaskFilterType = itktube.ComputeTrainingMask[ImageType] computeTrainingMaskFilter = ComputeTrainingMaskFilterType.New() computeTrainingMaskFilter.SetInput(erodedResampleExpertMaskImage) computeTrainingMaskFilter.SetNotVesselMask(notVesselMaskImage) computeTrainingMaskFilter.SetGap(0.5) computeTrainingMaskFilter.SetNotVesselWidth(2) computeTrainingMaskFilter.Update() expertTrainMaskImage = computeTrainingMaskFilter.GetOutput() """ Explanation: STEP 4: Compute Training Mask End of explanation """ DiscriminantInfoFileName = os.path.join(TubeTK_BUILD_DIR, 'Temporary\\vascularModel.mrs') enhancedScalesExpertMaskImage = ImageType.New() # Create EnhanceTubesUsingDiscriminantAnalysis Filter EnhanceTubesUsingDiscriminantAnalysisFilterType = itktube.EnhanceTubesUsingDiscriminantAnalysis[ImageType, ImageType] ETUDAFilter = EnhanceTubesUsingDiscriminantAnalysisFilterType.New() ETUDAFilter.SetInput(maskedResampleCTImage) ETUDAFilter.SetLabelMap(expertTrainMaskImage) ETUDAFilter.SetTubeId(255) ETUDAFilter.SetBackgroundId(127) ETUDAFilter.SetSaveDiscriminantInfo(DiscriminantInfoFileName) ETUDAFilter.SetOutputSeedScaleImage(enhancedScalesExpertMaskImage) ETUDAFilter.SetTubeScales(0.4,0.8,1.2,1.6) enhancedExpertMaskImage = ETUDAFilter.GetOutput() """ Explanation: STEP 5: Enhance Vessels in maskedResampleCTImage End of explanation """ vasculaModelParameterFileName = os.path.join(TubeTK_BUILD_DIR, 'Temporary\\vascularModel.mtp') # Create SegmentTubesParameters Filter ComputeSegmentTubesParametersFilterType = itktube.ComputeSegmentTubesParameters[ImageType] CSTPFilter = ComputeSegmentTubesParametersFilterType.New() CSTPFilter.SetInput(maskedResampleCTImage) CSTPFilter.SetMaskImage(expertTrainMaskImage) CSTPFilter.SetScaleImage(enhancedScalesExpertMaskImage) CSTPFilter.SetParametersFileName(vasculaModelParameterFileName) CSTPFilter.Update() """ Explanation: STEP 6: Compute Segment tubes Parameters End of explanation """ # enhancedExpertMaskImage -> vesselEnhancedExpertMaskImage """ Explanation: STEP 7: This step requires Image Math End of explanation """ # Create SegmentBinaryImageSkeleton Filter SegmentBinaryImageSkeletonFilterType = itktube.SegmentBinaryImageSkeleton[Imagetype] SBISFilter = SegmentBinaryImageSkeletonFilterType.New() SBISFilter.SetInput(vesselEnhancedExpertMaskImage) SBISFilter.Update() seedsVesselEnhancedExpertMaskImage = SBISFilter.GetOutput() """ Explanation: STEP 8: Compute Seeds End of explanation """ outputVesselsFileName = os.path.join(TubeTK_BUILD_DIR, 'Temporary\\outputVessels.tre') # Create SegmentTubes Filter SegmentTubesFilterType = itktube.SegmentTubes[ImageType] SegmenttubesFilter = SegmentTubesFilterType.New() SegmenttubesFilter.SetInput(maskedResampleCTImage) """ Explanation: STEP 9: Segment Tubes End of explanation """
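A small hedged addition, not part of the original pipeline: several steps above only build filters in memory, and steps 3 and 7 are left as comments. When debugging a pipeline like this it can help to write intermediate images to disk; the helper below mirrors the itk.ImageFileReader pattern used at the top of this notebook (the output path in the example is illustrative only).

import os
import itk

def write_image(image, image_type, filename):
    """Write an ITK image to disk, mirroring the ImageFileReader pattern above."""
    WriterType = itk.ImageFileWriter[image_type]
    writer = WriterType.New()
    writer.SetFileName(filename)
    writer.SetInput(image)
    writer.Update()

# example (illustrative path; assumes the objects created above):
# write_image(croppedCTImage, ImageType,
#             os.path.join(TubeTK_BUILD_DIR, 'Temporary', 'croppedCTA.mha'))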
gtrichards/PHYS_T480
TimeSeries2.ipynb
mit
import numpy as np from matplotlib import pyplot as plt from astroML.time_series import generate_power_law from astroML.fourier import PSD_continuous N = 2014 dt = 0.01 beta = 2 t = dt * np.arange(N) y = generate_power_law(# Complete f, PSD = PSD_continuous(# Complete fig = plt.figure(figsize=(8, 4)) ax1 = fig.add_subplot(121) ax1.plot(t, y, '-k') ax1.set_xlim(0, 10) ax2 = fig.add_subplot(122, xscale='log', yscale='log') ax2.plot(f, PSD, '-k') ax2.set_xlim(1E-1, 60) ax2.set_ylim(1E-11, 1E-3) plt.show() """ Explanation: Analysis of Stochastic Processes ($\S$ 10.5) If a system is always variable, but the variability is not (infinitely) predictable, then we have a stochastic process. Counter to what you may think, these processes can also be characterized. Take a (stochastically varying) quasar which has both line and continuum emission and where the line emission is stimulated by the continuum. Since there is a physical separation between the regions that produce each type of emission, we get a delay between the light curves as can be seen here: To understand stochastic processes, let's first talk about correlation functions. A correlation function ($\S$ 6.5) gives us information about the time delay between 2 processes. If one time series is derived from another simply by shifting the time axis by $t_{\rm lag}$, then their correlation function will have a peak at $\Delta t = t_{\rm lag}$. The correlation function between $f(t)$, and $g(t)$ is defined as $${\rm CF}(\Delta t) = \frac{\lim_{T\rightarrow \infty}\frac{1}{T}\int_T f(t)g(t+\Delta t)dt }{\sigma_f \sigma_g}$$ Computing the correlation function is basically the mathematical processes of sliding the two curves over each other and computing the degree of similarity for each step in time. The peak of the correlation function reveals the time delay between the processes. Below we have the correlation function of the line and continuum emission from a quasar, which reveals a $\sim$ 15 day delay between the two. In an autocorrelation function (ACF), $f(t)= g(t)$ and we instead are revealing information about variability timescales present in a process. If the values of $y$ are uncorrelated, then ACF$(\Delta t)=0$. The Fourier Transform of an ACF is the Power Spectral Density (PSD). So, the PSD is an analysis in frequency space and the ACF is in time space. For example, for a sinusoidal function in time space, the ACF will have period, $T$, and the PSD in frequency space is a $\delta$ function centered on $\omega = 1/2\pi T$. The structure function is another quantity that is frequently used in astronomy and is related to the ACF: $${\rm SF}(\Delta t) = {\rm SF}_\infty[1 - {\rm ACF}(\Delta t)]^{1/2},$$ where ${\rm SF}_\infty$ is the standard deviation of the time series as evaluated on timescales much larger than any charateristic timescale. If ${\rm SF} \propto t^{\alpha}$, then ${\rm PSD} \propto \frac{1}{f^{1+2\alpha}}$. So an analysis of a stochastic system can be done with either the ACF, SF, or PSD. AstroML has time series and Fourier tools for generating light curves drawn from a power law in frequency space. Note that these tools define $\beta = 1+2\alpha$. Complete the cell below to make a plot of counts vs. time and of the PSD vs. frequency for both a $1/f$ and a $1/f^2$ process. (Where the latter is known as Brownian motion or a random walk.) 
End of explanation """ # Ivezic, Figure 10.29 # Author: Jake VanderPlas # License: BSD # The figure produced by this code is published in the textbook # "Statistics, Data Mining, and Machine Learning in Astronomy" (2013) # For more information, see http://astroML.github.com # To report a bug or issue, use the following forum: # https://groups.google.com/forum/#!forum/astroml-general import numpy as np from matplotlib import pyplot as plt from astroML.time_series import generate_power_law from astroML.fourier import PSD_continuous N = 1024 dt = 0.01 factor = 100 t = dt * np.arange(N) random_state = np.random.RandomState(1) fig = plt.figure(figsize=(5, 3.75)) fig.subplots_adjust(wspace=0.05) for i, beta in enumerate([1.0, 2.0]): # Generate the light curve and compute the PSD x = factor * generate_power_law(N, dt, beta, random_state=random_state) f, PSD = PSD_continuous(t, x) # First axes: plot the time series ax1 = fig.add_subplot(221 + i) ax1.plot(t, x, '-k') ax1.text(0.95, 0.05, r"$P(f) \propto f^{-%i}$" % beta, ha='right', va='bottom', transform=ax1.transAxes) ax1.set_xlim(0, 10.24) ax1.set_ylim(-1.5, 1.5) ax1.set_xlabel(r'$t$') # Second axes: plot the PSD ax2 = fig.add_subplot(223 + i, xscale='log', yscale='log') ax2.plot(f, PSD, '-k') ax2.plot(f[1:], (factor * dt) ** 2 * (2 * np.pi * f[1:]) ** -beta, '--k') ax2.set_xlim(1E-1, 60) ax2.set_ylim(1E-6, 1E1) ax2.set_xlabel(r'$f$') if i == 1: ax1.yaxis.set_major_formatter(plt.NullFormatter()) ax2.yaxis.set_major_formatter(plt.NullFormatter()) else: ax1.set_ylabel(r'${\rm counts}$') ax2.set_ylabel(r'$PSD(f)$') plt.show() """ Explanation: You should find that, because the power at high frequency is larger for $1/f$, that light curve will look noisier. We can even hear the difference: https://www.youtube.com/watch?v=3vEDZ-_iLNU) End of explanation """ # Syntax for EK and Scargle ACF computation import numpy as np from astroML.time_series import generate_damped_RW from astroML.time_series import ACF_scargle, ACF_EK t = np.arange(0,1000) y = generate_damped_RW(t, tau=300) dy = 0.1 y = np.random.normal(y,dy) ACF_scargle, bins_scargle = ACF_scargle(t,y,dy) ACF_EK, ACF_err_EK, bins_EK = ACF_EK(t,y,dy) """ Explanation: ACF for Unevenly Sampled Data astroML also has tools for computing the ACF of unevenly sampled data using two different (Scargle) and (Edelson & Krolik) methods: http://www.astroml.org/modules/classes.html#module-astroML.time_series One of the tools is for generating a damped random walk (DRW). Above we found that a random walk had a $1/f^2$ PSD. A damped random walk is a process "remembers" its history only for a characteristic time, $\tau$. The ACF vanishes for $\Delta t \gg \tau$. End of explanation """ # Ivezic, Figure 10.30 # Author: Jake VanderPlas # License: BSD # The figure produced by this code is published in the textbook # "Statistics, Data Mining, and Machine Learning in Astronomy" (2013) # For more information, see http://astroML.github.com # To report a bug or issue, use the following forum: # https://groups.google.com/forum/#!forum/astroml-general import numpy as np from matplotlib import pyplot as plt from astroML.time_series import lomb_scargle, generate_damped_RW from astroML.time_series import ACF_scargle, ACF_EK #------------------------------------------------------------ # Generate time-series data: # we'll do 1000 days worth of magnitudes t = np.arange(0, 1E3) z = 2.0 tau = 300 tau_obs = tau / (1. 
+ z) np.random.seed(6) y = generate_damped_RW(t, tau=tau, z=z, xmean=20) # randomly sample 100 of these ind = np.arange(len(t)) np.random.shuffle(ind) ind = ind[:100] ind.sort() t = t[ind] y = y[ind] # add errors dy = 0.1 y_obs = np.random.normal(y, dy) #------------------------------------------------------------ # compute ACF via scargle method C_S, t_S = ACF_scargle(t, y_obs, dy, n_omega=2 ** 12, omega_max=np.pi / 5.0) ind = (t_S >= 0) & (t_S <= 500) t_S = t_S[ind] C_S = C_S[ind] #------------------------------------------------------------ # compute ACF via E-K method C_EK, C_EK_err, bins = ACF_EK(t, y_obs, dy, bins=np.linspace(0, 500, 51)) t_EK = 0.5 * (bins[1:] + bins[:-1]) #------------------------------------------------------------ # Plot the results fig = plt.figure(figsize=(8, 8)) # plot the input data ax = fig.add_subplot(211) ax.errorbar(t, y_obs, dy, fmt='.k', lw=1) ax.set_xlabel('t (days)') ax.set_ylabel('observed flux') # plot the ACF ax = fig.add_subplot(212) ax.plot(t_S, C_S, '-', c='gray', lw=1, label='Scargle') ax.errorbar(t_EK, C_EK, C_EK_err, fmt='.k', lw=1, label='Edelson-Krolik') ax.plot(t_S, np.exp(-abs(t_S) / tau_obs), '-k', label='True') ax.legend(loc=3) ax.plot(t_S, 0 * t_S, ':', lw=1, c='gray') ax.set_xlim(0, 500) ax.set_ylim(-1.0, 1.1) ax.set_xlabel('t (days)') ax.set_ylabel('ACF(t)') plt.show() """ Explanation: Figure 10.30 below gives an example of an ACF for a DRW, which mimics the variability that we might see from a quasar. (Note that the Scargle method doesn't seem to be working.) End of explanation """ %matplotlib inline import numpy as np from matplotlib import pyplot as plt from matplotlib.ticker import MultipleLocator N=10 #epsilon = np.array([0,0,0,1,0,0,0,0,0,0,0,0]) epsilon = np.zeros(N+2) epsilon[3] = 1 yAR=np.zeros(N+2) yMA=np.zeros(N+2) yARMA=np.zeros(N+2) for i in np.arange(N)+2: # Complete using the coefficients given in the legend text below yAR[i] = yMA[i] = yARMA[i] = #print i, yAR[i], yMA[i] fig = plt.figure(figsize=(6, 6)) t = np.arange(len(yAR)) plt.plot(t,yAR,label="AR(2), a_1=0.5, a_2=0.5") plt.plot(t,yMA,label="MA(2), b_1=0.5, b_2=0.5") plt.plot(t,yARMA,label="ARMA(2,1), a_1=0.5, a_2=0.25, b_1=0.5",zorder=0) plt.xlabel("t") plt.ylabel("y") plt.legend(loc="upper right",prop={'size':8}) plt.ylim([0,1.1]) ax = plt.axes() ax.xaxis.set_major_locator(plt.MultipleLocator(1.0)) plt.show() """ Explanation: Autoregressive Models For processes like these that are not periodic, but that "retain memory" of previous states, we can use autogressive models. A random walk is an example of such a process; every new value is given by the preceeding value plus some noise: $$y_i = y_{i-1} + \epsilon_i.$$ If the coefficient of $y_{i-1}$ is $>1$ then it is known as a geometric random walk, which is typical of the stock market. (So, when you interview for a quant position on Wall Street, you tell them that you are an expert in using autoregressive geometric random walks to model stochastic processes.) In the random walk case above, each new value depends only on the immediately preceeding value. But we can generalized this to include $p$ values: $$y_i = \sum_{j=1}^pa_jy_{i-j} + \epsilon_i$$ We refer to this as an autoregressive (AR) process of order $p$: $AR(p)$. For a random walk, we have $p=1$, and $a_1=1$. If the data are drawn from a "stationary" process (one where it doesn't matter what region of the light curve you sample [so long as it is representative]), the $a_j$ satisfy certain conditions. 
One thing that we might do then is ask whether a system is more consistent with $a_1=0$ or $a_1=1$ (noise vs. a random walk). Below are some example light curves for specific $AR(p)$ processes. In the first example, $AR(0)$, the light curve is simply responding to noise fluctuations. In the second example, $AR(1)$, the noise fluctuation responses are persisting for slightly longer as the next time step depends positively on the time before. For the 3rd example, nearly the full effect of the noise spike from the previous time step is applied again, giving particularly long and high chains of peaks and valleys. In the 4th example, $AR(2)$, we have long, but low chains of peaks and valleys as a spike persists for an extra time step. Finally, in the 5th example, the response of a spike in the second time step has the opposite sign as for the first time step, and both have large coefficients, so the peaks and valleys are both quite high and quite narrowly separated. A moving average (MA) process is similar in some ways to an AR process, but is different in other ways. It is defined as $$y_i = \epsilon_i + \sum_{j=1}^qb_j\epsilon_{i-j}.$$ So, for example, an MA(q=1) process would look like $$y_i = \epsilon_{i} + b_1\epsilon_{i-1},$$ whereas an AR(p=2) process would look like $$y_i = a_1y_{i-1} + a_2y_{i-2} + \epsilon_i$$ Thus the $MA$ process is similar to an $AR$ process in that the next time step depends on the previous time step, but they are different in terms of how they respond to a shock. In an $MA$ process a shock affects only the current value and $q$ values into the future. In an $AR$ process a shock affects all future values. Below is some code and a plot that illustrates this. End of explanation """
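The loop in the cell above is deliberately left incomplete for the exercise. One possible completion, based only on the coefficients named in the plot legend (AR(2) with a_1=a_2=0.5, MA(2) with b_1=b_2=0.5, ARMA(2,1) with a_1=0.5, a_2=0.25, b_1=0.5), is sketched below as a self-contained example; it follows the AR and MA definitions given above and uses the same single shock epsilon[3]=1.

import numpy as np

N = 10
epsilon = np.zeros(N + 2)
epsilon[3] = 1.0                      # a single "shock", as in the cell above

yAR = np.zeros(N + 2)
yMA = np.zeros(N + 2)
yARMA = np.zeros(N + 2)

for i in np.arange(N) + 2:
    # AR(2): the response to a shock persists into all future values
    yAR[i] = 0.5 * yAR[i - 1] + 0.5 * yAR[i - 2] + epsilon[i]
    # MA(2): the shock only affects the current step and the next two
    yMA[i] = epsilon[i] + 0.5 * epsilon[i - 1] + 0.5 * epsilon[i - 2]
    # ARMA(2,1): a mixture of both behaviours
    yARMA[i] = (0.5 * yARMA[i - 1] + 0.25 * yARMA[i - 2]
                + epsilon[i] + 0.5 * epsilon[i - 1])

print(yAR)
print(yMA)
print(yARMA)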
nohmapp/acme-for-now
essential_algorithms/Moderate Difficulty.ipynb
mit
letters_map = {'2':'ABC', '3':'DEF', '4':'GHI', '5':'JKL', '6':'MNO', '7':'PQRS', '8':'TUV', '9':'WXYZ'} def printWords(number, ): #number is phone number def printWordsUtil(numb, curr_digit, output, n): if curr_digit == n: print('%s ' % output) return for i in range(len(letters_map[numb[curr_digit]])): output[curr_digit] = letters_map[number[curr_digit]][i] printWordsUtil(numb, curr_digit+1, output, n) if numb[curr_digit] == 0 or numb[curr_digit] == 1: return def gen_phone(digits): results = [] lookup = { '0': ' ', '1': ' ', '2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl', '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz', } def decode_next(s, i): if i == len(digits): results.append(s) return for c in lookup[digits[i]]: decode_next(s + c, i + 1) decode_next('', 0) return results """ Explanation: Phone Digits Given a phone number create a list of all the possible words that you can make given a dictionary from numbers to letters. In python there is a itertools.permutations('abc') that would print all permutations given some input. ```python import itertools itertools.permutations('abc') [i for i in itertools.permutations('abc')] output permutations ``` End of explanation """ # Dynamic programming implementation of LCS problem # Returns length of LCS for X[0..m-1], Y[0..n-1] def lcs(X, Y, m, n): L = [[0 for x in xrange(n+1)] for x in xrange(m+1)] # Following steps build L[m+1][n+1] in bottom up fashion. Note # that L[i][j] contains length of LCS of X[0..i-1] and Y[0..j-1] for i in xrange(m+1): for j in xrange(n+1): if i == 0 or j == 0: L[i][j] = 0 elif X[i-1] == Y[j-1]: L[i][j] = L[i-1][j-1] + 1 else: L[i][j] = max(L[i-1][j], L[i][j-1]) # Following code is used to print LCS index = L[m][n] # Create a character array to store the lcs string lcs = [""] * (index+1) lcs[index] = "\0" # Start from the right-most-bottom-most corner and # one by one store characters in lcs[] i = m j = n while i > 0 and j > 0: # If current character in X[] and Y are same, then # current character is part of LCS if X[i-1] == Y[j-1]: lcs[index-1] = X[i-1] i-=1 j-=1 index-=1 # If not same, then find the larger of two and # go in the direction of larger value elif L[i-1][j] > L[i][j-1]: i-=1 else: j-=1 print "LCS of " + X + " and " + Y + " is " + "".join(lcs) # Driver program X = "AGGTAB" Y = "GXTXAYB" m = len(X) n = len(Y) lcs(X, Y, m, n) passed in a list of dictionaries also passed a character passed single characted to int if a character does not exist in the dict then the defualt value it zero find the highest possisble value for a character in the dicts now design it to take an abatrary operator and reutrn the highest value based on the operator and then have it return ascending and descending order """ Explanation: Print Longest Common Subsequence This is a good problem for working out variations where you count contiguous subsequence versus non continuous The move with longest common subsequence is to start from the back of the strings and see if the letters are the same. 
Then increment with a dynamic programming approach where End of explanation """ import time import math class TimeTravelDict: def __init__(self): self.dict = {} def get(self, key, time): if not self.dict[key]: return -1 most_recent, value = math.inf, None for a, b in self.dict[key]: if b < time: if (time - b) < most_recent: most_recent = b value = a if value == None: return -1 else: return value def put(self, key, value): if not key in self.dict: self.dict[key] = [(value, time.time())] self.dict[key].append((value, time.time())) print(self.dict[key]) tt = TimeTravelDict() tt.put('a', 11) tt.put('a', 12) tt.put('a', 13) tt.put('a', 14) tt.get('a', 1513571590.2447577) """ Explanation: Time Travelling dictionary Design a time traveling dictionary, has a get and put function where the get function takes a time and returns the corresponding value at the time. End of explanation """ #[2::][1::2] import collections words = ["baa", "", "abcd", "abca", "cab", "cad"] def alienOrder(words): pre, suc = collections.defaultdict(set), collections.defaultdict(set) for pair in zip(words, words[1:]): print(pair) for a, b in zip(*pair): if a != b: suc[a].add(b) pre[b].add(a) break print('succ %s' % suc) print('pred %s' % pre) chars = set(''.join(words)) print('chars %s' % chars) print(set(pre)) free = chars - set(pre) print('free %s' % free) order = '' while free: a = free.pop() order += a for b in suc[a]: pre[b].discard(a) if not pre[b]: free.add(b) if set(order) == chars: return order else: False # return order * (set(order) == chars) alienOrder(words) """ Explanation: Alien Dictionary Given a sorted dictionary of an alien language, find order of characters ```python Input: words[] = {"baa", "abcd", "abca", "cab", "cad"} Output: Order of characters is 'b', 'd', 'a', 'c' Note that words are sorted and in the given language "baa" comes before "abcd", therefore 'b' is before 'a' in output. Similarly we can find other orders. Input: words[] = {"caa", "aaa", "aab"} Output: Order of characters is 'c', 'a', 'b' ``` The idea is to create a graph of characters a then find topological sorting of the graph. 1. Create a graph g with number of vertices equal to the size of alphabet in the given language. For example, if the alphabet size is 5, then there can be 5 characters in words. Initially there are no edges in graph. 2. DO the following for every pair of adjacent words in given sorted array. 1. Let the current pair of words be word1 and word2. One by one compare characters of both words and find the mismatching characters. 2. Create an edge in g from mismatching character of word1 to that of word2. 3. Print topological sorting of the above created graph. End of explanation """ def binarySearch(alist, value): mini = 0 maxi = len(alist) while mini <= maxi: print('here') pivot = (maxi - mini) // 2 current_value = alist[pivot] if current_value < value: mini = pivot + 1 elif current_value > value: maxi = pivot -1 else: return pivot return pivot or -1 test1 = [0, 5, 10 , 23, 46, 49, 78] test2 = [0, 5, 10] test3 = [0] print(binarySearch(test1, 49)) print(binarySearch(test2, 10)) binarySearch(test3, 90) """ Explanation: Binary Search End of explanation """
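A hedged note on the binarySearch above: as written it has a few edge-case problems — maxi starts at len(alist) rather than len(alist) - 1, the pivot is computed as (maxi - mini) // 2 without adding mini back, and `return pivot or -1` reports -1 when the match happens to sit at index 0. A corrected sketch with the same interface might look like this:

def binary_search(alist, value):
    """Return the index of value in the sorted list alist, or -1 if absent."""
    lo, hi = 0, len(alist) - 1
    while lo <= hi:
        mid = lo + (hi - lo) // 2      # midpoint of the current search window
        if alist[mid] < value:
            lo = mid + 1
        elif alist[mid] > value:
            hi = mid - 1
        else:
            return mid
    return -1

print(binary_search([0, 5, 10, 23, 46, 49, 78], 49))   # 5
print(binary_search([0, 5, 10], 10))                   # 2
print(binary_search([0], 90))                          # -1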
mtasende/Machine-Learning-Nanodegree-Capstone
notebooks/dev/n04B_evaluation_infrastructure.ipynb
mit
from predictor import evaluation as ev from predictor.dummy_mean_predictor import DummyPredictor predictor = DummyPredictor() y_train_true_df, y_train_pred_df, y_val_true_df, y_val_pred_df = ev.run_single_val(x, y, ahead_days, predictor) print(y_train_true_df.shape) print(y_train_pred_df.shape) print(y_val_true_df.shape) print(y_val_pred_df.shape) y_train_true_df.head() y_train_pred_df.head() y_val_true_df.head() y_val_pred_df.head() """ Explanation: Get the results of a single run End of explanation """ y_train_true_rs = ev.reshape_by_symbol(y_train_true_df) print(y_train_true_rs.shape) y_train_true_rs.head() y_train_pred_rs = ev.reshape_by_symbol(y_train_pred_df) print(y_train_pred_rs.shape) y_train_pred_rs.head() y_val_true_rs = ev.reshape_by_symbol(y_val_true_df) print(y_val_true_rs.shape) y_val_true_rs.head() """ Explanation: Done. Let's test the reshape_by_symbol function End of explanation """ u = x.index.levels[0][0] print(u) fe.SPY_DF.sort_index().index.unique() md = fe.SPY_DF.index.unique() u in md fe.add_market_days(u,6) """ Explanation: So, the reshape_by_symbol function seems to work with run_single_val. It could be added to it. Let's test the roll_evaluate function. End of explanation """ # Getting the data GOOD_DATA_RATIO = 0.99 data_df = pd.read_pickle('../../data/data_train_val_df.pkl') sys.path.append('../../') import predictor.feature_extraction as fe import utils.preprocessing as pp data_df = pp.drop_irrelevant_symbols(data_df, GOOD_DATA_RATIO) train_time = -1 # In real time days base_days = 7 # In market days step_days = 7 # market days ahead_days = 1 # market days today = data_df.index[-1] # Real date tic = time() x, y = fe.generate_train_intervals(data_df, train_time, base_days, step_days, ahead_days, today, fe.feature_close_one_to_one) toc = time() print('Elapsed time: %i seconds.' 
% (toc-tic)) print(data_df.shape) data_df.head() SAMPLES_GOOD_DATA_RATIO = 0.9 x_y_df = pd.concat([x, y], axis=1) x_y_df = pp.drop_irrelevant_samples(x_y_df, SAMPLES_GOOD_DATA_RATIO) x = x_y_df.iloc[:, :-1] y = x_y_df.iloc[:, -1] x = pp.fill_missing(x) x_y_df.isnull().sum() x.isnull().sum().sum() y.isnull().sum() x_reshaped = ev.reshape_by_symbol(x) x_reshaped.head() x_reshaped.isnull().sum().max() x.shape x_reshaped.shape x_reshaped[x_reshaped.notnull()] y_train_true_df, y_train_pred_df, y_val_true_df, y_val_pred_df = ev.run_single_val(x, y, ahead_days, predictor) from sklearn.metrics import r2_score r2_score(y_train_true_df, y_train_pred_df, multioutput='raw_values') tickers = y_train_true_df.index.levels[1] tickers y_train_true_df.loc[(slice(None), 'AAPL'),:] from sklearn.metrics import r2_score r2_train_score = [] mre_train = [] for ticker in tickers: y_true = y_train_true_df.loc[(slice(None), ticker),:] y_pred = y_train_pred_df.loc[(slice(None), ticker),:] r2_train_score.append(r2_score(y_true, y_pred)) mre_train.append(ev.mre(y_true, y_pred)) np.mean(r2_train_score) np.mean(mre_train) plt.plot(mre_train) ev.get_metrics(y_train_true_df, y_train_pred_df) train_days = 252 x_y_sorted = pd.concat([x, y], axis=1).sort_index() start_date = x_y_sorted.index.levels[0][0] end_date = fe.add_market_days(start_date, train_days) start_date end_date start_date + ((end_date - start_date) / 2) train_days = 252 step_eval_days = 30 r2, mre, y_val_true_df, y_val_pred_df, mean_dates = ev.roll_evaluate(x, y, train_days, step_eval_days, ahead_days, predictor, verbose=True) print(r2.shape) print(mre.shape) print(y_val_true_df.shape) print(y_val_pred_df.shape) print(mean_dates.shape) plt.plot(mean_dates, r2[:, 0], 'b', label='Mean r2 score') plt.plot(mean_dates, r2[:, 0] + 2*r2[:, 1], 'r') plt.plot(mean_dates, r2[:, 0] - 2*r2[:, 1], 'r') plt.xlabel('Mean date of the training period') plt.legend() plt.grid() plt.plot(mean_dates, mre[:, 0], 'b', label='Mean MRE') plt.plot(mean_dates, mre[:, 0] + 2*mre[:, 1], 'r') plt.plot(mean_dates, mre[:, 0] - 2*mre[:, 1], 'r') plt.xlabel('Mean date of the training period') plt.legend() plt.grid() y_val_true_df.head() y_val_pred_df.head() r2_scores, mre_scores, tickers = ev.get_metrics(y_val_true_df, y_val_pred_df) eval_df = pd.DataFrame(np.array([r2_scores, mre_scores]).T, index=tickers, columns=['r2', 'mre']) eval_df.head() eval_df['mre'].plot() eval_df['r2'].plot() eval_df.sort_values(by='mre', ascending=False) plt.scatter(eval_df['r2'], eval_df['mre']) eval2_df = ev.get_metrics_df(y_val_true_df, y_val_pred_df) eval2_df.head() """ Explanation: Let's do some previous filtering to avoid problems End of explanation """
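One hedged clarification to round this off: the cells above call ev.mre without showing its definition, and the project's actual implementation is not reproduced here. A mean-relative-error metric consistent with how it is used alongside r2_score would typically look like the sketch below (the function name and zero-handling are assumptions for illustration only).

import numpy as np

def mean_relative_error(y_true, y_pred):
    """Mean of |y_pred - y_true| / |y_true|, ignoring entries where y_true is zero."""
    y_true = np.asarray(y_true, dtype=float).ravel()
    y_pred = np.asarray(y_pred, dtype=float).ravel()
    nonzero = y_true != 0
    return np.mean(np.abs(y_pred[nonzero] - y_true[nonzero]) / np.abs(y_true[nonzero]))

# example with the validation outputs produced above:
# mean_relative_error(y_val_true_df.values, y_val_pred_df.values)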
tpin3694/tpin3694.github.io
machine-learning/.ipynb_checkpoints/calculate_difference_between_dates_and_times-checkpoint.ipynb
mit
# Load library import pandas as pd """ Explanation: Title: Calculate Difference Between Dates And Times Slug: calculate_difference_between_dates_and_times Summary: How to calculate differences between dates and times for machine learning in Python. Date: 2017-09-11 12:00 Category: Machine Learning Tags: Preprocessing Dates And Times Authors: Chris Albon Preliminaries End of explanation """ # Create data frame df = pd.DataFrame() # Create two datetime features df['Arrived'] = [pd.Timestamp('01-01-2017'), pd.Timestamp('01-04-2017')] df['Left'] = [pd.Timestamp('01-01-2017'), pd.Timestamp('01-06-2017')] """ Explanation: Create Date And Time Data End of explanation """ # Calculate duration between features df['Left'] - df['Arrived'] """ Explanation: Calculate Difference (Method 1) End of explanation """ # Calculate duration between features pd.Series(delta.days for delta in (df['Left'] - df['Arrived'])) """ Explanation: Calculate Difference (Method 2) End of explanation """
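A small hedged supplement to Method 2, using the df created above: instead of iterating over the Timedelta objects with a generator, the same result can usually be obtained with the vectorized .dt accessor on the timedelta Series, which also exposes other components such as the total number of seconds.

# Vectorized alternative to Method 2: the subtraction yields a timedelta Series,
# and .dt gives access to its components
duration = df['Left'] - df['Arrived']

duration.dt.days              # whole days, as integers
duration.dt.total_seconds()   # full duration expressed in seconds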
mmoll/hammer-cli
rel-eng/gem_release.ipynb
gpl-3.0
%cd .. """ Explanation: Release of hammer-cli gem Requirements push access to https://github.com/theforeman/hammer-cli push access to rubygems.org for hammer-cli sudo yum install transifex-client python-slugify asciidoc ensure neither the git push or gem push don't require interractive auth. If you can't use api key or ssh key to auth skip these steps and run them form the shell manually to push translations you need an account on Transifex Release process Follow the steps with &lt;Shift&gt;+&lt;Enter&gt; or &lt;Ctrl&gt;+&lt;Enter&gt;,&lt;Down&gt; If anything fails, fix it and re-run the step if applicable Release settings End of explanation """ NEW_VERSION = '2.0.0' LAST_VERSION = '0.19.0' DEVELOP_VERSION = '2.1.0-develop' NEXT_FUTURE_VERSION = '2.1.0' MAJOR_RELEASE = True STABLE_BRANCH = '2.0-stable' GIT_REMOTE_UPSTREAM = 'origin' WORK_BRANCH = 'master' if MAJOR_RELEASE else STABLE_BRANCH """ Explanation: Update the following notebook settings End of explanation """ ! git checkout {WORK_BRANCH} ! git fetch {GIT_REMOTE_UPSTREAM} ! git rebase {GIT_REMOTE_UPSTREAM}/{WORK_BRANCH} """ Explanation: Ensure the repo is up to date End of explanation """ ! bundle update ! bundle exec rake test """ Explanation: Run tests localy End of explanation """ ! sed -i 's/Gem::Version.new .*/Gem::Version.new "{NEW_VERSION}"/' lib/hammer_cli/version.rb # Parse git changelog from IPython.display import Markdown as md from subprocess import check_output from shlex import split import re def format_log_entry(entry): issues = re.findall(r'[^(]#([0-9]+)', entry) entry = re.sub(r'([fF]ixes|[rR]efs)[^-]*-\s*(.*)', r'\2', entry) entry = '* ' + entry.capitalize() entry = re.sub(r'\(#([0-9]+)\)', r'([PR #\1](https://github.com/theforeman/hammer-cli/pull/\1))', entry) for i in issues: referenced_issues.append(i) entry = entry + ', [#%s](http://projects.theforeman.org/issues/%s)' % (i, i) return entry def skip(entry): if re.match(r'Merge pull', entry) or \ re.match(r'^i18n', entry) or \ re.match(r'^Bump to version', entry): return True else: return False referenced_issues = [] git_log_cmd = 'git log --pretty=format:"%%s" %s..HEAD' % LAST_VERSION log = check_output(split(git_log_cmd)).decode('utf8').split('\n') change_log = [format_log_entry(e) for e in log if not skip(e)] md('\n'.join(change_log)) # Write release notes from datetime import datetime import fileinput import sys fh = fileinput.input('doc/release_notes.md', inplace=True) for line in fh: print(line.rstrip()) if re.match(r'========', line): print('### %s (%s)' % (NEW_VERSION, datetime.today().strftime('%Y-%m-%d'))) for entry in change_log: print(entry) print('') fh.close() """ Explanation: Update release related stuff End of explanation """ ! git add -u ! git status ! git diff --cached """ Explanation: Manual step: Update deps in the gemspec if neccessary Check what is going to be commited End of explanation """ ! git commit -m "Bump to {NEW_VERSION}" """ Explanation: Commit changes End of explanation """ if MAJOR_RELEASE: ! make -C locale/ tx-update """ Explanation: Update translations End of explanation """ ! git tag {NEW_VERSION} """ Explanation: Tag new version End of explanation """ if MAJOR_RELEASE: ! git checkout -b {STABLE_BRANCH} ! git push {GIT_REMOTE_UPSTREAM} {STABLE_BRANCH} ! git checkout {WORK_BRANCH} """ Explanation: Prepare stable branch for major release End of explanation """ ! rake build ! gem push pkg/hammer_cli-{NEW_VERSION}.gem """ Explanation: Build the gem End of explanation """ if MAJOR_RELEASE: ! 
sed -i 's/Gem::Version.new .*/Gem::Version.new "{DEVELOP_VERSION}"/' lib/hammer_cli/version.rb if MAJOR_RELEASE: ! git add -u ! git status if MAJOR_RELEASE: ! git diff --cached if MAJOR_RELEASE: ! git commit -m "Bump to {DEVELOP_VERSION}" """ Explanation: Bump the develop version for major release End of explanation """ ! git push {GIT_REMOTE_UPSTREAM} {WORK_BRANCH} ! git push --tags {GIT_REMOTE_UPSTREAM} {WORK_BRANCH} """ Explanation: PUSH the changes upstream If everything is correct End of explanation """ print('\n') print('\n'.join(change_log)) print('\n\nhttps://github.com/theforeman/hammer-cli/releases/new?tag=%s' % NEW_VERSION) from IPython.display import Markdown as md md('### Create new hammer-cli release in Redmine \n' + \ '<a href="https://projects.theforeman.org/projects/hammer-cli/versions/new" target="_blank">https://projects.theforeman.org/projects/hammer-cli/versions/new</a>\n\n' + \ 'Set name to hammer-cli-%s' % (NEXT_FUTURE_VERSION if MAJOR_RELEASE else NEW_VERSION)) if not MAJOR_RELEASE: print('Set fixed in versions to %s in following issues:' % NEW_VERSION) for i in referenced_issues: print('- https://projects.theforeman.org/issues/%s' % i) """ Explanation: Now the new release is in upstream repo Some manual steps follow to improve the UX New relase on GitHub Copy the following changelog lines to the description in form on link below The release title is the new version. End of explanation """
anabranch/data_analysis_with_python_and_pandas
3 - NumPy Basics/3-3 NumPy Array Basics - Vectorization.ipynb
apache-2.0
import sys print(sys.version) import numpy as np print(np.__version__) npa = np.random.random_integers(0,50,20) """ Explanation: NumPy Array Basics - Vectorization End of explanation """ npa """ Explanation: Now I’ve harped on about vectorization in the last couple of videos and I’ve told you that it’s great but I haven’t shown you how it’s so great. Here are the two powerful reasons - Concise - Efficient The fundamental idea behind array programming is that operations apply at once to an entire set of values. This makes it a high-level programming model as it allows the programmer to think and operate on whole aggregates of data, without having to resort to explicit loops of individual scalar operations. You can read more here: https://en.wikipedia.org/wiki/Array_programming End of explanation """ npa * 2 npa ** 3 [x * 2 for x in npa] """ Explanation: With vectorization we can apply changes to the entire array extremely efficiently, no more for loops. If we want to double the array, we just multiply by 2 if we want to cube it we just cube it. End of explanation """ def new_func(numb): if numb < 10: return numb**3 else: return numb**2 new_func(npa) """ Explanation: So who cares? Again it’s going to be efficiency thing just like boolean selection Let’s try something a bit more complex. Define a function named new_func that cubes the value if it is less than 5 and squares it if it is greater or equal to 5. End of explanation """ ?np.vectorize """ Explanation: However we can’t just pass in the whole vector because we’re going to get this array ambiguity. End of explanation """ vect_new_func = np.vectorize(new_func) type(vect_new_func) vect_new_func(npa) [new_func(x) for x in npa] """ Explanation: We need to vectorize this operation and we do that with np.vectorize We can then apply that to our entire array and it takes care of the complexity for us. We can think in terms of the data without having to think about each individual element. End of explanation """ %timeit [new_func(x) for x in npa] %timeit vect_new_func(npa) npa2 = np.random.random_integers(0,100,20*1000) """ Explanation: It's also much faster to vectorize operations and while these are simple examples the benefits will become apparent as we continue through this course. this has changed since python3 and the list comprehension has gotten much faster. However, this doesn't mean that vectorization is slower, just that it's a bit heavier because it places a lot more tools at your disposal like we'll see in the next video. End of explanation """ %timeit [new_func(x) for x in npa2] %timeit vect_new_func(npa2) """ Explanation: Speed comparisons with size. End of explanation """
emjotde/UMZ
Cwiczenia/02/Uczenie Maszynowe - Ćwiczenia 2.1 - Wykresy i krzywe.ipynb
cc0-1.0
%matplotlib inline import numpy as np import matplotlib.pyplot as plt ## initialize the axes fig = plt.figure() ax = fig.add_subplot(111) ## format axes ax.set_ylabel('volts') ax.set_title('a sine wave') t = np.arange(0.0, 1.0, 0.01) s = np.sin(2*np.pi*t) line, = ax.plot(t, s, color='blue', lw=2) """ Explanation: 2.1 Matplotlib - Wykresy i krzywe Prosty wykres krzywej End of explanation """ import numpy as np import matplotlib.pyplot as plt ## initialize the figure fig = plt.figure(figsize=(15,10)) ## the data t = np.arange(0.0, 1.0, 0.01) s = np.sin(2*np.pi*t) ## the top axes ax1 = fig.add_subplot(3,1,1) ax1.set_ylabel('volts') ax1.set_title('a sine wave') line1 = ax1.plot(t, s+5.0, color='blue', lw=2) line2 = ax1.plot(t, s+2.5, color='red', lw=2) line3 = ax1.plot(t, s, color='orange', lw=2) ## the middle axes ax2 = fig.add_subplot(3,1,2) ax2.set_ylabel('volts') ax2.set_title('a sine wave') line1 = ax2.plot(t, s+5.0, color='black', lw=2,linestyle="--") line2 = ax2.plot(t, s+2.5, color='black', lw=2,linestyle="-.") line3 = ax2.plot(t, s, color='#000000', lw=2,linestyle=":") ## the thrid axes ax3 = fig.add_subplot(3,1,3) ax3.set_ylabel('volts') ax3.set_title('a sine wave') line1 = ax3.plot(t,s+5.0, color='blue', marker="+") line2 = ax3.plot(t,s+2.5, color='red', marker="o") line3 = ax3.plot(t,s, color='orange', marker="^") ## adjust the space between plots plt.subplots_adjust(wspace=0.2,hspace=.4) """ Explanation: Style wykresów End of explanation """ import matplotlib as mpl from mpl_toolkits.mplot3d import Axes3D import numpy as np import matplotlib.pyplot as plt fig = plt.figure(figsize=(15,10)) ax = fig.add_subplot(111, projection='3d') theta = np.linspace(-4 * np.pi, 4 * np.pi, 50) z = np.linspace(-2, 2, 50) r = z**2 + 1 x = r * np.sin(theta) y = r * np.cos(theta) ax.plot(x, y, z, label='parametric curve') ax.legend() plt.show() """ Explanation: Wykresy 3D End of explanation """ from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm import matplotlib.pyplot as plt import numpy as np fig = plt.figure(figsize=(15,10)) ax = fig.add_subplot(111, projection='3d') X = np.arange(-5, 5, 0.25) Y = np.arange(-5, 5, 0.25) X, Y = np.meshgrid(X, Y) R = np.sqrt(X**2 + Y**2) Z = np.sin(R) surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet, linewidth=0, antialiased=True) ax.set_zlim(-1.01, 1.01) plt.show() """ Explanation: Powierzchnie 3D End of explanation """
KasperPRasmussen/bokeh
examples/howto/charts/deep_dive-attributes.ipynb
bsd-3-clause
from bokeh.charts.attributes import AttrSpec, ColorAttr, MarkerAttr """ Explanation: Bokeh Charts Attributes One of Bokeh Charts main contributions is that it provides a flexible interface for applying unique attributes based on the unique values in column(s) of a DataFrame. Internally, the bokeh chart uses the AttrSpec to define the mapping, but allows the user to pass in their own spec, or utilize a function to produce a customized one. End of explanation """ attr = AttrSpec(items=[1, 2, 3], iterable=['a', 'b', 'c']) attr.attr_map """ Explanation: Simple Examples The AttrSpec assigns values in the iterable to values in items. End of explanation """ attr[1] """ Explanation: You will see that the key in the mapping will be a tuple, and it will always be a tuple. The mapping works like this because the AttrSpec(s) are often used with Pandas DataFrames groupby method. The groupby method can return a single value or a tuple of values when used with multiple columns, so this is just making sure that is consistent. However, you can still access the values in the following way: End of explanation """ color = ColorAttr(items=[1, 2, 3]) color.attr_map """ Explanation: The ColorAttr is just a custom AttrSpec that has a default palette as the iterable, but can be customized, and will likely provide some other color generation functionality. End of explanation """ color = ColorAttr(items=list(range(0, 10))) color.attr_map """ Explanation: Let's assume that you don't know how many unique items you are working with, but you have defined the things that you want to assign the items to. The AttrSpec will automatically cycle the iterable for you. This is important for exploratory analysis. End of explanation """ from bokeh.sampledata.autompg import autompg as df df.head() color_attr = ColorAttr(df=df, columns=['cyl', 'origin']) color_attr.attr_map """ Explanation: Because there are only 6 unique colors in the default palette, the palette repeats starting on the 7th item. Using with Pandas End of explanation """ color_attr.series """ Explanation: You will notice that this is similar to a pandas series with a MultiIndex, which is seen below. End of explanation """ from bokeh.charts.data_source import ChartDataSource fill_color = ColorAttr(df=df, columns=['cyl', 'origin']) ds = ChartDataSource.from_data(df) ds.join_attrs(fill_color=fill_color).head() """ Explanation: You can think of this as a SQL table with 3 columns, two of which are an index. You can imagine how you might join this view data into the original data source to assign these colors to the associated rows. Combining with ChartDataSource End of explanation """ # add new column df['large_displ'] = df['displ'] >= 350 fill_color = ColorAttr(df=df, columns=['cyl', 'origin']) line_color = ColorAttr(df=df, columns=['large_displ']) ds.join_attrs(fill_color=fill_color, line_color=line_color).head(10) """ Explanation: Multiple Attributes End of explanation """ line_color = ColorAttr(df=df, columns=['large_displ'], palette=['Green', 'Red']) ds.join_attrs(fill_color=fill_color, line_color=line_color).head(10) """ Explanation: Custom Iterable You will see that the output contains the combined chart_index and the columns for both attributes. The values of each are joined in based on the original assignment. For example, line_color only has two colors because the large_displ column only has two values. If we wanted to change the true/false, we can modify the ColorAttr. 
End of explanation """ df_sorted = df.sort(columns=['large_displ'], ascending=False) line_color = ColorAttr(df=df_sorted, columns=['large_displ'], palette=['Green', 'Red'], sort=False) ds.join_attrs(fill_color=fill_color, line_color=line_color).head() """ Explanation: Altering Attribute Assignment Order You may not have wanted to assign the values in the order that occured. So, you would have five options. Pre order the data and tell the attribute not to sort. Make the column a categorical and set the order. Specify the sort options to the AttrSpec Manually specify the items in the order you want them to be assigned. Specify the iterable in the order you want. 1. Pre order the data End of explanation """ df.sort(columns='large_displ').head() import pandas as pd df_cat = df.copy() # create the categorical and set the default (ascending) df_cat['large_displ'] = pd.Categorical.from_array(df.large_displ).reorder_categories([True, False]) # we don't have to sort here, but doing it so you can see the order that the attr spec will see df_cat.sort(columns='large_displ').head() line_color = ColorAttr(df=df_cat, columns=['large_displ'], palette=['Green', 'Red']) ds.join_attrs(fill_color=fill_color, line_color=line_color).head() """ Explanation: 2. Make the column a categorical and set the order We'll show the default sort order of a boolean column, which is ascending. End of explanation """ # the items will be sorted descending (uses same sorting options as pandas) line_color = ColorAttr(df=df, columns=['large_displ'], palette=['Green', 'Red'], sort=True, ascending=False) ds.join_attrs(fill_color=fill_color, line_color=line_color).head() """ Explanation: 3. Specify the sort options to the AttrSpec End of explanation """ # remove df so the items aren't auto-calculated # still need column name for when palette is joined into the dataset line_color = ColorAttr(columns=['large_displ'], items=[True, False], palette=['Green', 'Red']) ds.join_attrs(fill_color=fill_color, line_color=line_color).head() """ Explanation: 4. Manually specify the items in the order you want them End of explanation """ line_color = ColorAttr(df=df, columns=['large_displ'], palette=['Red', 'Green']) ds.join_attrs(fill_color=fill_color, line_color=line_color).head() """ Explanation: 5. Change the order of the iterable End of explanation """
steinam/teacher
jup_notebooks/data-science-ipython-notebooks-master/numpy/02.01-Understanding-Data-Types.ipynb
mit
L = list(range(10)) L type(L[0]) """ Explanation: <!--BOOK_INFORMATION--> <img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png"> This notebook contains an excerpt from the Python Data Science Handbook by Jake VanderPlas; the content is available on GitHub. The text is released under the CC-BY-NC-ND license, and code is released under the MIT license. If you find this content useful, please consider supporting the work by buying the book! No changes were made to the contents of this notebook from the original. <!--NAVIGATION--> < Introduction to NumPy | Contents | The Basics of NumPy Arrays > Understanding Data Types in Python Effective data-driven science and computation requires understanding how data is stored and manipulated. This section outlines and contrasts how arrays of data are handled in the Python language itself, and how NumPy improves on this. Understanding this difference is fundamental to understanding much of the material throughout the rest of the book. Users of Python are often drawn-in by its ease of use, one piece of which is dynamic typing. While a statically-typed language like C or Java requires each variable to be explicitly declared, a dynamically-typed language like Python skips this specification. For example, in C you might specify a particular operation as follows: C /* C code */ int result = 0; for(int i=0; i&lt;100; i++){ result += i; } While in Python the equivalent operation could be written this way: ```python Python code result = 0 for i in range(100): result += i ``` Notice the main difference: in C, the data types of each variable are explicitly declared, while in Python the types are dynamically inferred. This means, for example, that we can assign any kind of data to any variable: ```python Python code x = 4 x = "four" ``` Here we've switched the contents of x from an integer to a string. The same thing in C would lead (depending on compiler settings) to a compilation error or other unintented consequences: C /* C code */ int x = 4; x = "four"; // FAILS This sort of flexibility is one piece that makes Python and other dynamically-typed languages convenient and easy to use. Understanding how this works is an important piece of learning to analyze data efficiently and effectively with Python. But what this type-flexibility also points to is the fact that Python variables are more than just their value; they also contain extra information about the type of the value. We'll explore this more in the sections that follow. A Python Integer Is More Than Just an Integer The standard Python implementation is written in C. This means that every Python object is simply a cleverly-disguised C structure, which contains not only its value, but other information as well. For example, when we define an integer in Python, such as x = 10000, x is not just a "raw" integer. It's actually a pointer to a compound C structure, which contains several values. 
Looking through the Python 3.4 source code, we find that the integer (long) type definition effectively looks like this (once the C macros are expanded): C struct _longobject { long ob_refcnt; PyTypeObject *ob_type; size_t ob_size; long ob_digit[1]; }; A single integer in Python 3.4 actually contains four pieces: ob_refcnt, a reference count that helps Python silently handle memory allocation and deallocation ob_type, which encodes the type of the variable ob_size, which specifies the size of the following data members ob_digit, which contains the actual integer value that we expect the Python variable to represent. This means that there is some overhead in storing an integer in Python as compared to an integer in a compiled language like C, as illustrated in the following figure: Here PyObject_HEAD is the part of the structure containing the reference count, type code, and other pieces mentioned before. Notice the difference here: a C integer is essentially a label for a position in memory whose bytes encode an integer value. A Python integer is a pointer to a position in memory containing all the Python object information, including the bytes that contain the integer value. This extra information in the Python integer structure is what allows Python to be coded so freely and dynamically. All this additional information in Python types comes at a cost, however, which becomes especially apparent in structures that combine many of these objects. A Python List Is More Than Just a List Let's consider now what happens when we use a Python data structure that holds many Python objects. The standard mutable multi-element container in Python is the list. We can create a list of integers as follows: End of explanation """ L2 = [str(c) for c in L] L2 type(L2[0]) """ Explanation: Or, similarly, a list of strings: End of explanation """ L3 = [True, "2", 3.0, 4] [type(item) for item in L3] """ Explanation: Because of Python's dynamic typing, we can even create heterogeneous lists: End of explanation """ import array L = list(range(10)) A = array.array('i', L) A """ Explanation: But this flexibility comes at a cost: to allow these flexible types, each item in the list must contain its own type info, reference count, and other information–that is, each item is a complete Python object. In the special case that all variables are of the same type, much of this information is redundant: it can be much more efficient to store data in a fixed-type array. The difference between a dynamic-type list and a fixed-type (NumPy-style) array is illustrated in the following figure: At the implementation level, the array essentially contains a single pointer to one contiguous block of data. The Python list, on the other hand, contains a pointer to a block of pointers, each of which in turn points to a full Python object like the Python integer we saw earlier. Again, the advantage of the list is flexibility: because each list element is a full structure containing both data and type information, the list can be filled with data of any desired type. Fixed-type NumPy-style arrays lack this flexibility, but are much more efficient for storing and manipulating data. Fixed-Type Arrays in Python Python offers several different options for storing data in efficient, fixed-type data buffers. The built-in array module (available since Python 3.3) can be used to create dense arrays of a uniform type: End of explanation """ import numpy as np """ Explanation: Here 'i' is a type code indicating the contents are integers. 
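A type code like 'd' works the same way for double-precision floats; a quick sketch:
```python
import array
D = array.array('d', [1.0, 2.5, 4.0])   # 'd' = C double; every element shares this fixed type
```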
Much more useful, however, is the ndarray object of the NumPy package. While Python's array object provides efficient storage of array-based data, NumPy adds to this efficient operations on that data. We will explore these operations in later sections; here we'll demonstrate several ways of creating a NumPy array. We'll start with the standard NumPy import, under the alias np: End of explanation """ # integer array: np.array([1, 4, 2, 5, 3]) """ Explanation: Creating Arrays from Python Lists First, we can use np.array to create arrays from Python lists: End of explanation """ np.array([3.14, 4, 2, 3]) """ Explanation: Remember that unlike Python lists, NumPy is constrained to arrays that all contain the same type. If types do not match, NumPy will upcast if possible (here, integers are up-cast to floating point): End of explanation """ np.array([1, 2, 3, 4], dtype='float32') """ Explanation: If we want to explicitly set the data type of the resulting array, we can use the dtype keyword: End of explanation """ # nested lists result in multi-dimensional arrays np.array([range(i, i + 3) for i in [2, 4, 6]]) """ Explanation: Finally, unlike Python lists, NumPy arrays can explicitly be multi-dimensional; here's one way of initializing a multidimensional array using a list of lists: End of explanation """ # Create a length-10 integer array filled with zeros np.zeros(10, dtype=int) # Create a 3x5 floating-point array filled with ones np.ones((3, 5), dtype=float) # Create a 3x5 array filled with 3.14 np.full((3, 5), 3.14) # Create an array filled with a linear sequence # Starting at 0, ending at 20, stepping by 2 # (this is similar to the built-in range() function) np.arange(0, 20, 2) # Create an array of five values evenly spaced between 0 and 1 np.linspace(0, 1, 5) # Create a 3x3 array of uniformly distributed # random values between 0 and 1 np.random.random((3, 3)) # Create a 3x3 array of normally distributed random values # with mean 0 and standard deviation 1 np.random.normal(0, 1, (3, 3)) # Create a 3x3 array of random integers in the interval [0, 10) np.random.randint(0, 10, (3, 3)) # Create a 3x3 identity matrix np.eye(3) # Create an uninitialized array of three integers # The values will be whatever happens to already exist at that memory location np.empty(3) """ Explanation: The inner lists are treated as rows of the resulting two-dimensional array. Creating Arrays from Scratch Especially for larger arrays, it is more efficient to create arrays from scratch using routines built into NumPy. Here are several examples: End of explanation """
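# A quick, hedged illustration of the storage difference discussed above.
# Exact byte counts are CPython/NumPy implementation details and vary by
# platform; the point is only that the ndarray keeps one contiguous buffer
# while the list keeps a separate full Python object per element.
import sys
values = list(range(1000))
arr = np.array(values)
print("ndarray buffer size:", arr.nbytes, "bytes")
print("list + int objects :",
      sys.getsizeof(values) + sum(sys.getsizeof(v) for v in values), "bytes")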
bjshaw/phys202-2015-work
assignments/assignment09/IntegrationEx02.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt import numpy as np import seaborn as sns from scipy import integrate """ Explanation: Integration Exercise 2 Imports End of explanation """ def integrand(x, a): return 1.0/(x**2 + a**2) def integral_approx(a): # Use the args keyword argument to feed extra arguments to your integrand I, e = integrate.quad(integrand, 0, np.inf, args=(a,)) return I def integral_exact(a): return 0.5*np.pi/a print("Numerical: ", integral_approx(1.0)) print("Exact : ", integral_exact(1.0)) assert True # leave this cell to grade the above integral """ Explanation: Indefinite integrals Here is a table of definite integrals. Many of these integrals has a number of parameters $a$, $b$, etc. Find five of these integrals and perform the following steps: Typeset the integral using LateX in a Markdown cell. Define an integrand function that computes the value of the integrand. Define an integral_approx funciton that uses scipy.integrate.quad to peform the integral. Define an integral_exact function that computes the exact value of the integral. Call and print the return value of integral_approx and integral_exact for one set of parameters. Here is an example to show what your solutions should look like: Example Here is the integral I am performing: $$ I_1 = \int_0^\infty \frac{dx}{x^2 + a^2} = \frac{\pi}{2a} $$ End of explanation """ def integrand1(x,p): return (np.sin(p*x)**2)/(x**2) def integral_approx1(p): I1, e1 = integrate.quad(integrand1, 0, np.inf, args=(p,)) return I1 def integral_exact1(p): return np.pi*p/2 print("Numerical: ", integral_approx1(1.0)) print("Exact : ", integral_exact1(1.0)) assert True # leave this cell to grade the above integral """ Explanation: Integral 1 \begin{equation} I = \int_0^\infty \frac{\sin^2{px}}{x^2}dx = \frac{\pi p}{2} \end{equation} End of explanation """ def integrand2(x): return (np.sin(x))**2 def integral_approx2(): I2, e2 = integrate.quad(integrand2, 0, np.pi/2) return I2 def integral_exact2(): return np.pi/4 print("Numerical: ", integral_approx2()) print("Exact : ", integral_exact2()) assert True # leave this cell to grade the above integral """ Explanation: Integral 2 \begin{equation} I = \int_0^\frac{\pi}{2} \sin^2{x} dx = \frac{\pi}{4} \end{equation} End of explanation """ def integrand3(x,a,b): return np.exp(-a*x)*np.cos(b*x) def integral_approx3(a,b): I3, e3 = integrate.quad(integrand3, 0, np.inf, args=(a,b,)) return I3 def integral_exact3(a,b): return a/(a**2+b**2) print("Numerical: ", integral_approx3(1.0,1.0)) print("Exact : ", integral_exact3(1.0,1.0)) assert True # leave this cell to grade the above integral """ Explanation: Integral 3 \begin{equation} I = \int_0^\infty e^{-ax} \cos{bx} \; dx = \frac{a}{a^2+b^2} \end{equation} End of explanation """ def integrand4(x): return x/(np.exp(x)-1) def integral_approx4(): I4, e4 = integrate.quad(integrand4, 0, np.inf) return I4 def integral_exact4(): return np.pi**2/6 print("Numerical: ", integral_approx4()) print("Exact : ", integral_exact4()) assert True # leave this cell to grade the above integral """ Explanation: Integral 4 \begin{equation} I = \int_0^\infty \frac{x}{e^{x}-1}dx = \frac{\pi^2}{6} \end{equation} End of explanation """ def integrand5(x): return (np.log(1+x))/(x) def integral_approx5(): I5, e5 = integrate.quad(integrand5, 0, 1) return I5 def integral_exact5(): return (np.pi**2)/12 print("Numerical: ", integral_approx5()) print("Exact : ", integral_exact5()) assert True # leave this cell to grade the above integral """ Explanation: Integral 5 
\begin{equation}
I = \int_0^1 \frac{\ln(1+x)}{x}dx = \frac{\pi^2}{12}
\end{equation}
End of explanation
"""
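# A minimal closing sanity check (a sketch, not part of the original exercise):
# each numerical approximation above should agree with its closed form.
checks = [
    (integral_approx(1.0), integral_exact(1.0)),
    (integral_approx1(1.0), integral_exact1(1.0)),
    (integral_approx2(), integral_exact2()),
    (integral_approx3(1.0, 1.0), integral_exact3(1.0, 1.0)),
    (integral_approx4(), integral_exact4()),
    (integral_approx5(), integral_exact5()),
]
print(all(np.isclose(approx, exact) for approx, exact in checks))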
cleuton/datascience
book/capt10/server_load.ipynb
apache-2.0
import numpy as np
from sklearn.preprocessing import normalize
from sklearn.preprocessing import StandardScaler
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats as stats
%matplotlib inline
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
import statsmodels.formula.api as smf
from statsmodels.compat import lzip
from sklearn import feature_selection
import statsmodels.regression.linear_model as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.stats.diagnostic import het_breuschpagan
"""
Explanation: Multiple regression
Multiple regression is the case where we have more than one independent variable.
End of explanation
"""

df = pd.read_csv('servidor.csv')
df.head()

df.info()

df.describe()

results = smf.ols('Duracao_media_ms ~ Perc_medio_CPU + Load_avg_minute + Requests_média', data=df).fit()
results.summary()

X = df.drop('Duracao_media_ms', axis=1)
Xe = sm.add_constant(X, prepend=True)
vif = [variance_inflation_factor(Xe.values, i) for i in range(Xe.shape[1])]
vif_s = pd.Series(vif, index=Xe.columns)
print(vif_s)

name = ['Lagrange multiplier statistic', 'p-value', 'f-value', 'f p-value']
testeH = het_breuschpagan(results.resid, results.model.exog)
lzip(name, testeH)
"""
Explanation: Web server performance data
The data for this exercise were collected from a web server hosting a website. Each observation is a per-minute average of the variables:
- Duracao_media_ms: average processing time of an HTTP request (in milliseconds);
- Perc_medio_CPU: average CPU utilization (percent);
- Load_avg_minute: total processor load;
- Requests_média: average number of requests processed.
End of explanation
"""
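# A hedged follow-up sketch using utilities already imported above
# (train_test_split, linear_model, r2_score, mean_squared_error).
# The 80/20 split and random_state are arbitrary choices, not part of
# the original analysis.
y = df['Duracao_media_ms']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
lr = linear_model.LinearRegression().fit(X_train, y_train)
pred = lr.predict(X_test)
print('R^2:', r2_score(y_test, pred))
print('MSE:', mean_squared_error(y_test, pred))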
dstrockis/outlook-autocategories
notebooks/3-Playing with text analytics tools.ipynb
apache-2.0
# Load data import pandas as pd with open('./data_files/8lWZYw-u-yNbGBkC4B--ip77K1oVwwyZTHKLeD7rm7k.csv') as data_file: df = pd.read_csv(data_file) df.head() """ Explanation: Hypotheses Cleaner features will improve accuracy & robustness Including the body of the email will improve accuracy Extracting meaning from text will lead to higher quality features End of explanation """ # Remove messages without a Subject and a body print df.shape df = df.dropna(subset=['Subject']) df = df.dropna(subset=['RawBody']) print df.shape # Sample the data set to decrease number of records df = df.sample(frac=0.33, random_state=42) print df.shape print df['RawBody'][0] # Post a single body text to the Entity Recognition API # I estimate running this on a corpus of 10K documents would cost about $50 import requests import json params = {'key': 'AIzaSyA_2WascO_oSrABHD4yMvkR4q5l9JeGO7Y'} data = { 'encodingType': 'UTF8', 'document': { 'type': df['RawContentType'][0], 'content': df['RawBody'][0], } } r = requests.post('https://language.googleapis.com/v1/documents:analyzeEntities', params=params, json=data) print json.dumps(r.json(), sort_keys=True, indent=2, separators=(',', ': ')) import requests import json feature_matrix = pd.DataFrame() for index, row in df.iterrows(): # Perform entity recognition on document params = {'key': 'AIzaSyA_2WascO_oSrABHD4yMvkR4q5l9JeGO7Y'} data = { 'encodingType': 'UTF8', 'document': { 'type': row['RawContentType'], 'content': row['RawBody'], } } r = requests.post('https://language.googleapis.com/v1/documents:analyzeEntities', params=params, json=data) # Populate feature matrix with entities as columns try: for entity in r.json()['entities']: try: feature_matrix.at[index, entity['name'].lower()] = entity['salience'] except KeyError as ex: continue except KeyError as ex: continue feature_matrix.head() # TODO: Need to train with fixed vocabulary, otherwise runtime feature construction won't work correctly # TODO: Try to limit number of rows print len(feature_matrix.columns.values) """ Explanation: Constructing intelligent features Use Google Cloud Natural Language APIs to start Entity Recognition might be powerful, especially with salience data Syntax analysis to get nouns & perform lemmatization End of explanation """ # TODO: Is there some form of TF/IDF to be done here? # Drop rows not in feature matrix df = df.ix[feature_matrix.index.values] print df.shape # Fill NaNs with zeros feature_matrix = feature_matrix.fillna(value=0.0) # Convert to sparse matrix from scipy.sparse import csr_matrix feature_matrix_numpy = csr_matrix(feature_matrix.values) """ Explanation: Strategies for reducing # of columns in feature matrix Add more stop words Remove email addresses Remove URLs Lemmatization Remove number, special characters, sequences of characters like 'aaaaa' Perform manual tokenization to get column names, and inspect types of cols created ... 
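A minimal sketch of the first few of these cleanup steps (the regex patterns and the clean_body/CleanBody names are assumptions for illustration, not part of the pipeline above):
```python
import re

def clean_body(text):
    text = re.sub(r'\S+@\S+', ' ', text)        # strip email addresses
    text = re.sub(r'https?://\S+', ' ', text)   # strip URLs
    text = re.sub(r'[^A-Za-z\s]', ' ', text)    # strip numbers and special characters
    return re.sub(r'\s+', ' ', text).lower().strip()

df['CleanBody'] = df['RawBody'].apply(clean_body)
```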
End of explanation """ # Split into test and training data sets from sklearn.model_selection import train_test_split labels_train, labels_test, features_train, features_test = train_test_split(df['FolderId'], feature_matrix_numpy, test_size=0.20, random_state=42) print labels_train.shape print labels_test.shape print features_train.shape print features_test.shape # Train a default Logistic Regression model, with no tuning from sklearn.linear_model import LogisticRegression default_lgr_model = LogisticRegression().fit(features_train, labels_train) # Evaluate default Logistic Regression model on test data default_lgr_predictions = default_lgr_model.predict(features_test) from sklearn import metrics print metrics.accuracy_score(labels_test, default_lgr_predictions) """ Explanation: Train model & evaluate accuracies End of explanation """
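# A possible next step: overall accuracy can hide imbalance across folders,
# so a per-class summary is often worth a look. classification_report comes
# from sklearn.metrics, which is already imported above.
print metrics.classification_report(labels_test, default_lgr_predictions)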
NirantK/deep-learning-practice
01-InitNN/first-neural-network.ipynb
apache-2.0
%matplotlib inline %config InlineBackend.figure_format = 'retina' import numpy as np import pandas as pd import matplotlib.pyplot as plt import sys """ Explanation: Your first neural network In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more. End of explanation """ data_path = 'Bike-Sharing-Dataset/hour.csv' rides = pd.read_csv(data_path) rides.head() """ Explanation: Load and prepare the data A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon! End of explanation """ rides[:24*10].plot(x='dteday', y='cnt') """ Explanation: Checking out the data This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above. Below is a plot showing the number of bike riders over the first 10 days in the data set. You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model. End of explanation """ dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday'] for each in dummy_fields: dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False) rides = pd.concat([rides, dummies], axis=1) fields_to_drop = ['instant', 'dteday', 'season', 'weathersit', 'weekday', 'atemp', 'mnth', 'workingday', 'hr'] data = rides.drop(fields_to_drop, axis=1) data.head() """ Explanation: Dummy variables Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies(). End of explanation """ quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed'] # Store scalings in a dictionary so we can convert back later scaled_features = {} for each in quant_features: mean, std = data[each].mean(), data[each].std() scaled_features[each] = [mean, std] data.loc[:, each] = (data[each] - mean)/std """ Explanation: Scaling target variables To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1. The scaling factors are saved so we can go backwards when we use the network for predictions. 
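For example, undoing the standardization later is a one-liner with the saved factors (a small sketch using the scaled_features dictionary defined above):
```python
mean, std = scaled_features['cnt']
original_cnt = data['cnt'] * std + mean   # back to raw ride counts
```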
End of explanation """ # Save the last 21 days test_data = data[-21*24:] data = data[:-21*24] # Separate the data into features and targets target_fields = ['cnt', 'casual', 'registered'] features, targets = data.drop(target_fields, axis=1), data[target_fields] test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields] """ Explanation: Splitting the data into training, testing, and validation sets We'll save the last 21 days of the data to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders. End of explanation """ # Hold out the last 60 days of the remaining data as a validation set train_features, train_targets = features[:-60*24], targets[:-60*24] val_features, val_targets = features[-60*24:], targets[-60*24:] """ Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set). End of explanation """ def sigmoid(x): return 1 / (1 + np.exp(-x)) class NeuralNetwork(object): def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate): '''Set number of nodes in input, hidden and output layers.''' self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Initialize weights self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5, (self.hidden_nodes, self.input_nodes)) # print("input_hidden.shape",self.weights_input_to_hidden.shape) self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5, (self.output_nodes, self.hidden_nodes)) self.lr = learning_rate self.learning_rate = learning_rate #### Set this to your implemented sigmoid function #### # Activation function is the sigmoid function self.activation_function = sigmoid self.del_w_hidden_output = np.zeros(self.weights_input_to_hidden.shape) self.del_w_input_hidden = np.zeros(self.weights_hidden_to_output.shape) def train(self, inputs_list, targets_list): # Convert inputs list to 2d array inputs = np.array(inputs_list, ndmin=2).T targets = np.array(targets_list, ndmin=2).T #### Implement the forward pass here #### ### Forward pass ### # TODO: Hidden layer # print("weights_input_hidden.shape", self.weights_input_to_hidden.shape, "inputs.shape",inputs.shape) hidden_inputs = np.dot(self.weights_input_to_hidden, inputs) # print("hidden_inputs.shape:", hidden_inputs.shape) # signals into hidden layer hidden_outputs = self.activation_function(hidden_inputs) # print("hidden_outputs.shape:", hidden_outputs.shape) # signals from hidden layer # TODO: Output layer # print("hidden_outputs.shape:",hidden_outputs.shape, "weights_hidden_output.shape", self.weights_hidden_to_output.shape) final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs) # signals into final output layer final_outputs = final_inputs # signals from final output layer #### Implement the backward pass here #### ### Backward pass ### # TODO: Output error # print("targets.shape:",targets.shape, "final_outputs.shape", final_outputs.shape) output_errors = targets - final_outputs # Output layer error is the difference between desired target and actual output. 
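        # The output activation is the identity f(x) = x, whose derivative is 1,
        # so this error term can feed the weight update directly without an
        # extra gradient factor.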
# TODO: Backpropagated error # print("output_errors.shape:", output_errors.shape, "weights_hidden_to_output.shape", self.weights_hidden_to_output.shape) hidden_errors = np.dot(output_errors, self.weights_hidden_to_output).T # print("hidden_errors.shape", hidden_errors.shape) # errors propagated to the hidden layer hidden_grad = hidden_errors * hidden_outputs * (1 - hidden_outputs) # print("hidden_grad.shape", hidden_grad.shape) # hidden layer gradients # TODO: Update the weights self.weights_hidden_to_output += self.lr * output_errors * hidden_outputs.T # update hidden-to-output weights with gradient descent step self.weights_input_to_hidden += self.lr * hidden_grad * inputs.T # update input-to-hidden weights with gradient descent step def run(self, inputs_list): # Run a forward pass through the network inputs = np.array(inputs_list, ndmin=2).T #### Implement the forward pass here #### ### Forward pass ### # TODO: Hidden layer hidden_inputs = np.dot(self.weights_input_to_hidden, inputs) # signals into hidden layer hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer # TODO: Output layer final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs) # signals into final output layer final_outputs = final_inputs # signals from final output layer return final_outputs def MSE(y, Y): return np.mean((y-Y)**2) """ Explanation: Time to build the network Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes. The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation. We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation. Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$. Below, you have these tasks: 1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function. 2. Implement the forward pass in the train method. 3. Implement the backpropagation algorithm in the train method, including calculating the output error. 4. Implement the forward pass in the run method. 
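Before training, a quick smoke test of the forward pass can catch shape mistakes early. A minimal sketch (the toy layer sizes and inputs are arbitrary):
```python
tiny_net = NeuralNetwork(3, 2, 1, 0.5)
print(tiny_net.run([0.5, -0.2, 0.1]).shape)   # expect (1, 1): a single output value
```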
End of explanation """ ### Set the hyperparameters here ### epochs = 1000 learning_rate = 0.01 hidden_nodes = 2 output_nodes = 1 N_i = train_features.shape[1] def neural_network_training(N_i=N_i, hidden_nodes=hidden_nodes, output_nodes=output_nodes, learning_rate=learning_rate): network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate) losses = {'train':[], 'validation':[]} for e in range(epochs): # Go through a random batch of 128 records from the training data set batch = np.random.choice(train_features.index, size=128) for record, target in zip(train_features.ix[batch].values, train_targets.ix[batch]['cnt']): network.train(record, target) # Printing out the training progress train_loss = MSE(network.run(train_features), train_targets['cnt'].values) val_loss = MSE(network.run(val_features), val_targets['cnt'].values) sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \ + "% ... Training loss: " + str(train_loss)[:5] \ + " ... Validation loss: " + str(val_loss)[:5]) losses['train'].append(train_loss) losses['validation'].append(val_loss) return network, losses network, losses = neural_network_training() plt.plot(losses['train'], label='Training loss') plt.plot(losses['validation'], label='Validation loss') plt.legend() plt.ylim(ymax=0.5) """ Explanation: Training the network Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops. You'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later. Choose the number of epochs This is the number of times the dataset will pass through the network, each time updating the weights. As the number of epochs increases, the network becomes better and better at predicting the targets in the training set. You'll need to choose enough epochs to train the network well but not too many or you'll be overfitting. Choose the learning rate This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge. Choose the number of hidden nodes The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose. 
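These choices can also be compared empirically with a small sweep; a sketch for the learning rate (the candidate values are arbitrary, and each call retrains from scratch, so it is slow):
```python
for lr in [0.5, 0.1, 0.01]:
    _, lr_losses = neural_network_training(learning_rate=lr)
    print(lr, min(lr_losses['validation']))
```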
End of explanation """ fig, ax = plt.subplots(figsize=(8,4)) mean, std = scaled_features['cnt'] predictions = network.run(test_features)*std + mean ax.plot(predictions[0], label='Prediction') ax.plot((test_targets['cnt']*std + mean).values, label='Data') ax.set_xlim(right=len(predictions)) ax.legend() dates = pd.to_datetime(rides.ix[test_data.index]['dteday']) dates = dates.apply(lambda d: d.strftime('%b %d')) ax.set_xticks(np.arange(len(dates))[12::24]) _ = ax.set_xticklabels(dates[12::24], rotation=90) """ Explanation: Check out your predictions Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly. End of explanation """ print("Test Loss:", MSE(network.run(test_features)[0], test_targets['cnt'].values)) """ Explanation: Thinking about your results Answer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does? Note: You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter Your answer below How well does the model predict the data? The model predicts the data quite well with 2 hidden nodes with a MSE Validation Loss ~0.395 and Test loss of ~0.24 for 2 hidden nodes. I infer this to mean that model is generalizable to completely unseen data in this case. End of explanation """ losses_nodes = {} for i in xrange(2, 7): hidden_nodes = i print("For %1.0f hidden_nodes" % i) network, training_validation_losses = neural_network_training(hidden_nodes=i) test_loss = MSE(network.run(test_features)[0], test_targets['cnt'].values) print("\nTest Loss: %f\n" % test_loss) losses_nodes[i] = [test_loss, training_validation_losses] test_losses = [element[0] for element in losses_nodes.values()] validation_losses = [min(element[1]['validation']) for element in losses_nodes.values()] train_losses = [min(element[1]['train']) for element in losses_nodes.values()] fig, ax = plt.subplots(figsize=(8,4)) ax.plot(test_losses, label='Test') ax.plot(validation_losses, label='Validation') ax.plot(train_losses, label = 'Train') ax.set_xlim(right=len(test_losses)) ax.legend() ax.set_xticks(np.arange(len(losses_nodes.keys()))) _ = ax.set_xticklabels(losses_nodes.keys(), rotation=90) """ Explanation: Contrary to expectations, increasing the number of hidden nodes has no drastic impact on the test loss and validation loss numbers both. End of explanation """ def squared_error(y, Y): return (y - Y)**2 SE_Test = squared_error(network.run(test_features)[0], test_targets['cnt'].values) fig, ax = plt.subplots(figsize=(8,4)) mean, std = scaled_features['cnt'] ax.plot(SE_Test, label='Squared Errors (on Test Data)') # ax.plot((test_targets['cnt']*std + mean).values, label='Data') ax.set_xlim(right=len(SE_Test)) ax.legend() ax.set_xticks(np.arange(len(dates))[12::24]) _ = ax.set_xticklabels(dates[12::24], rotation=90) """ Explanation: How well does the model predict the data? The model predicts the data quite well with 2 hidden nodes with a MSE Validation Loss ~0.395 and Test loss of ~0.24 for 2 hidden nodes. I infer this to mean that model is generalizable to completely unseen data in this case. Where does it fail? 
End of explanation """ fig, ax = plt.subplots(figsize=(8,4)) ax.plot(SE_Test*std, label='Squared Errors (on Test Data)') ax.plot((test_targets['cnt']*std + mean).values, label='Data') # ax.plot(predictions[0], label='Predictions') # ax.plot((test_targets['cnt']*std + mean).values, label='Data') ax.set_xlim(right=len(SE_Test)) ax.legend() ax.set_xticks(np.arange(len(dates))[12::24]) _ = ax.set_xticklabels(dates[12::24], rotation=90) print(np.corrcoef(test_targets['cnt'].values*std+mean,SE_Test)) """ Explanation: Where does the model fail? The above graph clearly illustrates that the mean squared error is highest around holidays. These includes days such as Christmas and New Year eve (Dec 31). It is on such holidays that the model fails. Why does it fail where it does? End of explanation """ import unittest inputs = [0.5, -0.2, 0.1] targets = [0.4] test_w_i_h = np.array([[0.1, 0.4, -0.3], [-0.2, 0.5, 0.2]]) test_w_h_o = np.array([[0.3, -0.1]]) class TestMethods(unittest.TestCase): ########## # Unit tests for data loading ########## def runTest(self): pass def test_data_path(self): # Test that file path to dataset has been unaltered self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv') def test_data_loaded(self): # Test that data frame loaded self.assertTrue(isinstance(rides, pd.DataFrame)) ########## # Unit tests for network functionality ########## def test_activation(self): network = NeuralNetwork(3, 2, 1, 0.5) # Test that the activation function is a sigmoid self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5)))) def test_train(self): # Test that weights are updated correctly on training network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() network.train(inputs, targets) self.assertTrue(np.allclose(network.weights_hidden_to_output, np.array([[ 0.37275328, -0.03172939]]))) self.assertTrue(np.allclose(network.weights_input_to_hidden, np.array([[ 0.10562014, 0.39775194, -0.29887597], [-0.20185996, 0.50074398, 0.19962801]]))) def test_run(self): # Test correctness of run method network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() self.assertTrue(np.allclose(network.run(inputs), 0.09998924)) suite = unittest.TestLoader().loadTestsFromModule(TestMethods()) unittest.TextTestRunner().run(suite) """ Explanation: For checking if there is a relation between model failing and actual consumption, let us look at the correlation between mean squared error and original values. Visually, we see that this is unlikely to be true. It also has a small and safe to ignore Pearson correlation coefficient. I don't completely understand why the model fails where it fails, but I suspect having a way to factor in seasonality from previous years would improve this. Unit tests Run these unit tests to check the correctness of your network implementation. These tests must all be successful to pass the project. End of explanation """
laserson/phip-stat
notebooks/phip_modeling/bayesian-modeling-stats.ipynb
apache-2.0
import pandas as pd import numpy as np import scipy as sp import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline cpm = pd.read_csv('/Users/laserson/tmp/phip_analysis/phip-9/cpm.tsv', sep='\t', header=0, index_col=0) upper_bound = sp.stats.scoreatpercentile(cpm.values.ravel(), 99.9) upper_bound fig, ax = plt.subplots() _ = ax.hist(cpm.values.ravel(), bins=100, log=True) _ = ax.set(title='cpm') fig, ax = plt.subplots() _ = ax.hist(np.log10(cpm.values.ravel() + 0.5), bins=100, log=False) _ = ax.set(title='log10(cpm + 0.5)') fig, ax = plt.subplots() _ = ax.hist(np.log10(cpm.values.ravel() + 0.5), bins=100, log=True) _ = ax.set(title='log10(cpm + 0.5)') """ Explanation: Statistical exploration for Bayesian analysis of PhIP-seq End of explanation """ fig, ax = plt.subplots() _ = ax.hist(cpm.values.ravel()[cpm.values.ravel() <= upper_bound], bins=range(100), log=False) _ = ax.set(xlim=(0, 60)) _ = ax.set(title='trimmed cpm') trimmed_cpm = cpm.values.ravel()[cpm.values.ravel() <= upper_bound] trimmed_cpm.mean(), trimmed_cpm.std() means = cpm.apply(lambda x: x[x <= upper_bound].mean(), axis=1, raw=True) _, edges = np.histogram(means, bins=[sp.stats.scoreatpercentile(means, p) for p in np.linspace(0, 100, 10)]) def plot_hist(ax, a): h, e = np.histogram(a, bins=100, range=(0, upper_bound), density=True) ax.hlines(h, e[:-1], e[1:]) for i in range(len(edges[:-1])): left = edges[i] right = edges[i + 1] rows = (means >= left) & (means <= right) values = cpm[rows].values.ravel() fig, ax = plt.subplots() plot_hist(ax, values) ax.set(xlim=(0, 50), title='mean in ({}, {})'.format(left, right)) """ Explanation: Plot only the lowest 99.9% of the data End of explanation """ a = np.random.poisson(8, 10000) fig, ax = plt.subplots() plot_hist(ax, a) ax.set(xlim=(0, 50)) """ Explanation: Do the slices look Poisson? End of explanation """ fig, ax = plt.subplots() plot_hist(ax, means) ax.set(xlim=(0, 50)) a = np.random.gamma(1, 10, 10000) fig, ax = plt.subplots() plot_hist(ax, a) ax.set(xlim=(0, 50)) means.mean() """ Explanation: For the most part. Maybe try NegBin just in case What does the distribution of the trimmed means look like? 
End of explanation """ s = np.exp(np.median(np.log(cpm.values + 0.5) - np.log(cpm.values + 0.5).mean(axis=1).reshape((cpm.shape[0], 1)), axis=0)) _ = sns.distplot(s) q = (cpm.values / s).mean(axis=1) fig, ax = plt.subplots() _ = ax.hist(q, bins=100, log=False) fig, ax = plt.subplots() _ = ax.hist(q, bins=100, log=True) w = (cpm.values / s).std(axis=1, ddof=1) fig, ax = plt.subplots() _ = ax.hist(w, bins=100, log=True) fig, ax = plt.subplots() _ = ax.scatter(q, w) _ = sns.lmplot('q', 'w', pd.DataFrame({'q': q, 'w': w})) list(zip(cpm.values.sum(axis=0), s)) s a = np.random.gamma(30, 1/30, 1000) sns.distplot(a) """ Explanation: Following Anders and Huber, Genome Biology 2010, compute some of their stats Compute size factors End of explanation """ import pystan cpm = pd.read_csv('/Users/laserson/tmp/phip_analysis/phip-9/cpm.tsv', sep='\t', header=0, index_col=0) upper_bound = sp.stats.scoreatpercentile(cpm.values, 99.9) trimmed_means = cpm.apply(lambda x: x[x <= upper_bound].mean(), axis=1, raw=True).values brm = pystan.StanModel(model_name='background_rates', file='/Users/laserson/repos/bamophip/background_rates.stan') data = { 'num_clones': trimmed_means.shape[0], 'trimmed_means': trimmed_means } br_fit = brm.sampling(data=data, iter=2000, chains=4) br_fit br_fit.plot() alpha, beta, _ = br_fit.get_posterior_mean().mean(axis=1) alpha, beta h, e = np.histogram(np.random.gamma(alpha, 1 / beta, 50000), bins='auto', density=True) fig, ax = plt.subplots() _ = ax.hist(trimmed_means, bins=100, normed=True) _ = ax.hlines(h, e[:-1], e[1:]) _ = ax.set(xlim=(0, 50)) # assumes the counts for each clone are Poisson distributed with the learned Gamma prior # Therefore, the posterior is Gamma distributed, and we use the expression for its expected value trimmed_sums = cpm.apply(lambda x: x[x <= upper_bound].sum(), axis=1, raw=True).values trimmed_sizes = cpm.apply(lambda x: (x <= upper_bound).sum(), axis=1, raw=True).values background_rates = (alpha + trimmed_sums) / (beta + trimmed_sizes) # mlxp is "minus log 10 pval" mlxp = [] for i in range(cpm.shape[0]): mlxp.append(-sp.stats.poisson.logsf(cpm.values[i], background_rates[i]) / np.log(10)) mlxp = np.asarray(mlxp) fig, ax = plt.subplots() h, e = np.histogram(10**(-mlxp.ravel()), bins='auto') ax.hlines(h, e[:-1], e[1:]) ax.set(xlim=(0, 1)) fig, ax = plt.subplots() finite = np.isfinite(mlxp.ravel()) _ = ax.hist(mlxp.ravel()[finite], bins=100, log=True) fig, ax = plt.subplots() finite = np.isfinite(mlxp.ravel()) _ = ax.hist(np.log10(mlxp.ravel()[finite] + 0.5), bins=100, log=True) old_pvals = pd.read_csv('/Users/laserson/tmp/phip_analysis/phip-9/pvals.tsv', sep='\t', header=0, index_col=0) fig, ax = plt.subplots() h, e = np.histogram(10**(-old_pvals.values.ravel()), bins='auto') ax.hlines(h, e[:-1], e[1:]) ax.set(xlim=(0, 1)) (old_pvals.values.ravel() > 10).sum() (mlxp > 10).sum() len(mlxp.ravel()) """ Explanation: Proceeding with the following strategy/model Trim data to remove top 0.1% of count values. Compute mean of each row and use the means to fit a gamma distribution. Using these values, define a posterior on a rate for each clone, assuming Poisson stats for each cell. This means the posterior is also gamma distributed. Then compute the probability of seeing a more extreme value, weighted with the posterior on r_i. 
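In symbols (introducing $n_i$ for the number of retained observations in row $i$ and $x_{ij}$ for its counts), the conjugate Gamma-Poisson update behind the background_rates computation above is
\begin{equation}
r_i \mid x_{i1},\dots,x_{in_i} \sim \mathrm{Gamma}\left(\alpha + \sum_j x_{ij},\ \beta + n_i\right),
\qquad
\mathbb{E}[r_i \mid x] = \frac{\alpha + \sum_j x_{ij}}{\beta + n_i}
\end{equation}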
End of explanation """ sp.stats.gamma.fit(trimmed_means) fig, ax = plt.subplots() _ = ax.hist(sp.stats.gamma.rvs(a=0.3387, loc=0, scale=3.102, size=10000), bins=100) _ = ax.set(xlim=(0, 50)) """ Explanation: Can we use scipy's MLE for the gamma parameters instead? End of explanation """ pos = trimmed_means > 0 n = len(trimmed_means) s = trimmed_means[pos].sum() sl = np.log(trimmed_means[pos]).sum() def ll(x): return -1 * (n * x[0] * np.log(x[1]) - n * sp.special.gammaln(x[0]) + (x[0] - 1) * sl - x[1] * s) param = sp.optimize.minimize(ll, np.asarray([2, 1]), bounds=[(np.nextafter(0, 1), None), (np.nextafter(0, 1), None)]) param param.x """ Explanation: Hmmm...doesn't appear to get the correct solution. Alternatively, let's try optimizing the log likelihood ourselves End of explanation """ mlxp = pd.read_csv('/Users/laserson/tmp/phip_analysis/sjogrens/mlxp.tsv', sep='\t', index_col=0, header=0) inputs = pd.read_csv('/Users/laserson/repos/phage_libraries_private/human90/inputs/human90-larman1-input.tsv', sep='\t', index_col=0, header=0) m = pd.merge(mlxp, inputs, left_index=True, right_index=True) sample = 'Sjogrens.serum.Sjogrens.FS12-03967.20A20G.1' sp.stats.pearsonr(10**(-m[sample]), m['input']) sp.stats.spearmanr(10**(-m[sample]), m['input']) fig, ax = plt.subplots() _ = ax.scatter(10**(-m[sample]), m['input']) fig, ax = plt.subplots() _ = ax.scatter(m[sample], m['input']) h, xe, ye = np.histogram2d(m[sample], m['input'], bins=100) fig, ax = plt.subplots() _ = ax.imshow(h) np.histogram2d """ Explanation: SUCCESS! Do the p-values have a correlation with the peptide abundance? End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/hammoz-consortium/cmip6/models/sandbox-3/atmoschem.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'hammoz-consortium', 'sandbox-3', 'atmoschem') """ Explanation: ES-DOC CMIP6 Model Properties - Atmoschem MIP Era: CMIP6 Institute: HAMMOZ-CONSORTIUM Source ID: SANDBOX-3 Topic: Atmoschem Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry. Properties: 84 (39 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:03 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Software Properties 3. Key Properties --&gt; Timestep Framework 4. Key Properties --&gt; Timestep Framework --&gt; Split Operator Order 5. Key Properties --&gt; Tuning Applied 6. Grid 7. Grid --&gt; Resolution 8. Transport 9. Emissions Concentrations 10. Emissions Concentrations --&gt; Surface Emissions 11. Emissions Concentrations --&gt; Atmospheric Emissions 12. Emissions Concentrations --&gt; Concentrations 13. Gas Phase Chemistry 14. Stratospheric Heterogeneous Chemistry 15. Tropospheric Heterogeneous Chemistry 16. Photo Chemistry 17. Photo Chemistry --&gt; Photolysis 1. Key Properties Key properties of the atmospheric chemistry 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of atmospheric chemistry model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of atmospheric chemistry model code. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "troposhere" # "stratosphere" # "mesosphere" # "mesosphere" # "whole atmosphere" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Chemistry Scheme Scope Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Atmospheric domains covered by the atmospheric chemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.4. 
Basic Approximations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Basic approximations made in the atmospheric chemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "3D mass/mixing ratio for gas" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.5. Prognostic Variables Form Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Form of prognostic variables in the atmospheric chemistry component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 1.6. Number Of Tracers Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of advected tracers in the atmospheric chemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.family_approach') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 1.7. Family Approach Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Atmospheric chemistry calculations (not advection) generalized into families of species? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 1.8. Coupling With Chemical Reactivity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Atmospheric chemistry transport scheme turbulence is couple with chemical reactivity? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Software Properties Software properties of aerosol code 2.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Operator splitting" # "Integrated" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Timestep Framework Timestepping in the atmospheric chemistry model 3.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Mathematical method deployed to solve the evolution of a given variable End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Split Operator Advection Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for chemical species advection (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.3. Split Operator Physical Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for physics (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.4. Split Operator Chemistry Timestep Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for chemistry (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 3.5. Split Operator Alternate Order Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.6. Integrated Timestep Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestep for the atmospheric chemistry model (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Implicit" # "Semi-implicit" # "Semi-analytic" # "Impact solver" # "Back Euler" # "Newton Raphson" # "Rosenbrock" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3.7. Integrated Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specify the type of timestep scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4. 
Key Properties --&gt; Timestep Framework --&gt; Split Operator Order ** 4.1. Turbulence Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.2. Convection Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.3. Precipitation Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.4. Emissions Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.5. Deposition Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.6. Gas Phase Chemistry Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.7. 
Tropospheric Heterogeneous Phase Chemistry Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.9. Photo Chemistry Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.10. Aerosols Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Tuning Applied Tuning methodology for atmospheric chemistry component 5.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics of the global mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics of mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Grid Atmospheric chemistry grid 6.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe the general structure of the atmopsheric chemistry grid End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.2. Matches Atmosphere Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 * Does the atmospheric chemistry grid match the atmosphere grid?* End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Grid --&gt; Resolution Resolution in the atmospheric chemistry grid 7.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Canonical Horizontal Resolution Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 7.3. Number Of Horizontal Gridpoints Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 7.4. Number Of Vertical Levels Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Number of vertical levels resolved on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 7.5. Is Adaptive Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Default is False. Set true if grid resolution changes during execution. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.transport.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Transport Atmospheric chemistry transport 8.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview of transport implementation End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 8.2. Use Atmospheric Transport Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is transport handled by the atmosphere, rather than within atmospheric cehmistry? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.transport.transport_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Transport Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If transport is handled within the atmospheric chemistry scheme, describe it. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Emissions Concentrations Atmospheric chemistry emissions 9.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview atmospheric chemistry emissions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Vegetation" # "Soil" # "Sea surface" # "Anthropogenic" # "Biomass burning" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Emissions Concentrations --&gt; Surface Emissions ** 10.1. Sources Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Climatology" # "Spatially uniform mixing ratio" # "Spatially uniform concentration" # "Interactive" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10.2. Method Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.3. Prescribed Climatology Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant)) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.4. Prescribed Spatially Uniform Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted at the surface and prescribed as spatially uniform End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.5. Interactive Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted at the surface and specified via an interactive method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.6. Other Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted at the surface and specified via any other method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Aircraft" # "Biomass burning" # "Lightning" # "Volcanos" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11. Emissions Concentrations --&gt; Atmospheric Emissions TO DO 11.1. Sources Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Climatology" # "Spatially uniform mixing ratio" # "Spatially uniform concentration" # "Interactive" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.2. Method Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.3. 
Prescribed Climatology Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant)) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.4. Prescribed Spatially Uniform Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted in the atmosphere and prescribed as spatially uniform End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.5. Interactive Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted in the atmosphere and specified via an interactive method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.6. Other Emitted Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of chemical species emitted in the atmosphere and specified via an &quot;other method&quot; End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12. Emissions Concentrations --&gt; Concentrations TO DO 12.1. Prescribed Lower Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the lower boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.2. Prescribed Upper Boundary Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of species prescribed at the upper boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 13. Gas Phase Chemistry Atmospheric chemistry transport 13.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview gas phase atmospheric chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "HOx" # "NOy" # "Ox" # "Cly" # "HSOx" # "Bry" # "VOCs" # "isoprene" # "H2O" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.2. 
Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Species included in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.3. Number Of Bimolecular Reactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of bi-molecular reactions in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.4. Number Of Termolecular Reactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of ter-molecular reactions in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of reactions in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of reactions in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.7. Number Of Advected Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of advected species in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.8. Number Of Steady State Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.9. Interactive Dry Deposition Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.10. Wet Deposition Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.11. Wet Oxidation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14. Stratospheric Heterogeneous Chemistry Atmospheric chemistry startospheric heterogeneous chemistry 14.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview stratospheric heterogenous atmospheric chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Cly" # "Bry" # "NOy" # TODO - please enter value(s) """ Explanation: 14.2. Gas Phase Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Gas phase species included in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Polar stratospheric ice" # "NAT (Nitric acid trihydrate)" # "NAD (Nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particule))" # TODO - please enter value(s) """ Explanation: 14.3. Aerosol Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Aerosol species included in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.4. Number Of Steady State Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of steady state species in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 14.5. 
Sedimentation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is sedimentation is included in the stratospheric heterogeneous chemistry scheme or not? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 14.6. Coagulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is coagulation is included in the stratospheric heterogeneous chemistry scheme or not? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Tropospheric Heterogeneous Chemistry Atmospheric chemistry tropospheric heterogeneous chemistry 15.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview tropospheric heterogenous atmospheric chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Gas Phase Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 List of gas phase species included in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Nitrate" # "Sea salt" # "Dust" # "Ice" # "Organic" # "Black carbon/soot" # "Polar stratospheric ice" # "Secondary organic aerosols" # "Particulate organic matter" # TODO - please enter value(s) """ Explanation: 15.3. Aerosol Species Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Aerosol species included in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.4. Number Of Steady State Species Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of steady state species in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.5. Interactive Dry Deposition Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.6. Coagulation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is coagulation is included in the tropospheric heterogeneous chemistry scheme or not? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16. Photo Chemistry Atmospheric chemistry photo chemistry 16.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview atmospheric photo chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 16.2. Number Of Reactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 The number of reactions in the photo-chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Offline (clear sky)" # "Offline (with clouds)" # "Online" # TODO - please enter value(s) """ Explanation: 17. Photo Chemistry --&gt; Photolysis Photolysis scheme 17.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Photolysis scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.2. Environmental Conditions Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.) End of explanation """
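# ILLUSTRATIVE EXAMPLE ONLY: every cell above leaves its value as a TODO.
# The fill-in pattern is shown below for the two photolysis properties of
# section 17; the values chosen here are hypothetical and do not describe any
# real model. ENUM properties must use one of the listed valid choices, while
# STRING properties take free text.
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
DOC.set_value("Offline (with clouds)")

DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
DOC.set_value("Hypothetical example: clear-sky cross-sections modified for modelled cloud cover.")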
ioos/notebooks_demos
notebooks/2016-12-20-searching_glider_deployments.ipynb
mit
import requests url = "http://data.ioos.us/gliders/providers/api/deployment" response = requests.get(url) res = response.json() print("Found {0} deployments!".format(res["num_results"])) """ Explanation: Accessing glider data via the Glider DAC API with Python IOOS provides an API for getting information on all the glider deployments available in the Glider DAC. The raw JSON can be accessed at https://data.ioos.us/gliders/providers/api/deployment and it is quite simple to parse it with Python. First, lets check how many glider deployments exist in the Glider DAC. End of explanation """ deployments = res["results"] deployment = deployments[-1] deployment """ Explanation: And here is the JSON of the last deployment found in the list. End of explanation """ import iris iris.FUTURE.netcdf_promote = True # Get this specific glider because it looks cool ;-) for deployment in deployments: if deployment["name"] == "sp064-20161214T1913": url = deployment["dap"] # See https://github.com/Unidata/netcdf-c/issues/1299 for the explanation of `#fillmismatch`. cubes = iris.load_raw("".join([url, "#fillmismatch"])) print(cubes) """ Explanation: The metadata is very rich and informative. A quick way to get to the data is to read dap endpoint with iris. End of explanation """ import matplotlib.pyplot as plt import numpy as np import numpy.ma as ma import seawater as sw from mpl_toolkits.axes_grid1.inset_locator import inset_axes def distance(x, y, units="km"): if ma.isMaskedArray(x): x = x.filled(fill_value=np.NaN) if ma.isMaskedArray(y): y = y.filled(fill_value=np.NaN) dist, pha = sw.dist(x, y, units=units) return np.r_[0, np.cumsum(dist)] def apply_range(cube_coord): if isinstance(cube_coord, iris.cube.Cube): data = cube_coord.data.squeeze() elif isinstance(cube_coord, (iris.coords.AuxCoord, iris.coords.Coord)): data = cube_coord.points.squeeze() actual_range = cube_coord.attributes.get("actual_range") if actual_range is not None: vmin, vmax = actual_range data = ma.masked_outside(data, vmin, vmax) return data def plot_glider(cube, cmap=plt.cm.viridis, figsize=(9, 3.75), track_inset=False): data = apply_range(cube) x = apply_range(cube.coord(axis="X")) y = apply_range(cube.coord(axis="Y")) z = apply_range(cube.coord(axis="Z")) t = cube.coord(axis="T") t = t.units.num2date(t.points.squeeze()) fig, ax = plt.subplots(figsize=figsize) dist = distance(x, y) z = ma.abs(z) dist, _ = np.broadcast_arrays(dist[..., np.newaxis], z.filled(fill_value=np.NaN)) dist, z = map(ma.masked_invalid, (dist, z)) cs = ax.pcolor(dist, z, data, cmap=cmap, snap=True) kw = dict(orientation="horizontal", extend="both", shrink=0.65) cbar = fig.colorbar(cs, **kw) if track_inset: axin = inset_axes( ax, width=2, height=2, loc=4, bbox_to_anchor=(1.15, 0.35), bbox_transform=ax.figure.transFigure, ) axin.plot(x, y, "k.") start, end = (x[0], y[0]), (x[-1], y[-1]) kw = dict(marker="o", linestyle="none") axin.plot(*start, color="g", **kw) axin.plot(*end, color="r", **kw) axin.axis("off") ax.invert_yaxis() ax.invert_xaxis() ax.set_xlabel("Distance (km)") ax.set_ylabel("Depth (m)") return fig, ax, cbar """ Explanation: In order to plot, for example sea water temperature data, one must clean the data first for missing values End of explanation """ %matplotlib inline temp = cubes.extract_strict("sea_water_temperature") fig, ax, cbar = plot_glider( temp, cmap=plt.cm.viridis, figsize=(9, 4.25), track_inset=True ) """ Explanation: The functions above apply the actual_range metadata to the data, mask the invalid/bad values, and prepare the parameters 
for plotting. The figure below shows the temperature slice (left), and glider track (right) with start and end points marked with green and red respectively. Note: This glider was deployed off the west of the U.S. End of explanation """ bbox = [[-125.72, 32.60], [-117.57, 36.93]] """ Explanation: There are many things the user can do with the API. Here is another example that finds all glider deployments within a boundary box. End of explanation """ from shapely.geometry import LineString def parse_geometry(geometry): """ Filters out potentially bad coordinate pairs as returned from GliderDAC. Returns a safe geometry object. :param dict geometry: A GeoJSON Geometry object """ coords = [] for lon, lat in geometry["coordinates"]: if lon is None or lat is None: continue coords.append([lon, lat]) return {"coordinates": coords} def fetch_trajectory(deployment): """ Downloads the track as GeoJSON from GliderDAC :param dict deployment: The deployment object as returned from GliderDAC """ track_url = "http://data.ioos.us/gliders/status/api/track/{}".format response = requests.get(track_url(deployment["deployment_dir"])) if response.status_code != 200: raise IOError( "Failed to get Glider Track for %s" % deployment["deployment_dir"] ) geometry = parse_geometry(response.json()) coords = LineString(geometry["coordinates"]) return coords """ Explanation: The cell below defines two helper functions to parse the geometry from the JSON and convert the trajectory to a shapely LineString to prepare the data for GIS operations later. End of explanation """ res = response.json()["results"] len(res[-100:]) from shapely.geometry import box search_box = box(bbox[0][0], bbox[0][1], bbox[1][0], bbox[1][1]) inside = dict() # Getting only the 10 deployments. for deployment in response.json()["results"][-10:]: try: coords = fetch_trajectory(deployment) except IOError: continue if search_box.intersects(coords): inside.update({deployment["name"]: coords}) """ Explanation: Now it is easy to check which tracks lie inside the box. End of explanation """ def plot_track(coords, name, color="orange"): x, y = coords.xy locations = list(zip(y.tolist(), x.tolist())) kw = {"fill": True, "radius": 10, "stroke": False} folium.CircleMarker(locations[0], color="green", **kw).add_to(m) folium.CircleMarker(locations[-1], color="red", **kw).add_to(m) folium.PolyLine( locations=locations, color=color, weight=8, opacity=0.2, popup=name ).add_to(m) import folium tiles = ( "http://services.arcgisonline.com/arcgis/rest/services/" "World_Topo_Map/MapServer/MapServer/tile/{z}/{y}/{x}" ) location = [search_box.centroid.y, search_box.centroid.x] m = folium.Map(location=location, zoom_start=5, tiles=tiles, attr="ESRI") for name, coords in inside.items(): plot_track(coords, name, color="orange") m """ Explanation: Finally, we can create an interactive map displaying the tracks found in the bounding box. End of explanation """
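# A small follow-up that reuses only objects defined above: print which
# deployments fell inside the bounding box and how many points each shapely
# track contains, then write the folium map to a standalone HTML file
# ('glider_tracks.html' is an arbitrary output name).
for name, coords in inside.items():
    print("{} -- {} track points".format(name, len(coords.coords)))

m.save("glider_tracks.html")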
mne-tools/mne-tools.github.io
0.15/_downloads/plot_ecog.ipynb
bsd-3-clause
# Authors: Eric Larson <larson.eric.d@gmail.com> # Chris Holdgraf <choldgraf@gmail.com> # # License: BSD (3-clause) import numpy as np import matplotlib.pyplot as plt from scipy.io import loadmat from mayavi import mlab import mne from mne.viz import plot_alignment, snapshot_brain_montage print(__doc__) """ Explanation: Working with ECoG data MNE supports working with more than just MEG and EEG data. Here we show some of the functions that can be used to facilitate working with electrocorticography (ECoG) data. End of explanation """ mat = loadmat(mne.datasets.misc.data_path() + '/ecog/sample_ecog.mat') ch_names = mat['ch_names'].tolist() elec = mat['elec'] dig_ch_pos = dict(zip(ch_names, elec)) mon = mne.channels.DigMontage(dig_ch_pos=dig_ch_pos) print('Created %s channel positions' % len(ch_names)) """ Explanation: Let's load some ECoG electrode locations and names, and turn them into a :class:mne.channels.DigMontage class. End of explanation """ info = mne.create_info(ch_names, 1000., 'ecog', montage=mon) """ Explanation: Now that we have our electrode positions in MRI coordinates, we can create our measurement info structure. End of explanation """ subjects_dir = mne.datasets.sample.data_path() + '/subjects' fig = plot_alignment(info, subject='sample', subjects_dir=subjects_dir, surfaces=['pial']) mlab.view(200, 70) """ Explanation: We can then plot the locations of our electrodes on our subject's brain. <div class="alert alert-info"><h4>Note</h4><p>These are not real electrodes for this subject, so they do not align to the cortical surface perfectly.</p></div> End of explanation """ # We'll once again plot the surface, then take a snapshot. fig = plot_alignment(info, subject='sample', subjects_dir=subjects_dir, surfaces='pial') mlab.view(200, 70) xy, im = snapshot_brain_montage(fig, mon) # Convert from a dictionary to array to plot xy_pts = np.vstack(xy[ch] for ch in info['ch_names']) # Define an arbitrary "activity" pattern for viz activity = np.linspace(100, 200, xy_pts.shape[0]) # This allows us to use matplotlib to create arbitrary 2d scatterplots _, ax = plt.subplots(figsize=(10, 10)) ax.imshow(im) ax.scatter(*xy_pts.T, c=activity, s=200, cmap='coolwarm') ax.set_axis_off() plt.show() """ Explanation: Sometimes it is useful to make a scatterplot for the current figure view. This is best accomplished with matplotlib. We can capture an image of the current mayavi view, along with the xy position of each electrode, with the snapshot_brain_montage function. End of explanation """
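# A variation on the scatterplot above, reusing the snapshot image (im), the
# electrode positions (xy_pts) and the arbitrary activity pattern: add a
# colorbar and label the first few channel names so individual contacts can be
# identified. Only matplotlib styling is new here.
fig, ax = plt.subplots(figsize=(10, 10))
ax.imshow(im)
sc = ax.scatter(*xy_pts.T, c=activity, s=200, cmap='coolwarm')
fig.colorbar(sc, ax=ax, shrink=0.7, label='activity (arbitrary units)')
for name, (x, y) in zip(info['ch_names'][:5], xy_pts[:5]):
    ax.annotate(name, (x, y), color='white', fontsize=8)
ax.set_axis_off()
plt.show()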
amorgun/shad-ml-notebooks
notebooks/s1-6/pca.ipynb
unlicense
# We will plot how various quantities depend on the dimensionality of the space
def plot_dim(prop, dims=(1, 30), samples=10000, **kwargs):
    ds = range(dims[0], dims[1] + 1)
    plot(ds, list(map(lambda d: prop(d, samples=samples, **kwargs), ds)))
    xlim(dims)

from scipy.stats import gaussian_kde

def kde_plot(prop, dim, samples=10000, **kwargs):
    vs = array([prop(dim, samples=1, **kwargs) for _ in range(samples)])
    k = gaussian_kde(vs)
    xs = linspace(min(vs), max(vs), num=1000)
    plot(xs, k(xs))

# Average norm of a random vector drawn from the unit cube
def avg_norm(d, samples):
    vs = random.random(size=(samples, d))
    return average(linalg.norm(vs, axis=1))

plot_dim(avg_norm)

# Ratio of the volumes of the inscribed ball and the cube
def volume_ratio(d, samples):
    vs = random.random(size=(samples, d))
    return average(linalg.norm(vs, axis=1) <= 1)

plot_dim(volume_ratio)

# Volume of the unit ball
def sphere_volume(d, samples):
    return volume_ratio(d, samples) * 2 ** d

plot_dim(sphere_volume, samples=1000000)

# Distance between two random vectors drawn from the unit cube
def rand_dist(d, samples):
    x = random.random(size=(samples, d))
    y = random.random(size=(samples, d))
    return average(linalg.norm(x - y, axis=1))

plot_dim(rand_dist)

# Norm of a vector drawn from a multivariate normal distribution
def gaussian_dist(d, samples):
    vs = random.randn(samples, d)
    return average(linalg.norm(vs, axis=1))

plot_dim(gaussian_dist)

# Relative difference between the minimum and maximum norm in a set of points
def dist_ratio(d, points, samples):
    vs = random.random(size=(points, d))
    ns = linalg.norm(vs, axis=1)
    ma = max(ns)
    mi = min(ns)
    return (ma - mi) / mi

kde_plot(dist_ratio, 100, points=50)

plot_dim(dist_ratio, points=10000)

# Cosine of the angle between two random vectors
def rand_cosine(d, samples):
    xs = random.random(size=(samples, d)) * 2 - 1
    ys = random.random(size=(samples, d)) * 2 - 1
    return average(dot(xs, ys.T).diagonal() / linalg.norm(xs, axis=1) / linalg.norm(ys, axis=1))

kde_plot(rand_cosine, 300)
"""
Explanation: The curse of dimensionality
End of explanation
"""
from sklearn.datasets import load_digits
digits = load_digits()

f, plots = plt.subplots(1, 10)
for i in range(10):
    plots[i].imshow(digits.images[i], cmap='binary');
    plots[i].axis('off');

from sklearn.decomposition import PCA

data = digits.data
n_components = list(range(1, 10)) + list(range(10, 32, 2)) + [32, 48, 64]
pca = [PCA(n_components=i) for i in n_components]
for p in pca:
    p.fit(data)
components = pca[len(pca) - 1].transform(data)

f, plots = plt.subplots(len(n_components), 10)
f.set_size_inches(16, 2*len(n_components))
plt.subplots_adjust(hspace=0)
for i, p in enumerate(pca):
    for j in range(10):
        plots[i][j].imshow(p.inverse_transform(components[j, :p.n_components]).reshape((8,8)), cmap='binary');
        plots[i][j].axis('off')

f, plots = plt.subplots(1, 5)
for n in range(5):
    plots[n].imshow(pca[4].inverse_transform([[0, 100][i == n] for i in range(5)]).reshape((8,8)), cmap='binary');
    plots[n].axis('off')

best_pca = pca[len(pca) - 1]
plot([sum(best_pca.explained_variance_ratio_[:i]) for i in range(64)]);

from sklearn.decomposition import RandomizedPCA
"""
Explanation: PCA
End of explanation
"""
from sklearn.decomposition import FastICA

n_components = 8
ica = FastICA(n_components=n_components).fit(data)

f, plots = plt.subplots(1, n_components)
for n in range(n_components):
    plots[n].imshow(ica.inverse_transform(np.array([[0, 100][i == n] for i in range(n_components)])).reshape((8,8)), cmap='binary');
    plots[n].axis('off')
"""
Explanation: ICA
http://scikit-learn.org/stable/auto_examples/decomposition/plot_ica_blind_source_separation.html
End of explanation
"""
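# A numeric counterpart to the pictures above: mean squared reconstruction
# error of the digits data for each fitted PCA model. Nothing new is fitted;
# this reuses `data` and the `pca` list defined earlier.
for p in pca:
    reconstructed = p.inverse_transform(p.transform(data))
    mse = ((data - reconstructed) ** 2).mean()
    print('%2d components: reconstruction MSE = %.3f' % (p.n_components, mse))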
phoebe-project/phoebe2-docs
development/tutorials/emcee_custom_lnprob.ipynb
gpl-3.0
#!pip install -I "phoebe>=2.4,<2.5" import phoebe from phoebe import u # units import numpy as np logger = phoebe.logger('error') """ Explanation: Advanced: Custom Cost Funtion (with emcee) IMPORTANT: this tutorial assumes basic knowledge (and uses a file resulting from) the emcee tutorial, although the custom cost function itself can be used for any optimizer or sampler. Setup Let's first make sure we have the latest version of PHOEBE 2.4 installed (uncomment this line if running in an online notebook session such as colab). End of explanation """ b = phoebe.load('emcee_advanced_tutorials.bundle') """ Explanation: We'll then start with the bundle from the end of the emcee tutorial. If you're running this notebook locally, you will need to run that first to create the emcee_advanced_tutorials.bundle file that we will use here. End of explanation """ def default_lnprob(b, model, lnpriors, priors, priors_combine): print("* calling default_lnprob") return lnpriors + b.calculate_lnlikelihood(model=model) """ Explanation: Defining the custom cost function As is described in the b.run_solver API docs, a custom function can be passed which overrides the internal default cost function. This function must accept b, model, lnpriors, priors, priors_combine as arguments and return the lnprobability (cost function). The arguments are as follows: * b: the bundle with the current face-values for this forward model * model: the name of the forward model in b * lnpriors: the pre-computed value of the log-priors by passing priors and priors_combine to b.calculate_lnp * priors: the name(s) of the prior distributions * priors_combine: the choice for how to combine priors if priors includes more than one distribution for any single parameter. If a custom function is not passed, the default cost function is the sum of the lnlikelihood (from b.calculate_lnlikelihood) and the probability of drawing the current face-values from the passed priors. Let's reproduce this default case for the sake of this example. We'll include a print statement just for confirmation that our function is being called. In practice, you could do any modifications here with access to parameter values, distributions, synthetic models, and observations. End of explanation """ b.run_solver('emcee_solver', custom_lnprobability_callable=default_lnprob, niters=1, solution='emcee_sol_custom_lnprob', overwrite=True) """ Explanation: run_solver In order to swap out the default cost function with our custom cost function, we must pass the function itself to custom_lnprobability_callable when calling b.run_solver End of explanation """
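# For illustration, a second custom cost function with the same required
# signature. It divides the log-likelihood by a "temperature" constant (a value
# chosen here for the example, not a PHOEBE setting), which flattens the
# posterior and down-weights the data relative to the priors.
def tempered_lnprob(b, model, lnpriors, priors, priors_combine, temperature=4.0):
    print("* calling tempered_lnprob")
    return lnpriors + b.calculate_lnlikelihood(model=model) / temperature

# It would be passed to run_solver exactly like default_lnprob, e.g.:
# b.run_solver('emcee_solver', custom_lnprobability_callable=tempered_lnprob,
#              niters=1, solution='emcee_sol_tempered', overwrite=True)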
dato-code/tutorials
notebooks/getting_started_with_python.ipynb
apache-2.0
print 'Hello World!' """ Explanation: Getting Started with Python and GraphLab Create Python is a popular high-level programming language. It's a simple language, designed with an emphsis on code readability. If you already have programming experience, Python is easy to learn. Installing GraphLab and Python Follow these detailed instructions to install GraphLab Create and Python: https://turi.com/download/install-graphlab-create.html Once you have have Anaconda, start an IPython session. IPython is a powerful interactive shell for executing Python. You can start an IPython session by running "ipython" from the command line. This tutorial is written as IPython notebooks. This allows you to download and run the tutorials on your own machine, either as a notebook (.ipynb) or Python file (.py). Python Basics Now it time to execute our first Python command. We can use <b>print</b> keyword to print a string. End of explanation """ # this is a comment! """ Explanation: In Python single line comments are started with a <b>#</b>. End of explanation """ ''' This is technically just a multiline string but ususually it's used as a multiline comment. ''' """ Explanation: Python doesn't actually have built in support of multiline comments. However this can be done by just creating a multine string and not setting it equal to anything. Multiline string are started and ended with three single quotes or three double quotes. (Single and double quotes are equivant in Python.) End of explanation """ b = True # bool s = 'This is a string' # str i = 4 # int f = 4.1 # float """ Explanation: Python has several built in data types. The simple built in types are called: bool, str, int, and float. These are just shorthand names for: boolean, string, integer, and floating point number. Below are examples of creating each type. End of explanation """ d = {'foo': 1, 'bar': 2} # dict l = [3,2,1] # list t = (1,2,3) # tuple print d['foo'] print l[2] print t[1] """ Explanation: Python has other built in types that are compound types (i.e. types composed of other types). The most common are: list, dict and tuple. dict is just short for dictionary. Below are examples of creating these types, and accessing their elements. End of explanation """ b = None s = None """ Explanation: Tuples are like lists except they are immutable. Strings are also immutable. Python also has a special type called <b>None</b> which can be set to any data type. End of explanation """ print "Our float value is %s. Our int value is %s." % (f, i) """ Explanation: You can print the value of variable inside of strings by using the <b>%</b> operator and placing <b>%s</b> inside of the string. For example: End of explanation """ def add2(x): return x + 2 add2(10) """ Explanation: You create a functions by using the <b>def</b> keyword. Here is an example of a function called <i>add2</i> that takes a value called <i>x</i> return the value of two added to it. End of explanation """ if i == 1 and f > 4: print "The value of i is 1 and f is greater than 4." elif i > 4 or f > 4: print "i is greater than 4 or f is greateer than 4." else: print "Both i and f are less or equal to 4." """ Explanation: Like most programming languages, Python has <b>if</b> and <b>else</b> statements. The <b>elif</b> keyword is used for else-if statements. Unlike a lot of programming language, white space is meaningfull; the body of if-statements must be indented from its test-expression. Python doesn't use braces. 
You can use the <b>and</b> and <b>or</b> keywords to string together multipart tests. End of explanation """ for i in l: print i """ Explanation: Python has two types of loops, <b>for</b> loops and <b>while</b> loops. In a for-loops there is one iteration for each element in the variable. Note that <i>i</i> is the current element, not the index value. End of explanation """ while i < 10: print i i += 1 """ Explanation: While-loops are executed as long as the given expression is True. End of explanation """ import graphlab """ Explanation: Notice the use of "+=" to increment. Unlike a lot of programming languages, Python does not have a increment or decrement operator. GraphLab Create Basics First, download and install GraphLab-Create by following these directions: https://turi.com/download/ In order to use another library, you first need to <b>import</b> that library. Like so: End of explanation """ sf = graphlab.SFrame.read_csv('https://static.turi.com/datasets/coursera/toy_datasets/people-example.csv') """ Explanation: Using GraphLab Create, we can easily read in comma seperated file. End of explanation """ sf # you can view the contents # you can explore summaries of the data sf.show() # you can also do this inline graphlab.canvas.set_target('ipynb') sf['age'].show(view='Categorical') """ Explanation: SFrame basics End of explanation """ sf['Country'] """ Explanation: Suppose we just wanted to look a single column. End of explanation """ # add a new column called "Full Name": sf['Full Name'] = sf['First Name'] + ' ' + sf['Last Name'] sf # You can filter finding all rows that match a logical condition sf[sf['Full Name'] == 'Felix Brown'] # You can do math print sf['age'] print sf['age'].mean() print sf['age'].std() print sf['age']*2 print sf['age']+2*sf['age'] sf['Country'] """ Explanation: You can add columns. End of explanation """ def transform_country(country): if country == 'USA': return 'United States' else: return country sf['Country'].apply(transform_country) """ Explanation: On the countries, notice that we have two country values that mean the same thing: "United States" and "USA". To fix this we can apply a function to transform the 'USA' to 'United States' End of explanation """ sf['Country'] = sf['Country'].apply(lambda cur_value: 'United States' if cur_value == 'USA' else cur_value) sf.show() """ Explanation: We could also have used a <b>lambda</b> function in the apply. Lambdas are just inline, unamed functions. Lambdas also don't have explict return statements. What the expression evaluates to will be automatically returned End of explanation """
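# One more apply() example in the same spirit, this time with a lambda that
# buckets the 'age' column into a new column ('age_group' is just a name chosen
# for this example). The bucket labels are arbitrary; the point is that the
# lambda may return any value computed from the current element.
sf['age_group'] = sf['age'].apply(lambda a: 'under 30' if a < 30 else '30 and over')
sf.head()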
roebius/deeplearning_keras2
nbs/lesson4.ipynb
apache-2.0
ratings = pd.read_csv(path+'ratings.csv') ratings.head() len(ratings) """ Explanation: Set up data We're working with the movielens data, which contains one rating per row, like this: End of explanation """ movie_names = pd.read_csv(path+'movies.csv').set_index('movieId')['title'].to_dict users = ratings.userId.unique() movies = ratings.movieId.unique() # userId and movieId become ditionary elements with values ranging from 0 to max len userid2idx = {o:i for i,o in enumerate(users)} movieid2idx = {o:i for i,o in enumerate(movies)} """ Explanation: Just for display purposes, let's read in the movie names too. End of explanation """ ratings.movieId = ratings.movieId.apply(lambda x: movieid2idx[x]) ratings.userId = ratings.userId.apply(lambda x: userid2idx[x]) user_min, user_max, movie_min, movie_max = (ratings.userId.min(), ratings.userId.max(), ratings.movieId.min(), ratings.movieId.max()) user_min, user_max, movie_min, movie_max n_users = ratings.userId.nunique() n_movies = ratings.movieId.nunique() n_users, n_movies """ Explanation: We update the movie and user ids so that they are contiguous integers, which we want when using embeddings. End of explanation """ n_factors = 50 np.random.seed = 42 """ Explanation: This is the number of latent factors in each embedding. End of explanation """ msk = np.random.rand(len(ratings)) < 0.8 trn = ratings[msk] val = ratings[~msk] """ Explanation: Randomly split into training and validation. End of explanation """ g=ratings.groupby('userId')['rating'].count() topUsers=g.sort_values(ascending=False)[:15] g=ratings.groupby('movieId')['rating'].count() topMovies=g.sort_values(ascending=False)[:15] top_r = ratings.join(topUsers, rsuffix='_r', how='inner', on='userId') top_r = top_r.join(topMovies, rsuffix='_r', how='inner', on='movieId') pd.crosstab(top_r.userId, top_r.movieId, top_r.rating, aggfunc=np.sum) """ Explanation: Create subset for Excel We create a crosstab of the most popular movies and most movie-addicted users which we'll copy into Excel for creating a simple example. This isn't necessary for any of the modeling below however. End of explanation """ user_in = Input(shape=(1,), dtype='int64', name='user_in') u = Embedding(input_dim=n_users, output_dim=n_factors, input_length=1, embeddings_regularizer=l2(1e-4))(user_in) movie_in = Input(shape=(1,), dtype='int64', name='movie_in') m = Embedding(input_dim=n_movies, output_dim=n_factors, input_length=1, embeddings_regularizer=l2(1e-4))(movie_in) x = dot([u, m], axes=2) x = Flatten()(x) model = Model([user_in, movie_in], x) model.compile(Adam(0.001), loss='mse') model.fit([trn.userId, trn.movieId], trn.rating, batch_size=batch_size, epochs=1, validation_data=([val.userId, val.movieId], val.rating)) model.optimizer.lr=0.01 model.fit([trn.userId, trn.movieId], trn.rating, batch_size=batch_size, epochs=3, validation_data=([val.userId, val.movieId], val.rating)) model.optimizer.lr=0.001 model.fit([trn.userId, trn.movieId], trn.rating, batch_size=batch_size, epochs=6, validation_data=([val.userId, val.movieId], val.rating)) """ Explanation: Dot product The most basic model is a dot product of a movie embedding and a user embedding. 
Let's see how well that works: End of explanation """ def embedding_input(name, n_in, n_out, reg): inp = Input(shape=(1,), dtype='int64', name=name) return inp, Embedding(input_dim=n_in, output_dim=n_out, input_length=1, embeddings_regularizer=l2(reg))(inp) user_in, u = embedding_input('user_in', n_users, n_factors, 1e-4) movie_in, m = embedding_input('movie_in', n_movies, n_factors, 1e-4) def create_bias(inp, n_in): x = Embedding(input_dim=n_in, output_dim=1, input_length=1)(inp) return Flatten()(x) ub = create_bias(user_in, n_users) mb = create_bias(movie_in, n_movies) x = dot([u, m], axes=2) x = Flatten()(x) x = add([x, ub]) x = add([x, mb]) model = Model([user_in, movie_in], x) model.compile(Adam(0.001), loss='mse') model.fit([trn.userId, trn.movieId], trn.rating, batch_size=batch_size, epochs=1, validation_data=([val.userId, val.movieId], val.rating)) model.optimizer.lr=0.01 model.fit([trn.userId, trn.movieId], trn.rating, batch_size=batch_size, epochs=6, validation_data=([val.userId, val.movieId], val.rating)) model.optimizer.lr=0.001 model.fit([trn.userId, trn.movieId], trn.rating, batch_size=batch_size, epochs=10, validation_data=([val.userId, val.movieId], val.rating)) model.fit([trn.userId, trn.movieId], trn.rating, batch_size=batch_size, epochs=5, validation_data=([val.userId, val.movieId], val.rating)) """ Explanation: The best benchmarks are a bit over 0.9, so this model doesn't seem to be working that well... Bias The problem is likely to be that we don't have bias terms - that is, a single bias for each user and each movie representing how positive or negative each user is, and how good each movie is. We can add that easily by simply creating an embedding with one output for each movie and each user, and adding it to our output. End of explanation """ model.save_weights(model_path+'bias.h5') model.load_weights(model_path+'bias.h5') """ Explanation: This result is quite a bit better than the best benchmarks that we could find with a quick google search - so looks like a great approach! End of explanation """ model.predict([np.array([3]), np.array([6])]) """ Explanation: We can use the model to generate predictions by passing a pair of ints - a user id and a movie id. For instance, this predicts that user #3 would really enjoy movie #6. End of explanation """ g=ratings.groupby('movieId')['rating'].count() topMovies=g.sort_values(ascending=False)[:2000] topMovies = np.array(topMovies.index) """ Explanation: Analyze results To make the analysis of the factors more interesting, we'll restrict it to the top 2000 most popular movies. End of explanation """ get_movie_bias = Model(movie_in, mb) movie_bias = get_movie_bias.predict(topMovies) movie_ratings = [(b[0], movie_names()[movies[i]]) for i,b in zip(topMovies,movie_bias)] """ Explanation: First, we'll look at the movie bias term. We create a 'model' - which in keras is simply a way of associating one or more inputs with one more more outputs, using the functional API. Here, our input is the movie id (a single id), and the output is the movie bias (a single float). End of explanation """ sorted(movie_ratings, key=itemgetter(0))[:15] sorted(movie_ratings, key=itemgetter(0), reverse=True)[:15] """ Explanation: Now we can look at the top and bottom rated movies. These ratings are corrected for different levels of reviewer sentiment, as well as different types of movies that different reviewers watch. 
End of explanation """ get_movie_emb = Model(movie_in, m) movie_emb = np.squeeze(get_movie_emb.predict([topMovies])) movie_emb.shape """ Explanation: We can now do the same thing for the embeddings. End of explanation """ from sklearn.decomposition import PCA pca = PCA(n_components=3) movie_pca = pca.fit(movie_emb.T).components_ fac0 = movie_pca[0] movie_comp = [(f, movie_names()[movies[i]]) for f,i in zip(fac0, topMovies)] """ Explanation: Because it's hard to interpret 50 embeddings, we use PCA to simplify them down to just 3 vectors. End of explanation """ sorted(movie_comp, key=itemgetter(0), reverse=True)[:10] sorted(movie_comp, key=itemgetter(0))[:10] fac1 = movie_pca[1] movie_comp = [(f, movie_names()[movies[i]]) for f,i in zip(fac1, topMovies)] """ Explanation: Here's the 1st component. It seems to be 'critically acclaimed' or 'classic'. End of explanation """ sorted(movie_comp, key=itemgetter(0), reverse=True)[:10] sorted(movie_comp, key=itemgetter(0))[:10] fac2 = movie_pca[2] movie_comp = [(f, movie_names()[movies[i]]) for f,i in zip(fac2, topMovies)] """ Explanation: The 2nd is 'hollywood blockbuster'. End of explanation """ sorted(movie_comp, key=itemgetter(0), reverse=True)[:10] sorted(movie_comp, key=itemgetter(0))[:10] """ Explanation: The 3rd is 'violent vs happy'. End of explanation """ # The following would be for Python 2 only # reload(sys) # sys.setdefaultencoding('utf8') start=50; end=100 X = fac0[start:end] Y = fac2[start:end] plt.figure(figsize=(15,15)) plt.scatter(X, Y) for i, x, y in zip(topMovies[start:end], X, Y): plt.text(x,y,movie_names()[movies[i]], color=np.random.rand(3)*0.7, fontsize=14) plt.show() """ Explanation: We can draw a picture to see how various movies appear on the map of these components. This picture shows the 1st and 3rd components. End of explanation """ user_in, u = embedding_input('user_in', n_users, n_factors, 1e-4) movie_in, m = embedding_input('movie_in', n_movies, n_factors, 1e-4) x = concatenate([u, m], axis=2) x = Flatten()(x) x = Dropout(0.3)(x) x = Dense(70, activation='relu')(x) x = Dropout(0.75)(x) x = Dense(1)(x) nn = Model([user_in, movie_in], x) nn.compile(Adam(0.001), loss='mse') nn.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, epochs=8, validation_data=([val.userId, val.movieId], val.rating)) """ Explanation: Neural net Rather than creating a special purpose architecture (like our dot-product with bias earlier), it's often both easier and more accurate to use a standard neural network. Let's try it! Here, we simply concatenate the user and movie embeddings into a single vector, which we feed into the neural net. End of explanation """
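# As with the bias model earlier, the neural net can be queried for a single
# (user, movie) pair; the ids below are the same arbitrary example ids used
# above, not a meaningful recommendation.
nn.predict([np.array([3]), np.array([6])])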
AllenDowney/DataExploration
nsfg.ipynb
mit
from __future__ import print_function, division import numpy as np import thinkstats2 """ Explanation: Import and Validation Copyright 2015 Allen Downey License: Creative Commons Attribution 4.0 International End of explanation """ def ReadFemPreg(dct_file='2002FemPreg.dct', dat_file='2002FemPreg.dat.gz'): """Reads the NSFG pregnancy data. dct_file: string file name dat_file: string file name returns: DataFrame """ dct = thinkstats2.ReadStataDct(dct_file) preg = dct.ReadFixedWidth(dat_file, compression='gzip') return preg """ Explanation: The NSFG data is in a fixed-width format, documented in a Stata dictionary file. ReadFemPreg reads the dictionary and then reads the data into a Pandas DataFrame. End of explanation """ preg = ReadFemPreg() print(preg.shape) """ Explanation: After loading a DataFrame, I check the shape. End of explanation """ preg.head() """ Explanation: And take a look at the first few rows. End of explanation """ preg.agepreg """ Explanation: Then I validate the variables I am likely to need. The encoding of agepreg is non-obvious. End of explanation """ preg.agepreg /= 100 preg.agepreg.mean() """ Explanation: Which is why you have to read the codebook: http://www.icpsr.umich.edu/nsfg6/Controller?displayPage=labelDetails&fileCode=PREG&section=A&subSec=8016&srtLabel=611935 We can convert to a more obvious representation easily enough: End of explanation """ preg.birthwgt_lb.describe() """ Explanation: For live births, birthweight is coded as two integers, birthwgt_lb and birthwgt_oz. We can use describe to summarize variables. End of explanation """ preg.birthwgt_lb.value_counts().sort_index() """ Explanation: Most of that looks reasonable, but the maximum is 99 lbs! Let's look at the distribution of values: End of explanation """ preg.loc[preg.birthwgt_lb > 20, 'birthwgt_lb'] = np.nan """ Explanation: Consulting the code book, we see that 97, 98, and 99 are sentinel values indicating "not ascertained", "refused", and "don't know" (that is, the respondent did not know). Also, the 51 pound baby is undoubtably an error. We can replace unrealistic values with NaN. End of explanation """ na_vals = [97, 98, 99] preg.birthwgt_lb.replace(na_vals, np.nan, inplace=True) preg.birthwgt_oz.replace(na_vals, np.nan, inplace=True) """ Explanation: And if we don't care about the different sentinel values, we can replace them all with NaN. End of explanation """ preg['totalwgt_lb'] = preg.birthwgt_lb + preg.birthwgt_oz / 16.0 """ Explanation: Next, it will be convenient to recode birthwgt_lb and birthwgt_oz with a single floating-point value. End of explanation """ def CleanFemPreg(preg): """Recodes variables from the pregnancy frame. preg: DataFrame """ # mother's age is encoded in centiyears; convert to years preg.agepreg /= 100.0 # birthwgt_lb contains at least one bogus value (51 lbs) # replace with NaN preg.loc[preg.birthwgt_lb > 20, 'birthwgt_lb'] = np.nan # replace 'not ascertained', 'refused', 'don't know' with NaN na_vals = [97, 98, 99] preg.birthwgt_lb.replace(na_vals, np.nan, inplace=True) preg.birthwgt_oz.replace(na_vals, np.nan, inplace=True) preg.hpagelb.replace(na_vals, np.nan, inplace=True) preg.babysex.replace([7, 9], np.nan, inplace=True) preg.nbrnaliv.replace([9], np.nan, inplace=True) # birthweight is stored in two columns, lbs and oz. 
# convert to a single column in lb # NOTE: creating a new column requires dictionary syntax, # not attribute assignment (like preg.totalwgt_lb) preg['totalwgt_lb'] = preg.birthwgt_lb + preg.birthwgt_oz / 16.0 # due to a bug in ReadStataDct, the last variable gets clipped; # so for now set it to NaN preg.cmintvw = np.nan """ Explanation: After testing these cleanings and recodings, we can encapsuate them in a function. As we work with additional variables, we might add more lines to this function. End of explanation """ preg.pregordr.value_counts().sort_index() """ Explanation: The NSFG codebook includes summaries for many variables, which we can use to make sure the data is uncorrupted, and we are interpreting it correctly. End of explanation """ assert len(preg) == 13593 assert preg.caseid[13592] == 12571 assert preg.pregordr.value_counts()[1] == 5033 assert preg.nbrnaliv.value_counts()[1] == 8981 assert preg.babysex.value_counts()[1] == 4641 assert preg.birthwgt_lb.value_counts()[7] == 3049 assert preg.birthwgt_oz.value_counts()[0] == 1037 assert preg.prglngth.value_counts()[39] == 4744 assert preg.outcome.value_counts()[1] == 9148 assert preg.birthord.value_counts()[1] == 4413 assert preg.agepreg.value_counts()[22.75] == 100 assert preg.totalwgt_lb.value_counts()[7.5] == 302 weights = preg.finalwgt.value_counts() key = max(weights.keys()) assert preg.finalwgt.value_counts()[key] == 6 """ Explanation: The distribution of pregordr is consistent with the summary in the codebook. After running a few checks like this, I document them using assert statements. End of explanation """
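"""
Explanation: Since the same load-and-clean sequence gets reused later, it is convenient to wrap it in a single function. The helper below is only a sketch (the function name here is just an example); it relies solely on ReadFemPreg and CleanFemPreg defined above and repeats one of the sanity checks.
End of explanation
"""

def ReadAndValidateFemPreg(dct_file='2002FemPreg.dct', dat_file='2002FemPreg.dat.gz'):
    """Reads, cleans and lightly validates the NSFG pregnancy data.

    Sketch: combines ReadFemPreg and CleanFemPreg from above.
    """
    preg = ReadFemPreg(dct_file, dat_file)
    CleanFemPreg(preg)
    # repeat one basic sanity check from the assertions above
    assert len(preg) == 13593
    return preg

preg = ReadAndValidateFemPreg()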
TorbjornT/pyAccuRT
examples/Example1.ipynb
mit
import accuread as ar import matplotlib.pyplot as plt %matplotlib inline plt.style.use(['ggplot']) moddir = '../tests/testdata/' d = ar.ReadART('demo1', # basename of simulation basefolder=moddir, # folder where the Output-folder is located scalar=True, # read scalar irradiance iops=True, # read iop file runvarfile='sza.txt' # add contents of this file to d.runvar not very flexible at the moment ) """ Explanation: Reading output from AccuRT End of explanation """ d.plot(run=2) d.plot(profile=True) """ Explanation: Basic plots The plot method is quite basic at the moment. End of explanation """ d.transmittance(layers=(1,2),integrated=True) d.albedo(layer=1,integrated=True) fig,ax = plt.subplots(ncols=2,figsize=(8,3)) ax[0].plot(d.wavelengths,d.transmittance(layers=(0,1))) ax[1].plot(d.wavelengths,d.transmittance(layers=(1,2))) ax[0].set_title('Atmospheric transmittance') ax[1].set_title('Transmittance of 1m ocean') for a in ax: a.legend([str(x) for x in d.runvar],loc='best',title='SZA') a.set_xlabel('Wavelength [nm]') fig,ax = plt.subplots() ax.plot(d.wavelengths,d.albedo(1)) ax.set_xlabel('Wavelength [nm]') ax.set_ylabel('Albedo') ax.legend([str(x) for x in d.runvar],loc='best',title='SZA') """ Explanation: Calculate transmittance and albedo Can calculate either wavelength dependent or integrated. End of explanation """
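"""
Explanation: The integrated quantities are convenient for summarising each run in a single number. The loop below is only a sketch: it assumes that the integrated=True calls return one value per run, in the same order as d.runvar (the solar zenith angles loaded above).
End of explanation
"""

import numpy as np

# Sketch: one-line summary per solar zenith angle, assuming the integrated
# quantities come back as one value per run in the same order as d.runvar.
ocean_trans = np.atleast_1d(d.transmittance(layers=(1, 2), integrated=True)).ravel()
surf_albedo = np.atleast_1d(d.albedo(layer=1, integrated=True)).ravel()
for sza, tr, alb in zip(d.runvar, ocean_trans, surf_albedo):
    print('SZA {0}: ocean transmittance {1:.3f}, albedo {2:.3f}'.format(sza, tr, alb))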
jamesjia94/BIDMach
tutorials/NVIDIA/.ipynb_checkpoints/ClusteringImages-checkpoint.ipynb
bsd-3-clause
import BIDMat.{CMat,CSMat,DMat,Dict,IDict,Image,FMat,FND,GDMat,GMat,GIMat,GSDMat,GSMat,HMat,IMat,Mat,SMat,SBMat,SDMat} import BIDMat.MatFunctions._ import BIDMat.SciFunctions._ import BIDMat.Solvers._ import BIDMat.JPlotting._ import BIDMach.Learner import BIDMach.models.{FM,GLM,KMeans,KMeansw,ICA,LDA,LDAgibbs,Model,NMF,RandomForest,SFA,SVD} import BIDMach.datasources.{DataSource,MatSource,FileSource,SFileSource} import BIDMach.mixins.{CosineSim,Perplexity,Top,L1Regularizer,L2Regularizer} import BIDMach.updaters.{ADAGrad,Batch,BatchNorm,IncMult,IncNorm,Telescoping} import BIDMach.causal.{IPTW} Mat.checkMKL Mat.checkCUDA Mat.setInline if (Mat.hasCUDA > 0) GPUmem """ Explanation: GPU-accelerated K-Means Clustering Training models with data that fits in memory is very limiting. But minibatch learners can easily work with data directly from disk. We'll use the MNIST data set, which has 8 million images (about 17 GB). The dataset has been partition into groups of 100k images (using the unix split command) and saved in compressed lz4 files. This dataset is very large and doesnt get loaded by default by <code>getdata.sh</code>. You have to load it explicitly by calling <code>getmnist.sh</code> from the scripts directory. The script automatically splits the data into files that are small enough to be loaded into memory. Let's load BIDMat/BIDMach End of explanation """ val mdir = "/code/BIDMach/data/MNIST8M/parts/" """ Explanation: And define the root directory for this dataset. End of explanation """ val (mm, opts) = KMeans.learner(mdir+"alls%02d.fmat.lz4") """ Explanation: Constrained Clustering. For this tutorial, we are going to evaluate the quality of clustering by using it for classification. We use a labeled dataset, and compute clusters of training samples using k-Means. Then we match new test samples to the clusters and find the best match. The label assigned to the new sample is the majority vote of the cluster. This method by itself doesnt work well. Clusters will often straddle label boundaries leading to poor labelings. Its better to force each cluster to have a single label. We do that by adding the labels in as very strong features before clustering. The label features cause samples with different labels to be very far apart. Far enough that k-Means will never assign them to the same cluster. The data we want looks like this: <pre> Instance 0 Instance 1 Instance 2 ... has label "2" has label "7" has label "0" ... / 0 0 10000 ... | 0 0 0 ... | 10000 0 0 ... | 0 0 0 ... label / 0 0 0 ... features \ 0 0 0 ... (10) | 0 0 0 ... | 0 10000 0 ... | 0 0 0 ... \ 0 0 0 ... / 128 19 5 ... | 47 28 9 ... image / 42 111 18 ... features \ 37 128 17 ... (784) | 18 176 14 ... | .. .. .. </pre> We chose the label feature weights (here 10000) to force the distance between differently-labeled samples (2 * 10000^2) to be larger than the distance between two image samples (1000 * 256^2). This guarantees that points will not be assigned to a cluster containing a different label (assuming there is initially at least one cluster center with each label). Even though these label features are present in cluster centroids after training, they dont affect matching at test time. Test images dont have the label features, and will match the closest cluster based only on image features. That cluster will have a unique label, which we then assign to the test point. The files containind data in this form are named "alls00.fmat.lz4", "alls01.fmat.lz4" etc. 
Since they contain both data and labels, we dont need to load label files separately. We can create a learner using a pattern for accessing these files: End of explanation """ opts.dim = 300 opts.nend = 10 """ Explanation: The string "%02d" is a C/Scala format string that expands into a two-digit ASCII number to help with the enumeration. There are several new options that can tailor a files datasource, but we'll mostly use the defaults. One thing we will do is define the last file to use for training (number 70). This leaves us with some held-out files to use for testing. End of explanation """ opts.batchSize = 20000 opts.npasses = 6 """ Explanation: Note that the training data include image data and labels (0-9). K-Means is an unsupervised algorithm and if we used image data only KMeans will often build clusters containing different digit images. To produce cleaner clusters, and to facilitate classification later on, the <code>alls</code> data includes both labels in the first 10 rows, and image data in the remaining rows. The label features are scaled by a large constant factor. That means that images of different digits will be far apart in feature space. It effectively prevents different digits occuring in the same cluster. Tuning Options The following options are the important ones for tuning. For KMeans, batchSize has no effect on accracy since the algorithm uses all the data instances to perform an update. So you're free to tune it for best speed. Generally larger is better, as long as you dont use too much GPU ram. npasses is the number of passes over the dataset. Larger is typically better, but the model may overfit at some point. End of explanation """ mm.train """ Explanation: You invoke the learner the same way as before. You can change the options above after each run to optimize performance. End of explanation """ val modelmat = FMat(mm.modelmat) """ Explanation: Now lets extract the model as a Floating-point matrix. We included the category features for clustering to make sure that each cluster is a subset of images for one digit. End of explanation """ val nx = 30 val ny = 10 val im = zeros(28,28) val allim = zeros(28*nx,28*ny) for (i<-0 until nx) { for (j<-0 until ny) { val slice = modelmat(i+nx*j,10->794) im(?) = slice(?) allim((28*i)->(28*(i+1)), (28*j)->(28*(j+1))) = im } } show(allim kron ones(2,2)) """ Explanation: Next we build a 30 x 10 array of images to view the first 300 cluster centers as images. End of explanation """ val igood = find(sum(modelmat,2) > 100) // find non-empty clusters val mmat = modelmat(igood,?) val (dmy, catmap) = maxi2(mmat(?,0->10).t) // Lookup the label for each cluster mm.model.modelmats(0) = mmat(?,10->mmat.ncols) // Remove the label features mm.model.modelmats(1) = mm.modelmats(1)(igood,0) catmap(0->100) """ Explanation: We'll predict using the closest cluster (or 1-NN if you like). Since we did constrained clustering, our data include the labels for each instance, but unlabeled test data doesnt have this. So we project the model matrix down to remove its first 10 features. Before doing this though we find the strongest label for each cluster so later on we can map from cluster id to label. 
End of explanation """ val (pp, popts) = KMeans.predictor(mm.model, mdir+"data%02d.fmat.lz4", mdir+"preds%02d.imat.lz4") popts.nstart = 10 // start with file 70 as test data popts.nend = 20 // finish at file 79 popts.ofcols = 100000 // Match number of samples per file to test file popts.batchSize = 20000 """ Explanation: Next we define a predictor from the just-computed model and the testdata, with the preds files to catch the predictions. End of explanation """ pp.predict """ Explanation: Lets run the predictor End of explanation """ val totals = (popts.nstart until popts.nend).map(i => { val preds = loadIMat(mdir + "preds%02d.imat.lz4" format i); // predicted centroids val cats = loadIMat(mdir + "cat%02d.imat.lz4" format i); // reference labels val cpreds = catmap(preds); // map centroid to label accum(cats.t \ cpreds.t, 1.0, 10, 10) // form a confusion matrix }).reduce(_+_) totals """ Explanation: The <code>preds</code> files now contains the numbers of the best-matching cluster centers. We still need to look up the category label for each one, and compare with the reference data. We'll do this one file at a time, so that our evaluation can scale to arbitrary problem sizes. End of explanation """ val conf = float(totals / sum(totals)) """ Explanation: From the actual and predicted categories, we can compute a confusion matrix: End of explanation """ show((conf * 250f) ⊗ ones(32,32)) """ Explanation: Now lets create an image by multiplying each confusion matrix cell by a white square: End of explanation """ val dacc = getdiag(conf).t """ Explanation: Its useful to isolate the correct classification rate by digit, which is: End of explanation """ mean(dacc) """ Explanation: We can take the mean of the diagonal accuracies to get an overall accuracy for this model. End of explanation """
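"""
Explanation: For readers more used to NumPy than to BIDMat syntax, the snippet below sketches the same kind of bookkeeping in Python: build a confusion matrix from true and predicted digit labels and read off per-class accuracies. The label arrays here are made up purely for illustration.
End of explanation
"""

import numpy as np

# Toy true labels and predicted (cluster-derived) labels, for illustration only.
cats = np.array([0, 1, 2, 2, 3, 3, 3, 4])
cpreds = np.array([0, 1, 2, 1, 3, 3, 0, 4])

conf = np.zeros((10, 10))
np.add.at(conf, (cats, cpreds), 1)                             # rows = true class, cols = predicted
conf = conf / np.maximum(conf.sum(axis=1, keepdims=True), 1)   # row-normalise
dacc = np.diag(conf)                                           # per-digit accuracy
print(dacc)
print(dacc[np.unique(cats)].mean())                            # mean accuracy over classes present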
adrn/thejoker
docs/examples/2-Customize-prior.ipynb
mit
import astropy.table as at from astropy.time import Time import astropy.units as u from astropy.visualization.units import quantity_support import matplotlib.pyplot as plt import numpy as np %matplotlib inline import pymc3 as pm import exoplanet.units as xu import thejoker as tj # set up a random number generator to ensure reproducibility rnd = np.random.default_rng(seed=42) """ Explanation: If you have not already read it, you may want to start with the first tutorial: Getting started with The Joker. This is the first tutorial to use pymc3 and exoplanet: We do not explain how to use these packages here, but you may be interested in this pymc3 tutorial and the tutorials in the exoplanet documentation Customizing the prior pdf in The Joker As mentioned in the previous tutorial, the prior pdf used to generate prior samples in The Joker is customizable. For the nonlinear parameters (see previous tutorial for definition), the priors can be arbitrary probability distributions. For the linear parameters, the priors must be Gaussian, but the parameters are customizable. In the tutorial below, we will work through a few examples related to customizing the prior. First, some imports we will need later: End of explanation """ with pm.Model() as model: P = xu.with_unit(pm.Normal('P', 50., 1), u.day) prior = tj.JokerPrior.default( sigma_K0=30*u.km/u.s, sigma_v=100*u.km/u.s, pars={'P': P}) samples1 = prior.sample(size=100_000, random_state=rnd) """ Explanation: Changing one or a few priors from the default prior If you are mostly happy with the default prior used in The Joker, but want to replace the prior pdf for one or a few parameters, you can still use the JokerPrior.default() alternate initializer. To do this, you will need to first define parameter prior distributions as pymc3 variables, then pass these variables in to .default() using the pars=... keyword argument. For example, imagine we had independent constraints on the period of a particular system and therefore wanted to impose a Gaussian prior on the period. As an example, let's assume that the mean and standard deviation of this Gaussian are 50 days and 1 day, respectively. To define this distribution, we will use the pymc3.Normal() class. As a final step, The Joker requires specifying the physical units of the parameters that we define using the exoplanet.units functionality: End of explanation """ plt.hist(samples1['P'].to_value(u.day), bins=64); plt.xlabel('$P$ [day]') """ Explanation: Let's now plot the period samples to make sure they look Gaussian: End of explanation """ with pm.Model() as model: P = xu.with_unit(pm.Normal('P', 50., 1), u.day) K = xu.with_unit(pm.Normal('K', 0., 15), u.km/u.s) prior = tj.JokerPrior.default( sigma_v=100*u.km/u.s, pars={'P': P, 'K': K}) samples2 = prior.sample(size=100_000, random_state=rnd) samples2 """ Explanation: Indeed, it looks like the samples were generated by a Gaussian centered on 50 days, as we specified. This type of replacement works for other parameters as well. For example, you may want to use a simpler prior on the velocity semi-amplitude, $K$, if you use a more restrictive prior on $P$. 
Here, remember that the prior on any linear parameter (including $K$) must be Gaussian, but we can customize the parameters of the Gaussian: End of explanation """ samples3 = prior.sample(size=100_000, generate_linear=True, random_state=rnd) samples3 """ Explanation: By default, prior.sample() only generates the nonlinear parameters, so you will notice that K does not appear in the returned samples above (variable: samples2). However, we can tell .sample() to also generate the linear parameters: End of explanation """ default_prior = tj.JokerPrior.default( P_min=1e1*u.day, P_max=1e3*u.day, sigma_K0=30*u.km/u.s, sigma_v=75*u.km/u.s) default_samples = default_prior.sample(size=20, generate_linear=True, random_state=rnd, t_ref=Time('J2000')) # set arbitrary time zero-point with pm.Model() as model: K = xu.with_unit(pm.Normal('K', 0., 30), u.km/u.s) custom_prior = tj.JokerPrior.default( P_min=1e1*u.day, P_max=1e3*u.day, sigma_v=75*u.km/u.s, pars={'K': K}) custom_samples = custom_prior.sample(size=len(default_samples), generate_linear=True, random_state=rnd, t_ref=Time('J2000')) # set arbitrary time zero-point now_mjd = Time.now().mjd t_grid = Time(np.linspace(now_mjd - 1000, now_mjd + 1000, 16384), format='mjd') fig, axes = plt.subplots(2, 1, sharex=True, sharey=True, figsize=(8, 8)) _ = tj.plot_rv_curves(default_samples, t_grid=t_grid, ax=axes[0], add_labels=False) _ = tj.plot_rv_curves(custom_samples, t_grid=t_grid, ax=axes[1]) axes[0].set_ylim(-200, 200) fig.tight_layout() """ Explanation: Note that now the samples3 object contains K and v0, the two linear parameters of the default version of The Joker. Next, we will generate full parameter samples (nonliner and linear parameters) for two different priors and compare orbits computed from these samples. Generating prior samples in both nonlinear and linear parameters As mentioned in the previous example, prior.sample() can be told to generate samples in the linear parameters by passing in the argument generate_linear=True. Let's construct samples from the default prior, and compare these samples to others generated from a customize prior that has a fixed K Gaussian (i.e. one that does not scale with period; see the previous tutorial or the documentation for JokerPrior.default()): End of explanation """
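"""
Explanation: Another way to see the difference between the two priors is to look directly at the semi-amplitude draws themselves: under the default prior sigma_K scales with period, so the K samples should show a heavier tail than under the fixed-sigma prior. The sketch below reuses the samples generated above; with only 20 samples per prior the histograms are necessarily crude.
End of explanation
"""

fig, ax = plt.subplots(1, 1, figsize=(6, 4))
ax.hist(np.abs(default_samples['K'].to_value(u.km/u.s)), bins=10,
        histtype='step', label='default (period-scaled) K prior')
ax.hist(np.abs(custom_samples['K'].to_value(u.km/u.s)), bins=10,
        histtype='step', label='fixed-sigma K prior')
ax.set_xlabel('|K| [km/s]')
ax.legend(loc='best')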
ucsd-ccbb/jupyter-genomics
notebooks/crispr/Dual CRISPR 1-Construct Scaffold Trimming.ipynb
mit
g_num_processors = 3 g_fastqs_dir = '/Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/data/raw/20160504_D00611_0275_AHMM2JBCXX' g_trimmed_fastqs_dir = '/Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/data/interim/20160504_D00611_0275_AHMM2JBCXX' g_full_5p_r1 = 'TATATATCTTGTGGAAAGGACGAAACACCG' g_full_5p_r2 = 'CCTTATTTTAACTTGCTATTTCTAGCTCTAAAAC' g_full_3p_r1 = 'GTTTCAGAGCTATGCTGGAAACTGCATAGCAAGTTGAAATAAGGCTAGTCCGTTATCAACTTGAAAAAGTGGCACCGAGTCGGTGCTTTTTTGTACTGAG' g_full_3p_r2 = 'CAAACAAGGCTTTTCTCCAAGGGATATTTATAGTCTCAAAACACACAATTACTTTACAGTTAGGGTGAGTTTCCTTTTGTGCTGTTTTTTAAAATA' g_code_location = '/Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/src/python' """ Explanation: Dual CRISPR Screen Analysis Construct Scaffold Trimming Amanda Birmingham, CCBB, UCSD (abirmingham@ucsd.edu) Instructions To run this notebook reproducibly, follow these steps: 1. Click Kernel > Restart & Clear Output 2. When prompted, click the red Restart & clear all outputs button 3. Fill in the values for your analysis for each of the variables in the Input Parameters section 4. Click Cell > Run All <a name = "input-parameters"></a> Input Parameters End of explanation """ import sys sys.path.append(g_code_location) """ Explanation: CCBB Library Imports End of explanation """ # %load -s describe_var_list /Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/src/python/ccbbucsd/utilities/analysis_run_prefixes.py def describe_var_list(input_var_name_list): description_list = ["{0}: {1}\n".format(name, eval(name)) for name in input_var_name_list] return "".join(description_list) from ccbbucsd.utilities.analysis_run_prefixes import check_or_set, get_run_prefix, get_timestamp g_trimmed_fastqs_dir = check_or_set(g_trimmed_fastqs_dir, g_fastqs_dir) print(describe_var_list(['g_trimmed_fastqs_dir'])) from ccbbucsd.utilities.files_and_paths import verify_or_make_dir verify_or_make_dir(g_trimmed_fastqs_dir) """ Explanation: Automated Set-Up End of explanation """ from ccbbucsd.utilities.notebook_logging import set_stdout_info_logger set_stdout_info_logger() """ Explanation: Info Logging Pass-Through End of explanation """ # %load /Users/Birmingham/Repositories/ccbb_tickets/20160210_mali_crispr/src/python/ccbbucsd/malicrispr/scaffold_trim.py # standard libraries import enum # third-party libraries import cutadapt.scripts.cutadapt # ccbb libraries from ccbbucsd.utilities.files_and_paths import get_file_name_pieces, make_file_path __author__ = 'Amanda Birmingham' __maintainer__ = "Amanda Birmingham" __email__ = "abirmingham@ucsd.edu" __status__ = "prototype" class TrimType(enum.Enum): FIVE = "5" THREE = "3" FIVE_THREE = "53" def get_trimmed_suffix(trimtype): return "_trimmed{0}.fastq".format(trimtype.value) def trim_linked_scaffold(output_dir, fastq_fp, scaffold_seq_5p, scaffold_seq_3p, quiet=True): args = ["-a", "{0}...{1}".format(scaffold_seq_5p,scaffold_seq_3p)] return _run_cutadapt(output_dir, fastq_fp, TrimType.FIVE_THREE, args, quiet) def trim_global_scaffold(output_dir, fastq_fp, scaffold_seq_5p=None, scaffold_seq_3p=None, quiet=True): curr_fastq_fp = fastq_fp if scaffold_seq_5p is not None: curr_fastq_fp = _run_cutadapt_global(output_dir, curr_fastq_fp, scaffold_seq_5p, True, quiet) if scaffold_seq_3p is not None: curr_fastq_fp = _run_cutadapt_global(output_dir, curr_fastq_fp, scaffold_seq_3p, False, quiet) return curr_fastq_fp def _run_cutadapt_global(output_dir, input_fastq_fp, seq_to_trim, is_5p, quiet): end_switch = "-g" end_name = TrimType.FIVE if not is_5p: 
end_switch = "-a" end_name = TrimType.THREE args = [end_switch, seq_to_trim] return _run_cutadapt(output_dir, input_fastq_fp, end_name, args, quiet) def _run_cutadapt(output_dir, input_fastq_fp, trim_name, partial_args, quiet): _, input_base, _ = get_file_name_pieces(input_fastq_fp) output_fastq_fp = make_file_path(output_dir, input_base, get_trimmed_suffix(trim_name)) args = [x for x in partial_args] if quiet: args.append("--quiet") args.extend(["-o", output_fastq_fp, input_fastq_fp]) cutadapt.scripts.cutadapt.main(args) return output_fastq_fp def trim_fw_and_rv_reads(output_dir, full_5p_r1, full_3p_r1, full_5p_r2, full_3p_r2, fw_fastq_fp, rv_fastq_fp): trim_linked_scaffold(output_dir, fw_fastq_fp, full_5p_r1, full_3p_r1) trim_linked_scaffold(output_dir, rv_fastq_fp, full_5p_r2, full_3p_r2) """ Explanation: Scaffold Trimming Functions End of explanation """ g_seq_file_ext_name = ".fastq" g_gzip_ext_name = ".gz" from ccbbucsd.utilities.files_and_paths import summarize_filenames_for_prefix_and_suffix print(summarize_filenames_for_prefix_and_suffix(g_fastqs_dir, "", "{0}{1}".format(g_seq_file_ext_name, g_gzip_ext_name), all_subdirs=True)) """ Explanation: Gzipped FASTQ Filenames End of explanation """ from ccbbucsd.utilities.files_and_paths import gunzip_wildpath, move_to_dir_and_flatten def unzip_and_flatten_seq_files(top_fastqs_dir, ext_name, gzip_ext_name, keep_gzs): # first, recursively unzip all fastq.gz files anywhere under the input dir gunzip_wildpath(top_fastqs_dir, ext_name + gzip_ext_name, keep_gzs, True) # True = do recursive # now move all fastqs to top-level directory so don't have to work recursively in future move_to_dir_and_flatten(top_fastqs_dir, top_fastqs_dir, ext_name) # False = don't keep gzs as well as expanding, True = do keep them (True only works for gzip 1.6+) unzip_and_flatten_seq_files(g_fastqs_dir, g_seq_file_ext_name, g_gzip_ext_name, False) """ Explanation: FASTQ Gunzip Execution End of explanation """ print(summarize_filenames_for_prefix_and_suffix(g_fastqs_dir, "", g_seq_file_ext_name)) """ Explanation: FASTQ Filenames End of explanation """ from ccbbucsd.utilities.parallel_process_fastqs import parallel_process_paired_reads, concatenate_parallel_results g_parallel_results = parallel_process_paired_reads(g_fastqs_dir, g_seq_file_ext_name, g_num_processors, trim_fw_and_rv_reads, [g_trimmed_fastqs_dir, g_full_5p_r1, g_full_3p_r1, g_full_5p_r2, g_full_3p_r2]) print(concatenate_parallel_results(g_parallel_results)) """ Explanation: Scaffold Trim Execution End of explanation """ print(summarize_filenames_for_prefix_and_suffix(g_trimmed_fastqs_dir, "", get_trimmed_suffix(TrimType.FIVE_THREE))) """ Explanation: Trimmed FASTQ Filenames End of explanation """
woodmd/haloanalysis
notebooks/Select_Population.ipynb
bsd-3-clause
import os import sys from collections import OrderedDict import yaml import numpy as np from astropy.io import fits from astropy.table import Table, Column, join, hstack, vstack from haloanalysis.utils import create_mask, load_source_rows from haloanalysis.sed import HaloSED from haloanalysis.model import CascModel, CascLike from haloanalysis.model import scan_igmf_likelihood from haloanalysis.sed import SED from ebltable.tau_from_model import OptDepth from haloanalysis.utils import create_mask import re %matplotlib inline """ Explanation: With this notebook you can subselect TeV sources for the stacked analysis of the catalog and rank TeV sources for the IGMF analysis Imports End of explanation """ cat = '../data/table_std_psf0123_joint2a_stdmodel_cat_v15.fits' t = Table.read(cat, hdu = 'CATALOG') """ Explanation: Loading the FHES catalog End of explanation """ mask_str = [ {'HBL' : ' (nupeak > 1e15 ) & (var_index < 100.)'}, {'HBL $z < 0.2$' : ' (nupeak > 1e15 ) & (var_index < 100.) & (redshift <= 0.2)'}, {'XHBL' : ' (nupeak > 1e17 ) & (var_index < 100.) & (3lac_fx_fr > 1e4)'}, {'LBL $z > 0.5$' : ' (nupeak <= 1e13 ) & (redshift > 0.5) & (3lac_fx_fr < 1e4)'} ] mask = [] for m in mask_str: mask.append(create_mask(t,m)) print 'surviving sources', np.sum(mask[-1]) """ Explanation: Performing cuts to define samples for stacked analysis End of explanation """ color = ['k','r','b','g'] hatch = ['//','\\','||','-'] #t['redshift'][np.isnan(t['redshift'])] = np.ones(np.sum(np.isnan(t['redshift']))) * -0.1 for i,m in enumerate(mask): if not i: n,bins, patches = plt.hist(t['redshift'][m & np.isfinite(t['redshift'])], bins = 15, normed = False, stacked = False, #range = (-0.1,2.5), label = mask_str[i].keys()[0], edgecolor = color[i], facecolor = 'None', lw = 2, hatch = hatch[i]) else: n,bins, patches = plt.hist(t['redshift'][m& np.isfinite(t['redshift'])], bins = bins, normed = False, stacked = False, label = mask_str[i].keys()[0], edgecolor = color[i], facecolor = 'None', lw = 2, hatch = hatch[i]) plt.grid(True) plt.legend(loc=0) plt.xlabel('Redshift') plt.savefig('redshift_dist_mask.png', format = 'png', dpi = 200) """ Explanation: Show the redshift distributions after the cuts: End of explanation """ tau = OptDepth.readmodel(model = 'dominguez') cat_tev = Table.read('../data/CompiledTeVSources.fits') """ Explanation: Loading the TeV sources End of explanation """ for i,n in enumerate(cat_tev['3FGL_NAME']): cat_tev['3FGL_NAME'][i] = '3FGL ' + n """ Explanation: add suffix 3FGL to TeV catalog: End of explanation """ tfhes_var = Table([t['name_3fgl'],t['var_index']], names = ['3FGL_NAME', 'var_index']) cat_tev = join(cat_tev,tfhes_var) """ Explanation: make a table with 3FGL names and their var index and join with tev table End of explanation """ m = np.isfinite(cat_tev['E_REF'].data) taus = [] for i,z in enumerate(cat_tev['REDSHIFT'].data): taus.append(tau.opt_depth(z,cat_tev['E_REF'].data[i,m[i]])) taus = np.array(taus) tau_max = np.array([tm[-1] for tm in taus]) """ Explanation: Get the optical depth End of explanation """ c = {'var_zsafe' : '(IS_REDSHIFT_SAFE == 1) & (var_index < 100)'} mtev = create_mask(cat_tev,c ) mtev = (tau_max > 2.) 
& mtev """ Explanation: Cuts on the TeV catalog: End of explanation """ for i,n in enumerate(cat_tev['SOURCE_FULL'].data): # remove sources with only LL on z: if n.find('1553') >= 0 or n.find('1424') >= 0: mtev[i] = False # remove highly variable sources -- this should be clearer defined #if n.find('279') >= 0 or n.find('2155') >= 0 or n.lower().find('mkn') >= 0: mtev[i] = False # fit fails: if n.find('0710') >= 0: mtev[i] = False """ Explanation: Remove sources by hand, e.g. because of variability, not well constrained redshift, etc. End of explanation """ idx = np.arange(mtev.shape[0], dtype = np.int)[np.invert(mtev)] cat_tev.remove_rows(idx) """ Explanation: Remove the rows that fail the cuts from the table: End of explanation """ cat_tev.write('../data/TeV_sources_cut_{0:s}.fits'.format(c.keys()[0]), overwrite = True) print cat_tev help(np.array) """ Explanation: print catalog and save to file End of explanation """
csaladenes/csaladenes.github.io
present/bi/2018/jupyter/pelda.ipynb
mit
df=pd.read_excel('formazottbi2.xlsx')
df
"""
Explanation: Example 1
End of explanation
"""

pd.DataFrame(df.stack()).head()
"""
Explanation: stack stacks the columns on top of one another.
End of explanation
"""

df.columns
df.set_index(['Tevékenység','Ország']).head(2)
"""
Explanation: This is not quite right yet, because we first need to pull out the country and the activity. So we turn them into an index.
End of explanation
"""

df=pd.DataFrame(df.set_index(['Tevékenység','Ország']).stack())
"""
Explanation: The operations so far were only done in memory. Now we overwrite df.
End of explanation
"""

df=df.reset_index()
df.head()
"""
Explanation: With the reset_index command we put the index columns back among the data columns.
End of explanation
"""

df.columns=['Tevékenység', 'Ország','Év','Érték']
df.head()
"""
Explanation: We rename the columns to what we want them to be.
End of explanation
"""

df['Év']=df['Év'].astype(int)
"""
Explanation: The year and value columns first have to be converted to numbers before plotting.
End of explanation
"""

import numpy as np
df['Érték']=df['Érték'].str.replace('\xa0','')\
    .str.replace('..','')
df['Érték']=df['Érték'].replace('b','').replace('e','').replace('',np.nan).astype(float)
df.plot(x='Év',y='Érték')
"""
Explanation: First we have to replace the space characters. In pandas the replace function applies to the entire cell content, which is why we use str.replace.
End of explanation
"""

df.to_excel('bi_formazott.xlsx')
df.to_csv('bi_formazott.csv')
"""
Explanation: Saving
End of explanation
"""

dg=pd.read_csv('data_tobbacco_europe_filtered.csv')
dg.head()
dg.columns
dg.T.head()
dh=dg.T
dh.index[0]
dh.columns=dh.loc[dh.index[0]]
dh=dh.loc[dh.index[1:]]
dh=dh.set_index('Country',append=True).T
dh.head(2)
import numpy as np
dh=pd.DataFrame(dh.set_index(dh.columns[0],append=True).\
    replace('Not available',np.nan).astype(float).stack().stack()).reset_index()
dh.head(2)
dh.columns=['Ország','Év','Nem','Kérdés','Érték']
dh.to_excel('cigi.xlsx')
"""
Explanation: Example 2
End of explanation
"""
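"""
Explanation: The same wide-to-long reshaping steps were applied twice above, so it is worth collecting them into a small helper. The function below is only a sketch (its name and signature are just an example); it mirrors the set_index / stack / reset_index / rename sequence used in both examples.
End of explanation
"""

import pandas as pd

def wide_to_long(frame, id_cols, var_name='Év', value_name='Érték'):
    """Sketch: move id_cols into the index, stack the remaining columns,
    and return a tidy long-format DataFrame."""
    out = pd.DataFrame(frame.set_index(id_cols).stack()).reset_index()
    out.columns = list(id_cols) + [var_name, value_name]
    return out

# Example usage with the first table:
# wide_to_long(pd.read_excel('formazottbi2.xlsx'), ['Tevékenység', 'Ország']).head()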
cliburn/sta-663-2017
notebook/10A_CodeOptimization.ipynb
mit
%%file distance.py import numpy as np def euclidean_dist(u, v): """Returns Euclidean distance betwen numpy vectors u and v.""" w = u - v return np.sqrt(np.sum(w**2)) %%file test_distance.py import numpy as np from numpy.testing import assert_almost_equal from distance import euclidean_dist def test_non_negativity(): for i in range(10): u = np.random.normal(3) v = np.random.normal(3) assert euclidean_dist(u, v) >= 0 def test_coincidence_when_zero(): u = np.zeros(3) v = np.zeros(3) assert euclidean_dist(u, v) == 0 def test_coincidence_when_not_zero(): for i in range(10): u = np.random.random(3) v = np.zeros(3) assert euclidean_dist(u, v) != 0 def test_symmetry(): for i in range(10): u = np.random.random(3) v = np.random.random(3) assert euclidean_dist(u, v) == euclidean_dist(v, u) def test_triangle(): u = np.random.random(3) v = np.random.random(3) w = np.random.random(3) assert euclidean_dist(u, w) <= euclidean_dist(u, v) + euclidean_dist(v, w) def test_known1(): u = np.array([0]) v = np.array([3]) assert_almost_equal(euclidean_dist(u, v), 3) def test_known2(): u = np.array([0,0]) v = np.array([3, 4]) assert_almost_equal(euclidean_dist(u, v), 5) def test_known3(): u = np.array([0,0]) v = np.array([-3, -4]) assert_almost_equal(euclidean_dist(u, v), 5) ! py.test """ Explanation: Code Optimization Here we will look briefly at how to time and profile your code, and then at an approach to making your code run faster. There is a sequence of mini-goals that is applicable to nearly every programming problem: Make it run Make it right Make it fast Note that the list does not start with Make it fast. Testing, debugging and optimization are a set of strategies and practices to achieve those goals. Only optimization will be covered in these notes - pointers to resources for testing and debugging are provided but not covered. Testing code For an introduction to standard testing in Python, see Testing Your Code For automated generation of tests, see the Hypothesis package End of explanation """ import time import timeit def f(nsec=1.0): """Function sleeps for nsec seconds.""" time.sleep(nsec) start = timeit.default_timer() f() elapsed = timeit.default_timer() - start elapsed """ Explanation: Debugging Tools within Jupyter from the official tutorial ``` After an exception occurs, you can call %debug to jump into the Python debugger (pdb) and examine the problem. Alternatively, if you call %pdb, IPython will automatically start the debugger on any uncaught exception. You can print variables, see code, execute statements and even walk up and down the call stack to track down the true source of the problem. This can be an efficient way to develop and debug code, in many cases eliminating the need for print statements or external debugging tools. You can also step through a program from the beginning by calling %run -d theprogram.py. 
``` See the Scipy tutorial Timing and profiling code Install profiling tools: bash pip install --pre line-profiler pip install psutil pip install memory_profiler References: http://scipy-lectures.github.com/advanced/optimizing/index.html http://pynash.org/2013/03/06/timing-and-profiling.html Timing code 1s = 1000 ms 1 ms = 1000 $\mu$s 1 $\mu$s = 1000 ns Simple approach End of explanation """ def process_time(f, *args, **kwargs): def func(*args, **kwargs): import timeit start = timeit.default_timer() f(*args, **kwargs) print(timeit.default_timer() - start) return func @process_time def f1(nsec=1.0): """Function sleeps for nsec seconds.""" time.sleep(nsec) f1() """ Explanation: We can make a decorator for convenience End of explanation """ %timeit f(0.01) %timeit -n10 f(0.01) %timeit -r10 f(0.01) %timeit -n10 -r3 f(0.01) """ Explanation: Within the Jupyter notebook, use the timeit magic function End of explanation """ def foo1(n): return sum(i**2 for i in range(n)) def foo2(n): return sum(i*i for i in range(n)) def foo3(n): [foo1(n) for i in range(10)] foo2(n) def bar(n): return sum(i**3 for i in range(n)) def work(n): foo1(n) foo2(n) foo3(n) bar(n) %prun -q -D work.prof work(int(1e6)) import pstats p = pstats.Stats('work.prof') p.print_stats() pass p.sort_stats('time', 'cumulative').print_stats('foo') pass p.sort_stats('ncalls').print_stats(5) pass """ Explanation: Profiling code This can be done in a notebook with %prun, with the following readouts as column headers: ncalls for the number of calls, tottime for the total time spent in the given function (and excluding time made in calls to sub-functions), percall is the quotient of tottime divided by ncalls cumtime is the total time spent in this and all subfunctions (from invocation till exit). This figure is accurate even for recursive functions. percall is the quotient of cumtime divided by primitive calls filename:lineno(function) provides the respective data of each function End of explanation """ %load_ext memory_profiler %%file foo.py def foo(n): phrase = 'repeat me' pmul = phrase * n pjoi = ''.join([phrase for x in range(n)]) pinc = '' for x in range(n): pinc += phrase del pmul, pjoi, pinc import sys sys.path.append('.') # mprun requires the code be in a file # functions declared interactively in python will not work from foo import foo %mprun -f foo foo(100000) # However, memit does work with interactive functions # Unlike mprun which gives a line by line analysis # memit gives the total amount of memory used def gobble(n): x = [i*i for i in range(n)] %memit -r 3 gobble(1000000) """ Explanation: Checking memory usage End of explanation """ import numpy as np xs = np.random.randint(0, 1000, 100) ys = np.random.randint(0, 1000, 100) """ Explanation: Data structures and algorithms There are many ways to speed up slow code. However, the first thing that should come to mind (after profiling to identify the bottlenecks) is whether there is a more appropriate data structure or algorithm that can be used. The reason is that this is the only approach that makes a difference to the big O complexity, and this makes all the difference for scalability. A few examples are shown here; a large collection of classic data structures and algorithms in Python with detailed explanations is available at Problem Solving wiht Algorithms and Data Structures You are highly encouraged to take an algorithms class, where you will discover strategies such as: adaptive methods (e.g. adaptive quadrature, adaptive Runge-Kutta) divide and conquer (e.g. 
Barnes-Hut, Fast Fourier Transform) tabling and dynamic programming (e.g. Viterbi algorithm for Hidden Markov Models) graphs and network algorithms (e.g. shortest path, max flow min cut) hashing (e.g. locality senstive hashing, Bloom filters) probabilistic algorithms (e.g. randomized projections, Monte Carlo integration) Example 1 - finding common elements in two data collections End of explanation """ def common1(xs, ys): """Using lists.""" zs = set([]) for x in xs: for y in ys: if x==y: zs.add(x) return zs %timeit -n3 -r3 common1(xs, ys) """ Explanation: Using lists End of explanation """ %timeit -n3 -r3 set(xs) & set(ys) """ Explanation: Using sets End of explanation """ alist = list(np.random.randint(1000, 100000, 1000)) blist = alist[:] entries = np.random.randint(1, 10000, 10000) def f1(alist, entries): """Using repeated sorts.""" zs = [] for entry in entries: alist.append(entry) alist.sort(reverse=True) zs.append(alist.pop()) return zs %timeit f1(alist, entries) """ Explanation: Example 2 - Find minimum item in a list each time a new item is inserted Using lists End of explanation """ from heapq import heappushpop, heapify def f2(alist, entries): """Using a priority queue.""" heapify(alist) zs = [] for entry in entries: zs.append(heappushpop(alist, entry)) return zs %timeit f2(blist, entries) """ Explanation: Using a heap (priority queue) End of explanation """ def concat1(alist): """Using string concatenation.""" s = alist[0] for item in alist[1:]: s += " " + item return s def concat2(alist): """Using join.""" return " ".join(alist) alist = ['abcde'] * 1000000 %timeit -r3 -n3 concat1(alist) %timeit -r3 -n3 concat2(alist) """ Explanation: Python idioms for speed String concatenation End of explanation """ """Avoiding loops.""" import math def loop1(n): """Using for loop with function call.""" z = [] for i in range(n): z.append(math.sin(i)) return z def loop2(n): """Using local version of function.""" z = [] sin = math.sin for i in range(n): z.append(sin(i)) return z def loop3(n): """Using list comprehension.""" sin = math.sin return [sin(i) for i in range(n)] def loop4(n): """Using map.""" sin = math.sin return list(map(sin, range(n))) def loop5(n): """Using numpy.""" return np.sin(np.arange(n)).tolist() n = 1000000 %timeit -r1 -n1 loop1(n) %timeit -r1 -n1 loop2(n) %timeit -r1 -n1 loop3(n) %timeit -r1 -n1 loop4(n) %timeit -r1 -n1 loop5(n) assert(np.all(loop1(n) == loop2(n))) assert(np.all(loop1(n) == loop3(n))) assert(np.all(loop1(n) == loop4(n))) assert(np.all(loop1(n) == loop5(n))) """ Explanation: Avoiding loops End of explanation """ a = np.arange(1e6) %timeit global a; a = a * 0 %timeit global a; a *= 0 """ Explanation: Using in-place operations End of explanation """ def idx1(xs): """Using loops.""" s = 0 for x in xs: if (x > 10) and (x < 20): s += x return s def idx2(xs): """Using logical indexing.""" return np.sum(xs[(xs > 10) & (xs < 20)]) n = 1000000 xs = np.random.randint(0, 100, n) %timeit -r3 -n3 idx1(xs) %timeit -r3 -n3 idx2(xs) """ Explanation: Using appropriate indexing End of explanation """ def average1(xs): """Using loops.""" ys = xs.copy() rows, cols = xs.shape for i in range(rows): for j in range(cols): s = 0 for u in range(i-1, i+2): if u < 0 or u >= rows: continue for v in range(j-1, j+2): if v < 0 or v >= cols: continue s += xs[u, v] ys[i, j] = s/9.0 return ys def average2(xs): """Using shifted array views and border to avoid out of bounds checks.""" rows, cols = xs.shape xs1 = np.zeros((rows+2, cols+2)) xs1[1:-1, 1:-1] = xs[:] ys = (xs1[:-2, :-2] + xs1[1:-1, 
:-2] + xs1[2:, :-2] + xs1[:-2, 1:-1] + xs1[1:-1, 1:-1] + xs1[2:, 1:-1] + xs1[:-2, 2:] + xs1[1:-1, 2:] + xs1[2:, 2:])/9.0 return ys n = 25 xs = np.random.uniform(0,10,(n, n)) %timeit -r3 -n3 average1(xs) %timeit -r3 -n3 average2(xs) """ Explanation: Using views to implement stencils End of explanation """ xs = np.random.random((1000, 10)) xs ys = np.random.random((1000, 10)) ys from numpy.core.umath_tests import inner1d %timeit -n3 -r3 np.array([x @ y for x, y in zip(xs, ys)]) %timeit -n3 -r3 inner1d(xs, ys) from numpy.core.umath_tests import matrix_multiply xs = np.random.randint(0, 10, (500, 2, 2)) ys = np.random.randint(0, 10, (500, 2, 2)) %timeit -n3 -r3 np.array([x @ y for x, y in zip(xs, ys)]) %timeit -r3 -n3 matrix_multiply(xs, ys) """ Explanation: Using generalized universal functions (gufuncs) End of explanation """ from functools import lru_cache def fib(n): if n <= 2: return 1 else: return fib(n-1) + fib(n-2) # A simple example of memoization - in practice, use `lru_cache` from functools def memoize(f): store = {} def func(n): if n not in store: store[n] = f(n) return store[n] return func @memoize def mfib(n): return fib(n) @lru_cache() def lfib(n): return fib(n) assert(fib(10) == mfib(10)) assert(fib(10) == lfib(10)) %timeit -r1 -n10 fib(30) %timeit -r1 -n10 mfib(30) %timeit -r1 -n10 lfib(30) """ Explanation: Memoization End of explanation """
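"""
Explanation: One practical advantage of lru_cache over the hand-rolled memoize decorator is that it exposes cache statistics, which makes it easy to confirm that the cache is actually being hit and to reset it between timing runs.
End of explanation
"""

# lru_cache-wrapped functions report hits/misses and can be cleared.
print(lfib.cache_info())
lfib.cache_clear()
print(lfib.cache_info())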
ellisztamas/faps
docs/tutorials/06_simulating_data.ipynb
mit
import numpy as np import faps as fp import matplotlib.pylab as plt import pandas as pd from time import time, localtime, asctime print("Created using FAPS version {}.".format(fp.__version__)) """ Explanation: Simulating data and power analysis Tom Ellis, August 2017 End of explanation """ np.random.seed(37) allele_freqs = np.random.uniform(0.2, 0.5, 50) adults = fp.make_parents(10, allele_freqs, family_name='adult') """ Explanation: Before committing to the time and cost of genotyping samples for a paternity study, it is always sensible to run simulations to test the likely statistical power of your data set. This can help with important questions regaridng study design, such as finding an appropriate balance between the number of families vs offspring per family, or identifying a minimum number of loci to type. Simulated data can also be useful in verifying the results of an analysis. FAPS provides tools to run such simulations. In this notebook we look look at: Basic tools for simulating genotype data. Automated tools for power analysis. Crafting custom simulations for specialised purposes. Simulations using emprical datasets (under construction). It is worth noting that I relied on loops for a lot of these tools, for the purely selfish reason that it was easy to code. Loops are of course slow, so if you work with these tools a lot there is ample scope for speeding things up (see especially the functions make_offspring, make_sibships and make_power). Simulation building blocks Creating genotypeArray objects Simulations are built using genotypeArrays. See the section on these here for more information. make_parents generates a population of reproductive adults from population allele frequencies. This example creates ten individuals. Note that this population will be in Hardy-Weinberg equilibrium, but yours may not. End of explanation """ family1 = fp.make_offspring(parents = adults, noffs=5) family1.parents """ Explanation: There are multiple ways to mate adults to generate offspring. If you supply a set of adults and an integer number of offspring, make_offspring mates adults at random. End of explanation """ family2 = fp.make_offspring(parents = adults, dam_list=[7,1,8,8,0], sire_list=[2,6,3,0,7]) family2.parents """ Explanation: You can also supply an explicit list of dams and sires, in which case the adults are paired in the order they appear in each list. End of explanation """ family3 = fp.make_sibships(parents=adults, dam=0, sires=[1,2,3,4], family_size=5) family3.parents """ Explanation: Usually we really want to simulate half sib arrays. This can be done using make_sibships, which mates a single mother to a set of males. End of explanation """ family4 = fp.make_sibships(parents=adults, dam=0, sires=[1,2,3,4], family_size=[5,4,3,2]) family4.parents """ Explanation: For uneven sibship sizes, give a list of sizes for each family of the same length as sires. End of explanation """ np.random.seed(85) allele_freqs = np.random.uniform(0.2, 0.5, 50) adults = fp.make_parents(10, allele_freqs, family_name='adult') progeny = fp.make_sibships(parents=adults, dam=0, sires=[1,2,3,4], family_size=5) """ Explanation: Adding errors Real data almost always contains errors. For SNP data, these take the form of: Missing data, where a locus fails to amplify for some reason Genotyping errors, when the observed genotype at a locus is not the actual genotype. These are straightforward to include in simulated data. First generate some clean data again, and mate the parents. 
End of explanation """ d, mu= 0.01, 0.0015 # values for dropout and error rate. # add genotyping errors adults_mu = adults.mutations(mu) progeny_mu = progeny.mutations(mu) # add dropouts (to the mutated data) adults_mu = adults_mu.dropouts(d) progeny_mu = progeny.dropouts(d) """ Explanation: It is best to create the progeny before adding errors. Set the error rates and add errors at random. End of explanation """ print(adults.missing_data().mean()) print(adults_mu.missing_data().mean()) """ Explanation: mutations and dropouts make copies of the genotypeArray, so the original data remains unchanged. For example: End of explanation """ np.random.seed(85) allele_freqs = np.random.uniform(0.4, 0.5, 50) adults = fp.make_parents(10, allele_freqs, family_name='adult') progeny = fp.make_sibships(parents=adults, dam=0, sires=[1,2,3,4], family_size=5) mothers = adults.subset(progeny.mothers) patlik = fp.paternity_array(progeny, mothers, adults, mu=0.0015, missing_parents=0.01, integration='partial') sc = fp.sibship_clustering(patlik) """ Explanation: Paternity and sibships Create a paternityArray and cluster into sibships as usual (more information on these objects can be found here and here. End of explanation """ sc.accuracy(progeny, adults) """ Explanation: A very useful tool is the accuracy subfunction for sibshipCluster objects. When the paternity and sibship structure are know (seldom the case in real life, but true for simulated data) this returns an array of handy information about the analysis: Binary indiciator for whether the true partition was included in the sample of partitions. Difference in log likelihood for the maximum likelihood partition identified and the true partition. Positive values indicate that the ML partition had greater support than the true partition. Posterior probability of the true number of families. Mean probabilities that a pair of true full sibs are identified as full sibs. Mean probabilities that a pair of true half sibs are identified as half sibs. Mean probabilities that a pair of true half or full sibs are correctly assigned as such (i.e. overall accuracy of sibship reconstruction. Mean (log) probability of paternity of the true sires for those sires who had been sampled (who had non-zero probability in the paternityArray). Mean (log) probability that the sire had not been sampled for those individuals whose sire was truly absent. End of explanation """ patlik.purge = 'adult_1' patlik.missing_parents=0.5 sc = fp.sibship_clustering(patlik) sc.accuracy(progeny, adults) """ Explanation: In this example, accuracy is high, but the probability of a missing sire is NaN because all the sires are present, and this number of calculated only for offspring whose sire was absent. We can adjust the paternityArray to see how much this effects the results. For example, if we remove the sire of the first family (i.e. the male indexed by 1), there is a drop in the accuracy for full-sibling relationships, although half-sibling relationships are unaffected. End of explanation """ patlik.selfing_rate=0.5 sc = fp.sibship_clustering(patlik) sc.accuracy(progeny, adults) """ Explanation: In contrast, imagine we had an idea that selfing was strong. How would this affect things? 
End of explanation """ # Common simulation parameters r = 10 # number of replicates nloci = [30,40,50] # number of loci allele_freqs = [0.25, 0.5] # draw allele frequencies nadults = [100,250,500] # size of the adults population mu = 0.0015 #genotype error rates sires = 4 offspring = 5 np.random.seed(614) eventab = fp.make_power( replicates = r, nloci = nloci, allele_freqs = allele_freqs, candidates = nadults, sires = sires, offspring = offspring, missing_loci=0, mu_real = mu, unsampled_input=0.01 ) """ Explanation: The results are identical to the unmodified case; FAPS has correctly identifed the correct partition structure in spite of the (incorrect) strong prior for high selfing. Automation It can be tedious to put together your own simulation for every analysis. FAPS has an automated function that repeatedly creates genotype data, clusters into siblings and calls the accuracy function. You can supply lists of variables and it will evaluate each combination. There are a lot of possible inputs, so have a look at the help page using fp.make_power?. For example, this code creates four families of five full siblings with a genotyping error rate of 0.0015. It considers 30, 40 and 50 loci for 100, 250 or 500 candidate fathers. Each parameter combination is replicated 10 times. In reality you would want to do more than this; I have found that results tend to asymptote with 300 simulations. End of explanation """ fp.make_power(r, nloci, allele_freqs, nadults, sires, offspring, 0, mu_input= 0.003, mu_real=0.0015, unsampled_real=0.1, unsampled_input = 0.05); """ Explanation: For convenience, make_power provides a summary of the input parameters. This can be turned off by setting verbose to False. Similarly, the progress bar can be removed by setting progress to False. This bar uses iPython widgets, and probably won't work outside of iPython, so it may be necessary to turn them off. The results of make_power are basically the output from the accuracy function we saw before, but include information on simulation parameters, and the time taken to create the paternityArray and sibshipCluster objects. View them by inspecting eventab. Arguments to set up the population work much like those to create genotypeArrays, and are quite flexible. Have a look into the help file (run make_power? in Python) for more. You can also take a look at the simulations in support of the main FAPS paper, which considered a range of contrasting demographic scenarios; the example above is adapted from there. Error rates and missing candidates are important topics to get a handle on. We can estimate these parameters (e.g. by genotyping some individuals twice and counting how many loci are different), but we can never completely be sure how close to reality we are. With that in mind make_power allows you to simulate true values mu and the proportion of missing sires, but run the analysis with different values. The idea is to estimate how wrong you could be before the analysis fails. For example, this code would simulate the case where you thought that the error rate was 0.0015, and 5% of the candidates went unsampled, but in reality both parameters were double that amount. 
End of explanation
"""

fp.make_power(r, nloci, allele_freqs, nadults, sires, offspring, 0,
              mu_input= 0.003, mu_real=0.0015, unsampled_real=0.1, unsampled_input = 0.05);
"""
Explanation: If you want to perform downstream analysis, you can tell make_power to also export each paternityArray and/or sibshipCluster object. This is done by setting return_paternities and return_clusters to True. For example, this code pulls out the distribution of family sizes from each sibshipArray, and plots it.
End of explanation
"""

eventab, evenclusters = fp.make_power(
    replicates = r,
    nloci = nloci,
    allele_freqs = allele_freqs,
    candidates = nadults,
    sires = sires,
    offspring = offspring,
    missing_loci=0,
    mu_real = mu,
    unsampled_input=0.01,
    return_clusters=True,
    verbose=False
)
even_famsizes = np.array([evenclusters[i].family_size() for i in range(len(evenclusters))])

plt.plot(even_famsizes.mean(0))
plt.show()
"""
Explanation: Custom simulations
Once you are familiar with the basic building blocks for generating data and running analysis, creating your own simulations is largely a case of setting up combinations of parameters and looping over them. Given the vast array of possible scenarios you could want to simulate, it is impossible to be comprehensive here, so it must suffice to give a couple of examples for inspiration.
Likelihood for missing sires
In this example I was interested in the performance of the likelihood estimator for a sire being absent. This is the likelihood of generating the offspring genotype if paternal alleles come from population allele frequencies. This is what the attribute lik_abset in a paternityArray tells you. Ideally this likelihood should be below the likelihood of paternity for the true sire, but higher than that of the other candidates. I suspected this would not be the case when minor allele frequency is low and there are many candidates. This cell sets up the simulation. I'm considering 50 loci, and mu=0.0015, but varying sample size and allele frequency.
                    prob_missing = np.exp(patlik.prob_array()[:, -1]).mean()
                    # export data
                    dt[counter] = np.array([r, nloci[l], allele_freqs[a], nadults[n], mu_list[m], rank, prob_missing])
                    # update counters
                    counter += 1

print("Completed in {} hours.".format(round((time() - t0)/3600,2)))

head = ['rep', 'nloci', 'allele_freqs', 'nadults', 'mu', 'rank', 'prob_missing']
dt = pd.DataFrame(dt, columns=head)
"""
Explanation: This cell simulates genotype data and clusters the offspring into full sibships. The code pulls out the mean probability that each sire is absent, and the rank of the likelihood for a missing sire among the likelihoods of paternity for the candidates.
End of explanation
"""
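# A quick, optional look at the finished table (this is not from the original
# tutorial): how the mean rank of the 'missing sire' term and its mean posterior
# probability change with the number of candidate fathers, averaged over replicates.
dt.groupby('nadults')[['rank', 'prob_missing']].mean().plot(subplots=True)
plt.show()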
wanderer2/pymc3
docs/source/notebooks/dp_mix.ipynb
apache-2.0
%matplotlib inline from __future__ import division from matplotlib import pyplot as plt import numpy as np import pymc3 as pm import scipy as sp import seaborn as sns from statsmodels.datasets import get_rdataset from theano import tensor as tt blue, *_ = sns.color_palette() SEED = 5132290 # from random.org np.random.seed(SEED) N = 20 K = 30 alpha = 2. P0 = sp.stats.norm """ Explanation: Dirichlet process mixtures for density estimation Author: Austin Rochford Dirichlet processes The Dirichlet process is a flexible probability distribution over the space of distributions. Most generally, a probability distribution, $P$, on a set $\Omega$ is a [measure](https://en.wikipedia.org/wiki/Measure_(mathematics%29) that assigns measure one to the entire space ($P(\Omega) = 1$). A Dirichlet process $P \sim \textrm{DP}(\alpha, P_0)$ is a measure that has the property that, for every finite disjoint partition $S_1, \ldots, S_n$ of $\Omega$, $$(P(S_1), \ldots, P(S_n)) \sim \textrm{Dir}(\alpha P_0(S_1), \ldots, \alpha P_0(S_n)).$$ Here $P_0$ is the base probability measure on the space $\Omega$. The precision parameter $\alpha > 0$ controls how close samples from the Dirichlet process are to the base measure, $P_0$. As $\alpha \to \infty$, samples from the Dirichlet process approach the base measure $P_0$. Dirichlet processes have several properties that make then quite suitable to MCMC simulation. The posterior given i.i.d. observations $\omega_1, \ldots, \omega_n$ from a Dirichlet process $P \sim \textrm{DP}(\alpha, P_0)$ is also a Dirichlet process with $$P\ |\ \omega_1, \ldots, \omega_n \sim \textrm{DP}\left(\alpha + n, \frac{\alpha}{\alpha + n} P_0 + \frac{1}{\alpha + n} \sum_{i = 1}^n \delta_{\omega_i}\right),$$ where $\delta$ is the Dirac delta measure $$\begin{align} \delta_{\omega}(S) & = \begin{cases} 1 & \textrm{if } \omega \in S \ 0 & \textrm{if } \omega \not \in S \end{cases} \end{align}.$$ The posterior predictive distribution of a new observation is a compromise between the base measure and the observations, $$\omega\ |\ \omega_1, \ldots, \omega_n \sim \frac{\alpha}{\alpha + n} P_0 + \frac{1}{\alpha + n} \sum_{i = 1}^n \delta_{\omega_i}.$$ We see that the prior precision $\alpha$ can naturally be interpreted as a prior sample size. The form of this posterior predictive distribution also lends itself to Gibbs sampling. Samples, $P \sim \textrm{DP}(\alpha, P_0)$, from a Dirichlet process are discrete with probability one. That is, there are elements $\omega_1, \omega_2, \ldots$ in $\Omega$ and weights $w_1, w_2, \ldots$ with $\sum_{i = 1}^{\infty} w_i = 1$ such that $$P = \sum_{i = 1}^\infty w_i \delta_{\omega_i}.$$ The stick-breaking process gives an explicit construction of the weights $w_i$ and samples $\omega_i$ above that is straightforward to sample from. If $\beta_1, \beta_2, \ldots \sim \textrm{Beta}(1, \alpha)$, then $w_i = \beta_i \prod_{j = 1}^{j - 1} (1 - \beta_j)$. The relationship between this representation and stick breaking may be illustrated as follows: Start with a stick of length one. Break the stick into two portions, the first of proportion $w_1 = \beta_1$ and the second of proportion $1 - w_1$. Further break the second portion into two portions, the first of proportion $\beta_2$ and the second of proportion $1 - \beta_2$. The length of the first portion of this stick is $\beta_2 (1 - \beta_1)$; the length of the second portion is $(1 - \beta_1) (1 - \beta_2)$. Continue breaking the second portion from the previous break in this manner forever. 
If $\omega_1, \omega_2, \ldots \sim P_0$, then $$P = \sum_{i = 1}^\infty w_i \delta_{\omega_i} \sim \textrm{DP}(\alpha, P_0).$$ We can use the stick-breaking process above to easily sample from a Dirichlet process in Python. For this example, $\alpha = 2$ and the base distribution is $N(0, 1)$. End of explanation """ beta = sp.stats.beta.rvs(1, alpha, size=(N, K)) w = np.empty_like(beta) w[:, 0] = beta[:, 0] w[:, 1:] = beta[:, 1:] * (1 - beta[:, :-1]).cumprod(axis=1) omega = P0.rvs(size=(N, K)) x_plot = np.linspace(-3, 3, 200) sample_cdfs = (w[..., np.newaxis] * np.less.outer(omega, x_plot)).sum(axis=1) fig, ax = plt.subplots(figsize=(8, 6)) ax.plot(x_plot, sample_cdfs[0], c='gray', alpha=0.75, label='DP sample CDFs'); ax.plot(x_plot, sample_cdfs[1:].T, c='gray', alpha=0.75); ax.plot(x_plot, P0.cdf(x_plot), c='k', label='Base CDF'); ax.set_title(r'$\alpha = {}$'.format(alpha)); ax.legend(loc=2); """ Explanation: We draw and plot samples from the stick-breaking process. End of explanation """ fig, (l_ax, r_ax) = plt.subplots(ncols=2, sharex=True, sharey=True, figsize=(16, 6)) K = 50 alpha = 10. beta = sp.stats.beta.rvs(1, alpha, size=(N, K)) w = np.empty_like(beta) w[:, 0] = beta[:, 0] w[:, 1:] = beta[:, 1:] * (1 - beta[:, :-1]).cumprod(axis=1) omega = P0.rvs(size=(N, K)) sample_cdfs = (w[..., np.newaxis] * np.less.outer(omega, x_plot)).sum(axis=1) l_ax.plot(x_plot, sample_cdfs[0], c='gray', alpha=0.75, label='DP sample CDFs'); l_ax.plot(x_plot, sample_cdfs[1:].T, c='gray', alpha=0.75); l_ax.plot(x_plot, P0.cdf(x_plot), c='k', label='Base CDF'); l_ax.set_title(r'$\alpha = {}$'.format(alpha)); l_ax.legend(loc=2); K = 200 alpha = 50. beta = sp.stats.beta.rvs(1, alpha, size=(N, K)) w = np.empty_like(beta) w[:, 0] = beta[:, 0] w[:, 1:] = beta[:, 1:] * (1 - beta[:, :-1]).cumprod(axis=1) omega = P0.rvs(size=(N, K)) sample_cdfs = (w[..., np.newaxis] * np.less.outer(omega, x_plot)).sum(axis=1) r_ax.plot(x_plot, sample_cdfs[0], c='gray', alpha=0.75, label='DP sample CDFs'); r_ax.plot(x_plot, sample_cdfs[1:].T, c='gray', alpha=0.75); r_ax.plot(x_plot, P0.cdf(x_plot), c='k', label='Base CDF'); r_ax.set_title(r'$\alpha = {}$'.format(alpha)); r_ax.legend(loc=2); """ Explanation: As stated above, as $\alpha \to \infty$, samples from the Dirichlet process converge to the base distribution. End of explanation """ N = 5 K = 30 alpha = 2 P0 = sp.stats.norm f = lambda x, theta: sp.stats.norm.pdf(x, theta, 0.3) beta = sp.stats.beta.rvs(1, alpha, size=(N, K)) w = np.empty_like(beta) w[:, 0] = beta[:, 0] w[:, 1:] = beta[:, 1:] * (1 - beta[:, :-1]).cumprod(axis=1) theta = P0.rvs(size=(N, K)) dpm_pdf_components = f(x_plot[np.newaxis, np.newaxis, :], theta[..., np.newaxis]) dpm_pdfs = (w[..., np.newaxis] * dpm_pdf_components).sum(axis=1) fig, ax = plt.subplots(figsize=(8, 6)) ax.plot(x_plot, dpm_pdfs.T, c='gray'); ax.set_yticklabels([]); """ Explanation: Dirichlet process mixtures For the task of density estimation, the (almost sure) discreteness of samples from the Dirichlet process is a significant drawback. This problem can be solved with another level of indirection by using Dirichlet process mixtures for density estimation. A Dirichlet process mixture uses component densities from a parametric family $\mathcal{F} = {f_{\theta}\ |\ \theta \in \Theta}$ and represents the mixture weights as a Dirichlet process. 
If $P_0$ is a probability measure on the parameter space $\Theta$, a Dirichlet process mixture is the hierarchical model $$ \begin{align} x_i\ |\ \theta_i & \sim f_{\theta_i} \ \theta_1, \ldots, \theta_n & \sim P \ P & \sim \textrm{DP}(\alpha, P_0). \end{align} $$ To illustrate this model, we simulate draws from a Dirichlet process mixture with $\alpha = 2$, $\theta \sim N(0, 1)$, $x\ |\ \theta \sim N(\theta, (0.3)^2)$. End of explanation """ fig, ax = plt.subplots(figsize=(8, 6)) ix = 1 ax.plot(x_plot, dpm_pdfs[ix], c='k', label='Density'); ax.plot(x_plot, (w[..., np.newaxis] * dpm_pdf_components)[ix, 0], '--', c='k', label='Mixture components (weighted)'); ax.plot(x_plot, (w[..., np.newaxis] * dpm_pdf_components)[ix].T, '--', c='k'); ax.set_yticklabels([]); ax.legend(loc=1); """ Explanation: We now focus on a single mixture and decompose it into its individual (weighted) mixture components. End of explanation """ old_faithful_df = get_rdataset('faithful', cache=True).data[['waiting']] """ Explanation: Sampling from these stochastic processes is fun, but these ideas become truly useful when we fit them to data. The discreteness of samples and the stick-breaking representation of the Dirichlet process lend themselves nicely to Markov chain Monte Carlo simulation of posterior distributions. We will perform this sampling using pymc3. Our first example uses a Dirichlet process mixture to estimate the density of waiting times between eruptions of the Old Faithful geyser in Yellowstone National Park. End of explanation """ old_faithful_df['std_waiting'] = (old_faithful_df.waiting - old_faithful_df.waiting.mean()) / old_faithful_df.waiting.std() old_faithful_df.head() fig, ax = plt.subplots(figsize=(8, 6)) n_bins = 20 ax.hist(old_faithful_df.std_waiting, bins=n_bins, color=blue, lw=0, alpha=0.5); ax.set_xlabel('Standardized waiting time between eruptions'); ax.set_ylabel('Number of eruptions'); """ Explanation: For convenience in specifying the prior, we standardize the waiting time between eruptions. End of explanation """ N = old_faithful_df.shape[0] K = 40 def stick_breaking(beta): portion_remaining = tt.concatenate([[1], tt.extra_ops.cumprod(1 - beta)[:-1]]) return beta * portion_remaining with pm.Model() as model: alpha = pm.Gamma('alpha', 1., 1.) beta = pm.Beta('beta', 1., alpha, shape=K) w = pm.Deterministic('w', stick_breaking(beta)) tau = pm.Gamma('tau', 1., 1., shape=K) lambda_ = pm.Uniform('lambda', 0, 5, shape=K) mu = pm.Normal('mu', 0, tau=lambda_ * tau, shape=K) obs = pm.NormalMixture('obs', w, mu, tau=lambda_ * tau, observed=old_faithful_df.std_waiting.values) """ Explanation: Observant readers will have noted that we have not been continuing the stick-breaking process indefinitely as indicated by its definition, but rather have been truncating this process after a finite number of breaks. Obviously, when computing with Dirichlet processes, it is necessary to only store a finite number of its point masses and weights in memory. This restriction is not terribly onerous, since with a finite number of observations, it seems quite likely that the number of mixture components that contribute non-neglible mass to the mixture will grow slower than the number of samples. This intuition can be formalized to show that the (expected) number of components that contribute non-negligible mass to the mixture approaches $\alpha \log N$, where $N$ is the sample size. 
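As a crude sanity check on a proposed truncation level, it can help to plug a few plausible values of $\alpha$ into that rule of thumb. The sample size below is an illustrative assumption, not a quantity taken from this analysis; use the length of your own data.
python
import numpy as np
n_obs = 272                     # assumed, illustrative sample size
for a in (0.5, 1., 2.):         # a few plausible values of alpha
    print(a, a * np.log(n_obs))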
There are various clever Gibbs sampling techniques for Dirichlet processes that allow the number of components stored to grow as needed. Stochastic memoization is another powerful technique for simulating Dirichlet processes while only storing finitely many components in memory. In this introductory example, we take the much less sophistocated approach of simply truncating the Dirichlet process components that are stored after a fixed number, $K$, of components. Ohlssen, et al. provide justification for truncation, showing that $K > 5 \alpha + 2$ is most likely sufficient to capture almost all of the mixture weight ($\sum_{i = 1}^{K} w_i > 0.99$). In practice, we can verify the suitability of our truncated approximation to the Dirichlet process by checking the number of components that contribute non-negligible mass to the mixture. If, in our simulations, all components contribute non-negligible mass to the mixture, we have truncated the Dirichlet process too early. Our (truncated) Dirichlet process mixture model for the standardized waiting times is $$ \begin{align} \alpha & \sim \textrm{Gamma}(1, 1) \ \beta_1, \ldots, \beta_K & \sim \textrm{Beta}(1, \alpha) \ w_i & = \beta_i \prod_{j = i - 1}^i (1 - \beta_j) \ \ \lambda_1, \ldots, \lambda_K & \sim U(0, 5) \ \tau_1, \ldots, \tau_K & \sim \textrm{Gamma}(1, 1) \ \mu_i\ |\ \lambda_i, \tau_i & \sim N\left(0, (\lambda_i \tau_i)^{-1}\right) \ \ x\ |\ w_i, \lambda_i, \tau_i, \mu_i & \sim \sum_{i = 1}^K w_i\ N(\mu_i, (\lambda_i \tau_i)^{-1}) \end{align} $$ Note that instead of fixing a value of $\alpha$, as in our previous simulations, we specify a prior on $\alpha$, so that we may learn its posterior distribution from the observations. We now construct this model using pymc3. End of explanation """ with model: trace = pm.sample(1000, n_init=50000, random_seed=SEED) """ Explanation: We sample from the model 1,000 times. End of explanation """ pm.traceplot(trace, varnames=['alpha']); """ Explanation: The posterior distribution of $\alpha$ is highly concentrated around 0.2. End of explanation """ fig, ax = plt.subplots(figsize=(8, 6)) plot_w = np.arange(K) + 1 ax.bar(plot_w - 0.5, trace['w'].mean(axis=0), width=1., lw=0); ax.set_xlim(0.5, K); ax.set_xlabel('Component'); ax.set_ylabel('Posterior expected mixture weight'); """ Explanation: To verify that truncation is not biasing our results, we plot the posterior expected mixture weight of each component. End of explanation """ post_pdf_contribs = sp.stats.norm.pdf(np.atleast_3d(x_plot), trace['mu'][:, np.newaxis, :], 1. 
/ np.sqrt(trace['lambda'] * trace['tau'])[:, np.newaxis, :]) post_pdfs = (trace['w'][:, np.newaxis, :] * post_pdf_contribs).sum(axis=-1) post_pdf_low, post_pdf_high = np.percentile(post_pdfs, [2.5, 97.5], axis=0) fig, ax = plt.subplots(figsize=(8, 6)) n_bins = 20 ax.hist(old_faithful_df.std_waiting.values, bins=n_bins, normed=True, color=blue, lw=0, alpha=0.5); ax.fill_between(x_plot, post_pdf_low, post_pdf_high, color='gray', alpha=0.45); ax.plot(x_plot, post_pdfs[0], c='gray', label='Posterior sample densities'); ax.plot(x_plot, post_pdfs[::100].T, c='gray'); ax.plot(x_plot, post_pdfs.mean(axis=0), c='k', label='Posterior expected density'); ax.set_xlabel('Standardized waiting time between eruptions'); ax.set_yticklabels([]); ax.set_ylabel('Density'); ax.legend(loc=2); """ Explanation: We see that only two mixture components have appreciable posterior expected weights, so we conclude that truncating the Dirichlet process to forty components has not appreciably affected our estimates. It is visually plausible that the observed data has two mixture components. We now compute and plot our posterior density estimate. End of explanation """ fig, ax = plt.subplots(figsize=(8, 6)) n_bins = 20 ax.hist(old_faithful_df.std_waiting.values, bins=n_bins, normed=True, color=blue, lw=0, alpha=0.5); ax.plot(x_plot, post_pdfs.mean(axis=0), c='k', label='Posterior expected density'); ax.plot(x_plot, (trace['w'][:, np.newaxis, :] * post_pdf_contribs).mean(axis=0)[:, 0], '--', c='k', label='Posterior expected mixture\ncomponents\n(weighted)'); ax.plot(x_plot, (trace['w'][:, np.newaxis, :] * post_pdf_contribs).mean(axis=0), '--', c='k'); ax.set_xlabel('Standardized waiting time between eruptions'); ax.set_yticklabels([]); ax.set_ylabel('Density'); ax.legend(loc=2); """ Explanation: As above, we can decompose this density estimate into its (weighted) mixture components. End of explanation """ sunspot_df = get_rdataset('sunspot.year', cache=True).data sunspot_df.head() """ Explanation: The Dirichlet process mixture model is incredibly flexible in terms of the family of parametric component distributions ${f_{\theta}\ |\ f_{\theta} \in \Theta}$. We illustrate this flexibility below by using Poisson component distributions to estimate the density of sunspots per year. End of explanation """ N = sunspot_df.shape[0] with pm.Model() as model: alpha = pm.Gamma('alpha', 1., 1.) beta = pm.Beta('beta', 1, alpha, shape=K) w = pm.Deterministic('w', stick_breaking(beta)) mu = pm.Uniform('mu', 0., 300., shape=K) obs = pm.Mixture('obs', w, pm.Poisson.dist(mu), observed=sunspot_df['sunspot.year']) with model: step = pm.Metropolis() trace_ = pm.sample(10000, step=step, random_seed=SEED) trace = trace_[5000::5] """ Explanation: For this example, the model is $$ \begin{align} \alpha & \sim \textrm{Gamma}(1, 1) \ \beta_1, \ldots, \beta_K & \sim \textrm{Beta}(1, \alpha) \ w_i & = \beta_i \prod_{j = i - 1}^i (1 - \beta_j) \ \ \lambda_i, \ldots, \lambda_K & \sim U(0, 300) \ x\ |\ w_i, \lambda_i & \sim \sum_{i = 1}^K w_i\ \textrm{Poisson}(\lambda_i). \end{align} $$ End of explanation """ pm.traceplot(trace, varnames=['alpha']); """ Explanation: For the sunspot model, the posterior distribution of $\alpha$ is concentrated between 1.5 and 4.5, indicating that we should expect more components to contribute non-negligible amounts to the mixture than for the Old Faithful waiting time model. 
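One quick way to make that expectation concrete once sampling has finished is to count the components whose posterior expected weight exceeds some small cutoff; the 1% used below is an arbitrary choice, not a principled threshold.
python
(trace['w'].mean(axis=0) > 0.01).sum()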
End of explanation """ fig, ax = plt.subplots(figsize=(8, 6)) plot_w = np.arange(K) + 1 ax.bar(plot_w - 0.5, trace['w'].mean(axis=0), width=1., lw=0); ax.set_xlim(0.5, K); ax.set_xlabel('Component'); ax.set_ylabel('Posterior expected mixture weight'); """ Explanation: Indeed, we see that between ten and fifteen mixture components have appreciable posterior expected weight. End of explanation """ x_plot = np.arange(250) post_pmf_contribs = sp.stats.poisson.pmf(np.atleast_3d(x_plot), trace['mu'][:, np.newaxis, :]) post_pmfs = (trace['w'][:, np.newaxis, :] * post_pmf_contribs).sum(axis=-1) post_pmf_low, post_pmf_high = np.percentile(post_pmfs, [2.5, 97.5], axis=0) fig, ax = plt.subplots(figsize=(8, 6)) ax.hist(sunspot_df['sunspot.year'].values, bins=40, normed=True, lw=0, alpha=0.75); ax.fill_between(x_plot, post_pmf_low, post_pmf_high, color='gray', alpha=0.45) ax.plot(x_plot, post_pmfs[0], c='gray', label='Posterior sample densities'); ax.plot(x_plot, post_pmfs[::200].T, c='gray'); ax.plot(x_plot, post_pmfs.mean(axis=0), c='k', label='Posterior expected density'); ax.set_xlabel('Yearly sunspot count'); ax.set_yticklabels([]); ax.legend(loc=1); """ Explanation: We now calculate and plot the fitted density estimate. End of explanation """ fig, ax = plt.subplots(figsize=(8, 6)) ax.hist(sunspot_df['sunspot.year'].values, bins=40, normed=True, lw=0, alpha=0.75); ax.plot(x_plot, post_pmfs.mean(axis=0), c='k', label='Posterior expected density'); ax.plot(x_plot, (trace['w'][:, np.newaxis, :] * post_pmf_contribs).mean(axis=0)[:, 0], '--', c='k', label='Posterior expected\nmixture components\n(weighted)'); ax.plot(x_plot, (trace['w'][:, np.newaxis, :] * post_pmf_contribs).mean(axis=0), '--', c='k'); ax.set_xlabel('Yearly sunspot count'); ax.set_yticklabels([]); ax.legend(loc=1); """ Explanation: Again, we can decompose the posterior expected density into weighted mixture densities. End of explanation """
vvishwa/deep-learning
autoencoder/Convolutional_Autoencoder_Solution.ipynb
mit
%matplotlib inline import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data', validation_size=0) img = mnist.train.images[2] plt.imshow(img.reshape((28, 28)), cmap='Greys_r') """ Explanation: Convolutional Autoencoder Sticking with the MNIST dataset, let's improve our autoencoder's performance using convolutional layers. Again, loading modules and the data. End of explanation """ inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs') targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets') ### Encoder conv1 = tf.layers.conv2d(inputs_, 16, (3,3), padding='same', activation=tf.nn.relu) # Now 28x28x16 maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same') # Now 14x14x16 conv2 = tf.layers.conv2d(maxpool1, 8, (3,3), padding='same', activation=tf.nn.relu) # Now 14x14x8 maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same') # Now 7x7x8 conv3 = tf.layers.conv2d(maxpool2, 8, (3,3), padding='same', activation=tf.nn.relu) # Now 7x7x8 encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same') # Now 4x4x8 ### Decoder upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7)) # Now 7x7x8 conv4 = tf.layers.conv2d(upsample1, 8, (3,3), padding='same', activation=tf.nn.relu) # Now 7x7x8 upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14)) # Now 14x14x8 conv5 = tf.layers.conv2d(upsample2, 8, (3,3), padding='same', activation=tf.nn.relu) # Now 14x14x8 upsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28)) # Now 28x28x8 conv6 = tf.layers.conv2d(upsample3, 16, (3,3), padding='same', activation=tf.nn.relu) # Now 28x28x16 logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None) #Now 28x28x1 decoded = tf.nn.sigmoid(logits, name='decoded') loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits) cost = tf.reduce_mean(loss) opt = tf.train.AdamOptimizer(0.001).minimize(cost) """ Explanation: Network Architecture The encoder part of the network will be a typical convolutional pyramid. Each convolutional layer will be followed by a max-pooling layer to reduce the dimensions of the layers. The decoder though might be something new to you. The decoder needs to convert from a narrow representation to a wide reconstructed image. For example, the representation could be a 4x4x8 max-pool layer. This is the output of the encoder, but also the input to the decoder. We want to get a 28x28x1 image out from the decoder so we need to work our way back up from the narrow decoder input layer. A schematic of the network is shown below. Here our final encoder layer has size 4x4x8 = 128. The original images have size 28x28 = 784, so the encoded vector is roughlt 16% the size of the original image. These are just suggested sizes for each of the layers. Feel free to change the depths and sizes, but remember our goal here is to find a small representation of the input data. What's going on with the decoder Okay, so the decoder has these "Upsample" layers that you might not have seen before. First off, I'll discuss a bit what these layers aren't. Usually, you'll see deconvolutional layers used to increase the width and height of the layers. They work almost exactly the same as convolutional layers, but it reverse. A stride in the input layer results in a larger stride in the deconvolutional layer. 
For example, if you have a 3x3 kernel, a 3x3 patch in the input layer will be reduced to one unit in a convolutional layer. Comparatively, one unit in the input layer will be expanded to a 3x3 path in a deconvolutional layer. Deconvolution is often called "transpose convolution" which is what you'll find the TensorFlow API, with tf.nn.conv2d_transpose. However, deconvolutional layers can lead to artifacts in the final images, such as checkerboard patterns. This is due to overlap in the kernels which can be avoided by setting the stride and kernel size equal. In this Distill article from Augustus Odena, et al, the authors show that these checkerboard artifacts can be avoided by resizing the layers using nearest neighbor or bilinear interpolation (upsampling) followed by a convolutional layer. In TensorFlow, this is easily done with tf.image.resize_images, followed by a convolution. Be sure to read the Distill article to get a better understanding of deconvolutional layers and why we're using upsampling. Exercise: Build the network shown above. Remember that a convolutional layer with strides of 1 and 'same' padding won't reduce the height and width. That is, if the input is 28x28 and the convolution layer has stride = 1 and 'same' padding, the convolutional layer will also be 28x28. The max-pool layers are used the reduce the width and height. A stride of 2 will reduce the size by 2. Odena et al claim that nearest neighbor interpolation works best for the upsampling, so make sure to include that as a parameter in tf.image.resize_images or use tf.image.resize_nearest_neighbor. End of explanation """ sess = tf.Session() epochs = 20 batch_size = 200 sess.run(tf.global_variables_initializer()) for e in range(epochs): for ii in range(mnist.train.num_examples//batch_size): batch = mnist.train.next_batch(batch_size) imgs = batch[0].reshape((-1, 28, 28, 1)) batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: imgs, targets_: imgs}) print("Epoch: {}/{}...".format(e+1, epochs), "Training loss: {:.4f}".format(batch_cost)) fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4)) in_imgs = mnist.test.images[:10] reconstructed = sess.run(decoded, feed_dict={inputs_: in_imgs.reshape((10, 28, 28, 1))}) for images, row in zip([in_imgs, reconstructed], axes): for img, ax in zip(images, row): ax.imshow(img.reshape((28, 28)), cmap='Greys_r') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) fig.tight_layout(pad=0.1) sess.close() """ Explanation: Training As before, here wi'll train the network. Instead of flattening the images though, we can pass them in as 28x28x1 arrays. 
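If you want a rough sense of how well the reconstruction generalises, you can also evaluate the same cost on a handful of held-out images every so often while the session is still open. This is an optional sketch; nothing else in the notebook depends on it.
python
val_imgs = mnist.test.images[:200].reshape((-1, 28, 28, 1))
val_cost = sess.run(cost, feed_dict={inputs_: val_imgs, targets_: val_imgs})
print("Held-out loss: {:.4f}".format(val_cost))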
End of explanation """ inputs_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='inputs') targets_ = tf.placeholder(tf.float32, (None, 28, 28, 1), name='targets') ### Encoder conv1 = tf.layers.conv2d(inputs_, 32, (3,3), padding='same', activation=tf.nn.relu) # Now 28x28x32 maxpool1 = tf.layers.max_pooling2d(conv1, (2,2), (2,2), padding='same') # Now 14x14x32 conv2 = tf.layers.conv2d(maxpool1, 32, (3,3), padding='same', activation=tf.nn.relu) # Now 14x14x32 maxpool2 = tf.layers.max_pooling2d(conv2, (2,2), (2,2), padding='same') # Now 7x7x32 conv3 = tf.layers.conv2d(maxpool2, 16, (3,3), padding='same', activation=tf.nn.relu) # Now 7x7x16 encoded = tf.layers.max_pooling2d(conv3, (2,2), (2,2), padding='same') # Now 4x4x16 ### Decoder upsample1 = tf.image.resize_nearest_neighbor(encoded, (7,7)) # Now 7x7x16 conv4 = tf.layers.conv2d(upsample1, 16, (3,3), padding='same', activation=tf.nn.relu) # Now 7x7x16 upsample2 = tf.image.resize_nearest_neighbor(conv4, (14,14)) # Now 14x14x16 conv5 = tf.layers.conv2d(upsample2, 32, (3,3), padding='same', activation=tf.nn.relu) # Now 14x14x32 upsample3 = tf.image.resize_nearest_neighbor(conv5, (28,28)) # Now 28x28x32 conv6 = tf.layers.conv2d(upsample3, 32, (3,3), padding='same', activation=tf.nn.relu) # Now 28x28x32 logits = tf.layers.conv2d(conv6, 1, (3,3), padding='same', activation=None) #Now 28x28x1 decoded = tf.nn.sigmoid(logits, name='decoded') loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits) cost = tf.reduce_mean(loss) opt = tf.train.AdamOptimizer(0.001).minimize(cost) sess = tf.Session() epochs = 100 batch_size = 200 # Set's how much noise we're adding to the MNIST images noise_factor = 0.5 sess.run(tf.global_variables_initializer()) for e in range(epochs): for ii in range(mnist.train.num_examples//batch_size): batch = mnist.train.next_batch(batch_size) # Get images from the batch imgs = batch[0].reshape((-1, 28, 28, 1)) # Add random noise to the input images noisy_imgs = imgs + noise_factor * np.random.randn(*imgs.shape) # Clip the images to be between 0 and 1 noisy_imgs = np.clip(noisy_imgs, 0., 1.) # Noisy images as inputs, original images as targets batch_cost, _ = sess.run([cost, opt], feed_dict={inputs_: noisy_imgs, targets_: imgs}) print("Epoch: {}/{}...".format(e+1, epochs), "Training loss: {:.4f}".format(batch_cost)) """ Explanation: Denoising As I've mentioned before, autoencoders like the ones you've built so far aren't too useful in practive. However, they can be used to denoise images quite successfully just by training the network on noisy images. We can create the noisy images ourselves by adding Gaussian noise to the training images, then clipping the values to be between 0 and 1. We'll use noisy images as input and the original, clean images as targets. Here's an example of the noisy images I generated and the denoised images. Since this is a harder problem for the network, we'll want to use deeper convolutional layers here, more feature maps. I suggest something like 32-32-16 for the depths of the convolutional layers in the encoder, and the same depths going backward through the decoder. Otherwise the architecture is the same as before. Exercise: Build the network for the denoising autoencoder. It's the same as before, but with deeper layers. I suggest 32-32-16 for the depths, but you can play with these numbers, or add more layers. 
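If you do experiment with the depths, one optional way to keep the code tidy is to wrap the repeated convolution-plus-pool step in a small helper. The name conv_block below is made up purely for illustration and nothing later relies on it.
python
def conv_block(x, depth):
    # one 3x3 convolution followed by a 2x2 max-pool, the pattern used throughout the encoder
    x = tf.layers.conv2d(x, depth, (3, 3), padding='same', activation=tf.nn.relu)
    return tf.layers.max_pooling2d(x, (2, 2), (2, 2), padding='same')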
End of explanation """ fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4)) in_imgs = mnist.test.images[:10] noisy_imgs = in_imgs + noise_factor * np.random.randn(*in_imgs.shape) noisy_imgs = np.clip(noisy_imgs, 0., 1.) reconstructed = sess.run(decoded, feed_dict={inputs_: noisy_imgs.reshape((10, 28, 28, 1))}) for images, row in zip([noisy_imgs, reconstructed], axes): for img, ax in zip(images, row): ax.imshow(img.reshape((28, 28)), cmap='Greys_r') ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) fig.tight_layout(pad=0.1) """ Explanation: Checking out the performance Here I'm adding noise to the test images and passing them through the autoencoder. It does a suprising great job of removing the noise, even though it's sometimes difficult to tell what the original number is. End of explanation """
mne-tools/mne-tools.github.io
0.21/_downloads/2fc30e4810d35d643811cc11759b3b9a/plot_resample.ipynb
bsd-3-clause
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com> # # License: BSD (3-clause) from matplotlib import pyplot as plt import mne from mne.datasets import sample """ Explanation: Resampling data When performing experiments where timing is critical, a signal with a high sampling rate is desired. However, having a signal with a much higher sampling rate than is necessary needlessly consumes memory and slows down computations operating on the data. This example downsamples from 600 Hz to 100 Hz. This achieves a 6-fold reduction in data size, at the cost of an equal loss of temporal resolution. End of explanation """ data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif' raw = mne.io.read_raw_fif(raw_fname).crop(120, 240).load_data() """ Explanation: Setting up data paths and loading raw data (skip some data for speed) End of explanation """ events = mne.find_events(raw) epochs = mne.Epochs(raw, events, event_id=2, tmin=-0.1, tmax=0.8, preload=True) # Downsample to 100 Hz print('Original sampling rate:', epochs.info['sfreq'], 'Hz') epochs_resampled = epochs.copy().resample(100, npad='auto') print('New sampling rate:', epochs_resampled.info['sfreq'], 'Hz') # Plot a piece of data to see the effects of downsampling plt.figure(figsize=(7, 3)) n_samples_to_plot = int(0.5 * epochs.info['sfreq']) # plot 0.5 seconds of data plt.plot(epochs.times[:n_samples_to_plot], epochs.get_data()[0, 0, :n_samples_to_plot], color='black') n_samples_to_plot = int(0.5 * epochs_resampled.info['sfreq']) plt.plot(epochs_resampled.times[:n_samples_to_plot], epochs_resampled.get_data()[0, 0, :n_samples_to_plot], '-o', color='red') plt.xlabel('time (s)') plt.legend(['original', 'downsampled'], loc='best') plt.title('Effect of downsampling') mne.viz.tight_layout() """ Explanation: Since downsampling reduces the timing precision of events, we recommend first extracting epochs and downsampling the Epochs object: End of explanation """ # Resample to 300 Hz raw_resampled_300 = raw.copy().resample(300, npad='auto') """ Explanation: When resampling epochs is unwanted or impossible, for example when the data doesn't fit into memory or your analysis pipeline doesn't involve epochs at all, the alternative approach is to resample the continuous data. This can only be done on loaded or pre-loaded data. End of explanation """ print('Number of events before resampling:', len(mne.find_events(raw))) # Resample to 100 Hz (suppress the warning that would be emitted) raw_resampled_100 = raw.copy().resample(100, npad='auto', verbose='error') print('Number of events after resampling:', len(mne.find_events(raw_resampled_100))) # To avoid losing events, jointly resample the data and event matrix events = mne.find_events(raw) raw_resampled, events_resampled = raw.copy().resample( 100, npad='auto', events=events) print('Number of events after resampling:', len(events_resampled)) """ Explanation: Because resampling also affects the stim channels, some trigger onsets might be lost in this case. While MNE attempts to downsample the stim channels in an intelligent manner to avoid this, the recommended approach is to find events on the original data before downsampling. End of explanation """
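# A possible follow-up, not part of the original example: build epochs from the
# jointly resampled data and events so that the event sample indices stay aligned
# with the downsampled signal.
epochs_from_resampled = mne.Epochs(raw_resampled, events_resampled, event_id=2,
                                   tmin=-0.1, tmax=0.8, preload=True)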
mne-tools/mne-tools.github.io
0.24/_downloads/8ea2bfc401dbdff70c284d271d62fa8c/label_from_stc.ipynb
bsd-3-clause
# Author: Luke Bloy <luke.bloy@gmail.com> # Alex Gramfort <alexandre.gramfort@inria.fr> # License: BSD-3-Clause import numpy as np import matplotlib.pyplot as plt import mne from mne.minimum_norm import read_inverse_operator, apply_inverse from mne.datasets import sample print(__doc__) data_path = sample.data_path() fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif' subjects_dir = data_path + '/subjects' subject = 'sample' snr = 3.0 lambda2 = 1.0 / snr ** 2 method = "dSPM" # use dSPM method (could also be MNE or sLORETA) # Compute a label/ROI based on the peak power between 80 and 120 ms. # The label bankssts-lh is used for the comparison. aparc_label_name = 'bankssts-lh' tmin, tmax = 0.080, 0.120 # Load data evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0)) inverse_operator = read_inverse_operator(fname_inv) src = inverse_operator['src'] # get the source space # Compute inverse solution stc = apply_inverse(evoked, inverse_operator, lambda2, method, pick_ori='normal') # Make an STC in the time interval of interest and take the mean stc_mean = stc.copy().crop(tmin, tmax).mean() # use the stc_mean to generate a functional label # region growing is halted at 60% of the peak value within the # anatomical label / ROI specified by aparc_label_name label = mne.read_labels_from_annot(subject, parc='aparc', subjects_dir=subjects_dir, regexp=aparc_label_name)[0] stc_mean_label = stc_mean.in_label(label) data = np.abs(stc_mean_label.data) stc_mean_label.data[data < 0.6 * np.max(data)] = 0. # 8.5% of original source space vertices were omitted during forward # calculation, suppress the warning here with verbose='error' func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True, subjects_dir=subjects_dir, connected=True, verbose='error') # take first as func_labels are ordered based on maximum values in stc func_label = func_labels[0] # load the anatomical ROI for comparison anat_label = mne.read_labels_from_annot(subject, parc='aparc', subjects_dir=subjects_dir, regexp=aparc_label_name)[0] # extract the anatomical time course for each label stc_anat_label = stc.in_label(anat_label) pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0] stc_func_label = stc.in_label(func_label) pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0] # flip the pca so that the max power between tmin and tmax is positive pca_anat *= np.sign(pca_anat[np.argmax(np.abs(pca_anat))]) pca_func *= np.sign(pca_func[np.argmax(np.abs(pca_anat))]) """ Explanation: Generate a functional label from source estimates Threshold source estimates and produce a functional label. The label is typically the region of interest that contains high values. Here we compare the average time course in the anatomical label obtained by FreeSurfer segmentation and the average time course from the functional label. As expected the time course in the functional label yields higher values. End of explanation """ plt.figure() plt.plot(1e3 * stc_anat_label.times, pca_anat, 'k', label='Anatomical %s' % aparc_label_name) plt.plot(1e3 * stc_func_label.times, pca_func, 'b', label='Functional %s' % aparc_label_name) plt.legend() plt.show() """ Explanation: plot the time courses.... 
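# Optional sanity check, not part of the original example: compare the peak
# amplitudes of the two label time courses extracted above. The functional
# label is expected to reach a higher value than the anatomical one.
print('anatomical peak: %0.1f, functional peak: %0.1f'
      % (np.abs(pca_anat).max(), np.abs(pca_func).max()))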
End of explanation """ brain = stc_mean.plot(hemi='lh', subjects_dir=subjects_dir) brain.show_view('lateral') # show both labels brain.add_label(anat_label, borders=True, color='k') brain.add_label(func_label, borders=True, color='b') """ Explanation: plot brain in 3D with mne.viz.Brain if available End of explanation """
danresende/deep-learning
gan_mnist/Intro_to_GANs_Exercises.ipynb
mit
%matplotlib inline import pickle as pkl import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data') """ Explanation: Generative Adversarial Network In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits! GANs were first reported on in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out: Pix2Pix CycleGAN A whole list The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator, it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistiguishable from real data to the discriminator. The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to contruct it's fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can foold the discriminator. The output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates an real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow. End of explanation """ def model_inputs(real_dim, z_dim): inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real') inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z') return inputs_real, inputs_z """ Explanation: Model Inputs First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks. Exercise: Finish the model_inputs function below. Create the placeholders for inputs_real and inputs_z using the input sizes real_dim and z_dim respectively. End of explanation """ def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01): ''' Build the generator network. Arguments --------- z : Input tensor for the generator out_dim : Shape of the generator output n_units : Number of units in hidden layer reuse : Reuse the variables with tf.variable_scope alpha : leak parameter for leaky ReLU Returns ------- out, logits: ''' with tf.variable_scope('generator', reuse=reuse): # finish this # Hidden layer h1 = tf.layers.dense(z, n_units) # Leaky ReLU h1 = tf.maximum(alpha * h1, h1) # Logits and tanh output logits = tf.layers.dense(h1, out_dim) out = tf.nn.tanh(logits) return out """ Explanation: Generator network Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values. 
Variable Scope Here we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks. We could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again. To use tf.variable_scope, you use a with statement: python with tf.variable_scope('scope_name', reuse=False): # code here Here's more from the TensorFlow documentation to get another look at using tf.variable_scope. Leaky ReLU TensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one . For this you can use take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x: $$ f(x) = max(\alpha * x, x) $$ Tanh Output The generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1. Exercise: Implement the generator network in the function below. You'll need to return the tanh output. Make sure to wrap your code in a variable scope, with 'generator' as the scope name, and pass the reuse keyword argument from the function to tf.variable_scope. End of explanation """ def discriminator(x, n_units=128, reuse=False, alpha=0.01): ''' Build the discriminator network. Arguments --------- x : Input tensor for the discriminator n_units: Number of units in hidden layer reuse : Reuse the variables with tf.variable_scope alpha : leak parameter for leaky ReLU Returns ------- out, logits: ''' with tf.variable_scope('discriminator', reuse=reuse): # finish this # Hidden layer h1 = tf.layers.dense(x, n_units) # Leaky ReLU h1 = tf.maximum(alpha * h1, h1) logits = tf.layers.dense(h1, 1) out = tf.sigmoid(logits) return out, logits """ Explanation: Discriminator The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer. Exercise: Implement the discriminator network in the function below. Same as above, you'll need to return both the logits and the sigmoid output. Make sure to wrap your code in a variable scope, with 'discriminator' as the scope name, and pass the reuse keyword argument from the function arguments to tf.variable_scope. 
End of explanation """ # Size of input image to discriminator input_size = 784 # 28x28 MNIST images flattened # Size of latent vector to generator z_size = 100 # Sizes of hidden layers in generator and discriminator g_hidden_size = 128 d_hidden_size = 128 # Leak factor for leaky ReLU alpha = 0.01 # Label smoothing smooth = 0.1 """ Explanation: Hyperparameters End of explanation """ tf.reset_default_graph() # Create our input placeholders input_real, input_z = model_inputs(input_size, z_size) # Generator network here g_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha) # g_model is the generator output # Disriminator network here d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha) d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, n_units=d_hidden_size, alpha=alpha) """ Explanation: Build network Now we're building the network from the functions defined above. First is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z. Then, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes. Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True). Exercise: Build the network from the functions you defined earlier. End of explanation """ # Calculate losses d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_logits_real) * (1 - smooth))) d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_logits_real))) d_loss = d_loss_real + d_loss_fake g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_logits_fake))) """ Explanation: Discriminator and Generator Losses Now we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will by sigmoid cross-entropys, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like python tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)) For the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth) The discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that. 
Finally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants to discriminator to output ones for fake images. Exercise: Calculate the losses for the discriminator and the generator. There are two discriminator losses, one for real images and one for fake images. For the real image loss, use the real logits and (smoothed) labels of ones. For the fake image loss, use the fake logits with labels of all zeros. The total discriminator loss is the sum of those two losses. Finally, the generator loss again uses the fake logits from the discriminator, but this time the labels are all ones because the generator wants to fool the discriminator. End of explanation """ # Optimizers learning_rate = 0.002 # Get the trainable_variables, split into G and D parts t_vars = tf.trainable_variables() g_vars = [var for var in t_vars if var.name.startswith('generator')] d_vars = [var for var in t_vars if var.name.startswith('discriminator')] d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars) g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars) """ Explanation: Optimizers We want to update the generator and discriminator variables separately. So we need to get the variables for each part build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph. For the generator optimizer, we only want to generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep variables to start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance). We can do something similar with the discriminator. All the variables in the discriminator start with discriminator. Then, in the optimizer we pass the variable lists to var_list in the minimize method. This tells the optimizer to only update the listed variables. Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list. Exercise: Below, implement the optimizers for the generator and discriminator. First you'll need to get a list of trainable variables, then split that list into two lists, one for the generator variables and another for the discriminator variables. Finally, using AdamOptimizer, create an optimizer for each network that update the network variables separately. 
End of explanation """ batch_size = 100 epochs = 100 samples = [] losses = [] saver = tf.train.Saver(var_list = g_vars) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for e in range(epochs): for ii in range(mnist.train.num_examples//batch_size): batch = mnist.train.next_batch(batch_size) # Get images, reshape and rescale to pass to D batch_images = batch[0].reshape((batch_size, 784)) batch_images = batch_images*2 - 1 # Sample random noise for G batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size)) # Run optimizers _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z}) _ = sess.run(g_train_opt, feed_dict={input_z: batch_z}) # At the end of each epoch, get the losses and print them out train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images}) train_loss_g = g_loss.eval({input_z: batch_z}) print("Epoch {}/{}...".format(e+1, epochs), "Discriminator Loss: {:.4f}...".format(train_loss_d), "Generator Loss: {:.4f}".format(train_loss_g)) # Save losses to view after training losses.append((train_loss_d, train_loss_g)) # Sample from generator as we're training for viewing afterwards sample_z = np.random.uniform(-1, 1, size=(16, z_size)) gen_samples = sess.run( generator(input_z, input_size, reuse=True), feed_dict={input_z: sample_z}) samples.append(gen_samples) saver.save(sess, './checkpoints/generator.ckpt') # Save training generator samples with open('train_samples.pkl', 'wb') as f: pkl.dump(samples, f) """ Explanation: Training End of explanation """ %matplotlib inline import matplotlib.pyplot as plt fig, ax = plt.subplots() losses = np.array(losses) plt.plot(losses.T[0], label='Discriminator') plt.plot(losses.T[1], label='Generator') plt.title("Training Losses") plt.legend() """ Explanation: Training loss Here we'll check out the training losses for the generator and discriminator. End of explanation """ def view_samples(epoch, samples): fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True) for ax, img in zip(axes.flatten(), samples[epoch]): ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) im = ax.imshow(img.reshape((28,28)), cmap='Greys_r') return fig, axes # Load samples from generator taken while training with open('train_samples.pkl', 'rb') as f: samples = pkl.load(f) """ Explanation: Generator samples from training Here we can view samples of images from the generator. First we'll look at images taken while training. End of explanation """ _ = view_samples(-1, samples) """ Explanation: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make. End of explanation """ rows, cols = 10, 6 fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True) for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes): for img, ax in zip(sample[::int(len(sample)/cols)], ax_row): ax.imshow(img.reshape((28,28)), cmap='Greys_r') ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) """ Explanation: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion! 
End of explanation """ saver = tf.train.Saver(var_list=g_vars) with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('checkpoints')) sample_z = np.random.uniform(-1, 1, size=(16, z_size)) gen_samples = sess.run( generator(input_z, input_size, reuse=True), feed_dict={input_z: sample_z}) view_samples(0, [gen_samples]) """ Explanation: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3. Sampling from the generator We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples! End of explanation """
NeuPhysics/aNN
ipynb/Basics.ipynb
mit
import numpy as np print np.linspace(0,9,10), np.exp(-np.linspace(0,9,10)) """ Explanation: A Physicist's Crash Course on Artificial Neural Network What is a Neuron What a neuron does is to response when a stimulation is given. This response could be strong or weak or even null. If I would draw a figure, of this behavior, it looks like this. <img src="assets/images/neuronResponse.png" width=100%> Artificial Neural Network A simple network is a collection of neurons that response to stimulations, which could be the responses of other neurons. <img src="assets/images/neuralNetworkSimple.png" width=100%> A given input signal is spreaded onto three different neurons. The neurons respond to this signal sperately then summed together with different weights. In the language of math, given input $x$, output $y(x)$ is $$ y(x) = \sum_{k=1}^{3} v_k * \text{activation}( w_k * x + u_k ) $$ where $\text{activation}$ is the activation function, i.e., the response behavior of the neuron. This is a single layer structure. A lot of different ways could be used to extend this network. Increase the number of neurons on one layer. One can extend the number of layers. <img src="assets/images/multilayer.png" width=100%> We could also include interactions between the neurons. Even memory can be simulated. How it works Here is an exmaple of how the network works. Suppose we have only two neurons in the network. <img src="assets/images/2neuronNet.png" width=100%> Seen from this example, we can expect neural network to be good at classification. With one neuron, we can do a classification too. For example we can choose proper parameters so that we have a input temperature and a output that tells us which is high temperature which is low temperature. Training We have got a lot of paramters with the set up of the network. The parameters are the degree of freedom we have. The question is how to get the right paramters. The Network NEEDS TRAINING. Just like human learning, the neural network have to be trained using prepared data. One example would be End of explanation """ # This line configures matplotlib to show figures embedded in the notebook, # instead of opening a new window for each figure. More about that later. # If you are using an old version of IPython, try using '%pylab inline' instead. %matplotlib inline from scipy.optimize import minimize from scipy.special import expit import matplotlib.pyplot as plt import timeit """ Explanation: Balance bewteen 'speed' (Beta-coefficient) and 'momentum' of the learning Problems: over-trained or 'grandmothered' -> respond only to one set of problems For References A very basic introduction: http://pages.cs.wisc.edu/~bolo/shipyard/neural/local.html Code Practice End of explanation """ fun = lambda x: (x[0] - 1)**2 + (x[1] - 2.5)**2 minimize(fun,(2,1),method="Nelder-Mead") def fun_jacf(x): np.asarray(x) return np.array([2*(x[0] - 1),2*(x[1] - 2.5)]) minimize(fun,(2,1),method="BFGS",jac=fun_jacf) """ Explanation: Minimize An Expression This is a practice of minimizing an expression using scipy.optimize.minimize() End of explanation """ def cost(v,w,u,t): v = np.array(v) # Don't know why but np.asarray(v) doesn't work here. w = np.array(w) u = np.array(u) fvec = np.array(trigf(t*w + u) ) # This is a vector!!! yt = 1 + np.sum ( t * v * fvec ) # For a given t, this calculates the value of y(t), given the parameters, v, w, u. 
return ( np.sum (v*fvec + t * v* fvec * ( 1 - fvec ) * w ) + yt ) ** 2 # return np.sum(np.array( v*np.array( trigf( np.array( t*w ) + u ) ) ) + np.array( t*np.array( v*np.array( trigf(np.array( t*w ) + u)) ) ) * ( 1 - np.array( trigf( np.array( t*w )+u) ) ) * w + ( 1 + np.array( t*np.array( v*np.array( trigf( np.array(t*w)+u ) ) ) ) ) ) # trigf() should return an array with the same length of the input. """ Explanation: Here is a summary: The jac parameter should be an array. Feed an array to it. The array should be the gradient at point [x[0],x[1],...] for each of these variables. There are other minimizing methods here http://scipy-lectures.github.io/advanced/mathematical_optimization/. ANN Solving A Simple Problem The problem to solve is the differential equation $$\frac{d}{dt}y(t)= - y(t).$$ Using the network, this is $$y_i= 1+t_i v_k f(t_i w_k+u_k).$$ The procedures are Deal with the function first. The cost is $$I=\sum_i\left( \frac{dy_i}{dt}+y_i \right)^2.$$ Our purpose is to minimize this cost. To calculate the differential of y, we can write down the explicit expression for it. $$\frac{dy}{dt} = v_k f(t w_k+u_k) + t v_k f(tw_k+u_k) (1-f(tw_k+u_k))w_k,$$ where the function f is defined as a trigf(). So the cost becomse $$I = \sum_i \left( v_k f(t w_k+u_k) + t v_k f(tw_k+u_k) (1-f(tw_k+u_k)) w_k + y \right)^2.$$ End of explanation """ def trigf(x): #return 1/(1+np.exp(-x)) # return expit(x) """ Explanation: Caution: a number times an array is not returned as array but instead as list. and list + list doesn't conserved the length of the list! Define the trigf() next, usually we use $$trigf(x)=\frac{1}{1+\exp(-x)}$$. End of explanation """ test11 = np.ones(30) cost(np.array([1,1,1]),[1,1,1],[1,1,1],1) """ Explanation: Test cost function: End of explanation """ def costTotal(v,w,u,t): t = np.array(t) costt = 0 for temp in t: costt = costt + cost(v,w,u,temp) return costt """ Explanation: Next step is to optimize this cost. To do this we need the derivitive. But anyway let's try a simple minimization first. End of explanation """ test11 = np.ones(30) tlintest = np.linspace(0,1,2) print costTotal(np.ones(10),np.ones(10),2*np.ones(10),tlintest) print costTotal(np.ones(10),np.ones(10),np.ones(10),tlintest) """ Explanation: Test total cost End of explanation """ tlin = np.linspace(0,5,11) print tlin """ Explanation: Suppose the parameters are five dimensional and we have 10 data points. End of explanation """ ## No need to define such a function! Use np.split(x,3) instead. np.zeros(30) # This is only an example of 2dimensional neural network. costTotalF = lambda x: costTotal(np.split(x,3)[0],np.split(x,3)[1],np.split(x,3)[2],tlin) initGuess = np.zeros(30) # initGuess = np.random.rand(1,30)+2 start1 = timeit.default_timer() minimize(costTotalF,initGuess,method="Nelder-Mead") # minimize(costTotalF,initGuess,method="L-BFGS-B") # minimize(costTotalF,initGuess,method="TNC") stop1 = timeit.default_timer() print stop1 - start1 """ Explanation: Define a list divier that splits an array into three arrays. End of explanation """ def mhelper(v,w,u,t): ## This function should output a result ## t is a number in this function not array!! 
v = np.array(v) w = np.array(w) u = np.array(u) return np.sum( v*trigf( t*w + u ) + t* v* trigf(t*w + u) * ( 1 - trigf( t*w +u) ) * w ) + ( 1 + np.sum( t * v * trigf( t*w +u ) ) ) # Checked # Pass def vhelper(v,w,u,t): v = np.array(v) w = np.array(w) u = np.array(u) return trigf(t*w+u) + t*trigf(t*w+u)*( 1-trigf(t*w+u) )*w + t*trigf(t*w+u) def whelper(v,w,u,t): v = np.array(v) w = np.array(w) u = np.array(u) return v*t*trigf(t*w+u)*( 1- trigf(t*w+u) ) + t*v*( trigf(t*w+u)*(1-trigf(t*w+u))*t* (1-trigf(t*w+u)) )*w - t*v*trigf(t*w+u)*trigf(t*w+u)*(1-trigf(t*w+u))*t*w + t*v*trigf(t*w+u)*(1-trigf(t*w+u)) + t*v*trigf(t*w+u)*(1-trigf(t*w+u))*t def uhelper(v,w,u,t): v = np.array(v) w = np.array(w) u = np.array(u) return v*trigf(t*w+u)*( 1 - trigf(t*w+u)) + t* v * trigf(t*w+u) * (1-trigf(t*w+u))*(1-trigf(t*w+u))*w - t*v*trigf(t*w+u)*trigf(t*w+u)*(1-trigf(t*w+u))*w + t*v*trigf(t*w+u)*(1-trigf(t*w+u)) mhelper([1,2],[2,3],[3,4],[1]) vhelper([1,2],[2,3],[3,4],[1,2]) """ Explanation: It shows that the minimization depends greatly on the initial guess. It is not true for a simple scenario with gradient descent however it could be the case if the landscape is too complicated. Use Jac I can define a function that deals with this part: $$M = v_k f(t w_k+u_k) + t v_k f(tw_k+u_k) (1-f(tw_k+u_k))w_k + y,$$ which is actually an array given an array input. So the cost is $$I = M_i M_i,$$ using summation rule. The derivative is always $$\partial_X I = 2 M_i \partial_X M_i .$$ So we have $$\partial_{w_{k'}}f(tw_k+u_k) = f(t w_k+u_k) (1 - f(t w_k+u_k) ) t . $$ $$\partial_{u_{k'}}f(t w_k+u_k) = f(t w_k+u_k) (1 - f(t w_k+u_k) ) . $$ One of the useful relation is $$\frac{df(x)}{dx} = f(x)(1-f(x)).$$ Derived by hand, the jac is a list of the following for $v_\alpha$ (Note that the k in this expression should be $\alpha$ and no summation should be done.) (double checked): $$2M_i(f(tw_{k'}+u_{k'}) +t f(tw_{k'}+u_{k'})(1-f(tw_{k'}+u_{k'}))w_{k'} + tf(tw_{k'} +u_{k'} )),$$ for $w_\alpha$ (Note that the k in this expression should be $\alpha$ and no summation should be done.) (double checked): $$2M_i( v_{k'}tf(1-f) + t v_{k'}f(1-f)t(1-f) w_{k'} - t v_{k'} f f(1-f ) t w_{k'} + tv_{k'} f(1-f) + t v_{k'} f(')( 1 - f(') ) t ),$$ for $u_\alpha$ (Note that the k in this expression should be $\alpha$ and no summation should be done.) (double checked): $$v_{k'} f(1-f) + t v_{k'} f(1-f) (1-f)w_{k'} - t v_{k'} f f(1-f) w_{k'} + t v_{k'} f(1-f) .$$ where $k'$ is not summed over. 
Define a help function M here: End of explanation """ def mhelperT(v,w,u,t): t = np.array(t) mhelperT = 0 for temp in t: mhelperT = mhelperT + mhelper(v,w,u,temp) return mhelperT def vhelperT(v,w,u,t): t = np.array(t) vhelperT = 0 for temp in t: vhelperT = vhelperT + vhelper(v,w,u,temp) return vhelperT def whelperT(v,w,u,t): t = np.array(t) whelperT = 0 for temp in t: whelperT = whelperT + whelper(v,w,u,temp) return whelperT def uhelperT(v,w,u,t): t = np.array(t) uhelperT = 0 for temp in t: uhelperT = uhelperT + uhelper(v,w,u,temp) return uhelperT def costJac(v,w,u,t): v = np.array(v) w = np.array(w) u = np.array(u) vout = 0 wout = 0 uout = 0 for temp in t: vout = vout + 2*mhelper(v,w,u,temp)*vhelper(v,w,u,temp) wout = wout + 2*mhelper(v,w,u,temp)*whelper(v,w,u,temp) uout = uout + 2*mhelper(v,w,u,temp)*uhelper(v,w,u,temp) out = np.hstack((vout,wout,uout)) return np.array(out) print uhelperT([1,2],[2,3],[3,4],[1,2,3]),mhelperT([1,2],[2,3],[3,4],[1]),whelperT([1,2],[2,3],[3,4],[1]),vhelperT([1,2],[2,3],[3,4],[1]) costJac([1,2,3],[2,3,1],[3,4,3],[1,2]) costJacF = lambda x: costJac(np.split(x,3)[0],np.split(x,3)[1],np.split(x,3)[2],tlin) initGuessJ = np.zeros(30) # initGuessJ = np.random.rand(1,30)+2 minimize(costTotalF,initGuessJ,method="Newton-CG",jac=costJacF) """ Explanation: Define the jac of cost function End of explanation """ # funYNN(np.ones(10),np.ones(10),np.ones(10),2) test13=np.array([-57.2424592 , -57.2424592 , -57.2424592 , -57.2424592 , -57.2424592 , -57.2424592 , -57.2424592 , -57.2424592 , -57.2424592 , -57.2424592 , -0.28879104, -0.28879104, -0.28879104, -0.28879104, -0.28879104, -0.28879104, -0.28879104, -0.28879104, -0.28879104, -0.28879104, -6.5643978 , -6.5643978 , -6.5643978 , -6.5643978 , -6.5643978 , -6.5643978 , -6.5643978 , -6.5643978 , -6.5643978 , -6.5643978 ]) for i in np.linspace(0,5,11): print i,functionYNN(np.split(test13,3)[0],np.split(test13,3)[1],np.split(test13,3)[2],np.array([i]))[0] temp14 = np.array([]) for i in np.linspace(0,5,11): temp14 = np.append(temp14,functionYNN(np.split(test13,3)[0],np.split(test13,3)[1],np.split(test13,3)[2],np.array([i]))[0]) testTLin = np.linspace(0,5,11) plt.figure(figsize=(10,6.18)) plt.plot(testTLin,functionY(testTLin),'bs') plt.plot(testTLin,temp14,'r-') plt.show() temp16 = np.array([1.,0.60129567, 0.36281265 , 0.22220159 , 0.13660321,0.08295538 , 0.04904239 ,0.02817984 , 0.01636932 , 0.01048201, 0.00741816]) temp15 = np.linspace(0,5,11) print temp15 plt.plot(temp15,temp16) plt.plot(temp15,functionY(temp15),'bs') plt.show() test17 = np.array([]) for temp in np.linspace(0,5,11): test171 = 1 + expit(10*temp) test17 = np.append(test17,test171) print np.array(test17) 1 + expit(10*0) def functionYNNSt(v,w,u,t): # t is a single scalar value t = np.array(t) return 1 + np.sum(t * v * trigf( t*w +u ) ) def functionYNN(v,w,u,t): t = np.array(t) func = np.asarray([]) for temp in t: func = np.append(func, functionYNNSt(v,w,u,temp) ) return np.array(func) def functionY(t): return np.exp(-t) print functionYNN(np.array([1,2]),np.array([1,2]),np.array([1,2]),tlin) # structArray=np.array([-1.77606225*np.exp(-01), -3.52080053*np.exp(-01), -1.77606225*np.exp(-01), # -1.77606225*np.exp(-01), -8.65246997*np.exp(-14), 1.00000000, # -8.65246997*np.exp(-14), -8.65246997*np.exp(-14), -1.13618293*np.exp(-14), # -7.57778017*np.exp(-16), -1.13618293*np.exp(-14), -1.13618293*np.exp(-14)]) #structArray=np.array([-1.6001368 , -1.6001368 , -2.08065131, -2.06818762, -2.07367757, # -2.06779168, -2.07260669, -2.08533436, -2.07112826, -2.06893266, # 
-0.03859167, -0.03859167, -0.25919807, -0.66904303, -0.41571841, # -0.76917468, -0.4483773 , -0.17544777, -1.03122022, -0.90581106, # -3.46409689, -3.46409689, -2.83715218, -2.84817563, -2.8434598 , # -2.84773205, -2.84446398, -2.85001617, -2.83613622, -2.84402863]) structArray=np.array([ 0.1330613 , 1.05982273, 0.18777729, -0.60789078, -0.96393469, -0.65270373, -1.55257864, 0.8002259 , -0.12414033, -0.21230861, -0.88629202, 0.47527367, 0.21401419, 0.2130512 , -1.5236408 , 1.35208616, -0.48922234, -0.85850735, 0.72135512, -1.03407686, 2.29041152, 0.91184671, -0.56987761, 0.16597395, -0.43267372, 2.1772668 , -0.1318482 , -0.80817762, 0.44533168, -0.28545885]) structArrayJ = np.array([-11.45706046, -11.45706046, -11.45706046, -11.45706046, -11.45706046, -11.45706046, -11.45706046, -11.45706046, -11.45706046, -11.45706046, -0.44524438, -0.44524438, -0.44524438, -0.44524438, -0.44524438, -0.44524438, -0.44524438, -0.44524438, -0.44524438, -0.44524438, -4.7477771 , -4.7477771 , -4.7477771 , -4.7477771 , -4.7477771 , -4.7477771 , -4.7477771 , -4.7477771 , -4.7477771 , -4.7477771 ]) print("The Structure Array is \n {}".format(structArray)) # print np.split(structArray,3)[0],np.split(structArray,3)[1],np.split(structArray,3)[2] testTLin = np.linspace(0,5,11) print "\n \n The plot is" plt.figure(figsize=(10,6.18)) plt.plot(testTLin,functionY(testTLin),'bs') plt.plot(testTLin,functionYNN(structArray[0],structArray[1],structArray[2],testTLin),'g^') plt.plot(testTLin,functionYNN(structArrayJ[0],structArrayJ[1],structArrayJ[2],testTLin),'r^') plt.yscale('log') plt.show() print functionY(testTLin), functionYNN(structArray[0],structArray[1],structArray[2],testTLin), functionYNN(structArrayJ[0],structArrayJ[1],structArrayJ[2],testTLin) ## Test of Numpy temp1=np.asarray([1,2,3]) temp2=np.asarray([4,5,6]) temp3=np.asarray([7,8,9]) temp1*temp2 print 3*temp1 temp1+temp2 print temp1*temp2*temp3*temp1 1/(1+np.exp(-temp1)) temp1 + temp2 [1,2] + [2,3] 1 - 3*np.array([1,2]) temp1**2 1+np.asarray([1,2,3]) def testfunction(v,w,u,t): v = np.array(v) w = np.array(w) u = np.array(u) return t*w + u #return np.sum(v*trigf( t*w + u )) testfunction([2,3,4],[3,4,5],[4,5,7],2) """ Explanation: Test Results Plot! End of explanation """ def costS(v,w,u,x): v = np.array(v) # Don't know why but np.asarray(v) doesn't work here. w = np.array(w) u = np.array(u) fvec = np.array(trigf(x*w + u) ) # This is a vector!!! yx = np.sum ( x * v * fvec ) # For a given x, this calculates the value of y(t), given the parameters, v, w, u. dySLASHdt = np.sum (v*fvec + x * v* fvec * ( 1 - fvec ) * w ) return ( dySLASHdt - yx )**2 costS(np.array([2,3,4]),[3,4,5],[4,5,7],4) def costSTotal(v,w,u,x): x = np.array(x) costSt = 0 for temp in x: costSt = costSt + costS(v,w,u,temp) return costSt print costSTotal([1,2,3],[2,3,2],[3,4,1],[1,2,3,4,5,2,6,1]) xlinS = np.linspace(0,1,10) print xlinS # This is only an example of 2dimensional neural network. 
costSTotalF = lambda x: costSTotal(np.split(x,3)[0],np.split(x,3)[1],np.split(x,3)[2],xlinS) # initGuessS = np.zeros(30) initGuessS = np.random.rand(1,30)+2 # minimize(costTotalF,([1,0,3,0,1,1,2,0,1,0,1,0]),method="Nelder-Mead") minimize(costSTotalF,(initGuessS),method="L-BFGS-B") # minimize(costTotalF,([1,0,3,0,1,1,2,0,1,0,1,0]),method="TNC") def functionSYNN(v,w,u,x): # t is a single scalar value x = np.array(x) func = np.asarray([]) for temp in x: tempfunc = np.sum(temp * v * trigf( temp*w +u ) ) func = np.append(func, tempfunc) return np.array(func) def functionSY(x): return x**4 - x**3 + 2*x # structArray=np.array([-1.77606225*np.exp(-01), -3.52080053*np.exp(-01), -1.77606225*np.exp(-01), # -1.77606225*np.exp(-01), -8.65246997*np.exp(-14), 1.00000000, # -8.65246997*np.exp(-14), -8.65246997*np.exp(-14), -1.13618293*np.exp(-14), # -7.57778017*np.exp(-16), -1.13618293*np.exp(-14), -1.13618293*np.exp(-14)]) #structArray=np.array([-1.6001368 , -1.6001368 , -2.08065131, -2.06818762, -2.07367757, # -2.06779168, -2.07260669, -2.08533436, -2.07112826, -2.06893266, # -0.03859167, -0.03859167, -0.25919807, -0.66904303, -0.41571841, # -0.76917468, -0.4483773 , -0.17544777, -1.03122022, -0.90581106, # -3.46409689, -3.46409689, -2.83715218, -2.84817563, -2.8434598 , # -2.84773205, -2.84446398, -2.85001617, -2.83613622, -2.84402863]) structArrayS=np.array([ 0.01462306, 0.13467016, 0.43137834, 0.32915392, 0.16398891, -0.36502654, -0.1943661 , 0.16082714, -0.2923346 , -0.38280994, 2.23127245, 1.97866504, 2.95181241, 2.70643394, 2.19371603, 2.63386948, 2.20213407, 2.81089774, 2.43916804, 2.80375489, 2.32389017, 2.16118574, 2.7346048 , 2.18630694, 2.19932286, 2.52525807, 2.22125577, 2.81758156, 2.27231039, 2.6118171 ]) print("The Structure Array is \n {}".format(structArray)) # print np.split(structArray,3)[0],np.split(structArray,3)[1],np.split(structArray,3)[2] testXLinS = np.linspace(0,1,10) print "\n \n The plot is" plt.figure(figsize=(10,6.18)) plt.plot(testXLinS,functionSY(testXLinS),'bs') plt.plot(testXLinS,functionSYNN(structArrayS[0],structArrayS[1],structArrayS[2],testXLinS),'g^') ## plt.plot(testXLin,functionYNN(structArrayJ[0],structArrayJ[1],structArrayJ[2],testXLin),'r^') plt.show() print functionY(testXLinS), functionYNN(structArrayS[0],structArrayS[1],structArrayS[2],testXLinS) """ Explanation: A Even Simpler Equation Test a very simple equation $$\frac{dy}{dx}=4x^3-3x^2+2,$$ with initial condition $$y(0)=0.$$ As in any case, $$y = \text{Initial} + x_i v_k f(x_iw_k+u_k).$$ $$\frac{dy}{dx} = v_k f(x w_k+u_k) + t v_k f(x w_k+u_k) (1-f(xw_k+u_k))w_k,$$ where the function f is defined as a trigf(). Cost is $$I = \sum_i \left(\frac{dy}{dx}-(4x^2-3x^2+2) \right)^2$$ End of explanation """
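# A minimal vectorised sketch of the same trial-solution idea used above:
# y(t) = 1 + t * sum_k v_k f(t*w_k + u_k) for dy/dt = -y, with the cost written once over
# all collocation points and the gradient left to the optimizer (numerical BFGS).
# The 10 hidden units and the t grid follow the notebook; the random initial guess is an
# arbitrary choice, not taken from the original.
import numpy as np
from scipy.optimize import minimize
from scipy.special import expit

t_grid = np.linspace(0.0, 5.0, 11)   # collocation points, as above
n_hidden = 10

def ode_cost(params, t=t_grid):
    v, w, u = np.split(params, 3)
    z = np.outer(t, w) + u                      # shape (len(t), n_hidden)
    f = expit(z)                                # sigmoid, same role as trigf()
    y = 1.0 + t * (f * v).sum(axis=1)           # trial solution with y(0) = 1
    dy = (f * v).sum(axis=1) + t * (f * (1.0 - f) * (v * w)).sum(axis=1)
    return np.sum((dy + y) ** 2)                # squared residual of y' + y = 0

res = minimize(ode_cost, np.random.uniform(-1, 1, 3 * n_hidden), method="BFGS")
v, w, u = np.split(res.x, 3)
y_fit = 1.0 + t_grid * (expit(np.outer(t_grid, w) + u) * v).sum(axis=1)
print(np.abs(y_fit - np.exp(-t_grid)).max())    # distance to the exact solution exp(-t)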
tritemio/multispot_paper
out_notebooks/usALEX-5samples-PR-raw-out-Dex-27d.ipynb
mit
ph_sel_name = "Dex" data_id = "27d" # ph_sel_name = "all-ph" # data_id = "7d" """ Explanation: Executed: Mon Mar 27 11:36:12 2017 Duration: 8 seconds. usALEX-5samples - Template This notebook is executed through 8-spots paper analysis. For a direct execution, uncomment the cell below. End of explanation """ from fretbursts import * init_notebook() from IPython.display import display """ Explanation: Load software and filenames definitions End of explanation """ data_dir = './data/singlespot/' import os data_dir = os.path.abspath(data_dir) + '/' assert os.path.exists(data_dir), "Path '%s' does not exist." % data_dir """ Explanation: Data folder: End of explanation """ from glob import glob file_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f) ## Selection for POLIMI 2012-11-26 datatset labels = ['17d', '27d', '7d', '12d', '22d'] files_dict = {lab: fname for lab, fname in zip(labels, file_list)} files_dict ph_sel_map = {'all-ph': Ph_sel('all'), 'Dex': Ph_sel(Dex='DAem'), 'DexDem': Ph_sel(Dex='Dem')} ph_sel = ph_sel_map[ph_sel_name] data_id, ph_sel_name """ Explanation: List of data files: End of explanation """ d = loader.photon_hdf5(filename=files_dict[data_id]) """ Explanation: Data load Initial loading of the data: End of explanation """ d.ph_times_t, d.det_t """ Explanation: Laser alternation selection At this point we have only the timestamps and the detector numbers: End of explanation """ d.add(det_donor_accept=(0, 1), alex_period=4000, D_ON=(2850, 580), A_ON=(900, 2580), offset=0) """ Explanation: We need to define some parameters: donor and acceptor ch, excitation period and donor and acceptor excitiations: End of explanation """ plot_alternation_hist(d) """ Explanation: We should check if everithing is OK with an alternation histogram: End of explanation """ loader.alex_apply_period(d) """ Explanation: If the plot looks good we can apply the parameters with: End of explanation """ d """ Explanation: Measurements infos All the measurement data is in the d variable. 
We can print it: End of explanation """ d.time_max """ Explanation: Or check the measurements duration: End of explanation """ d.calc_bg(bg.exp_fit, time_s=60, tail_min_us='auto', F_bg=1.7) dplot(d, timetrace_bg) d.rate_m, d.rate_dd, d.rate_ad, d.rate_aa """ Explanation: Compute background Compute the background using automatic threshold: End of explanation """ bs_kws = dict(L=10, m=10, F=7, ph_sel=ph_sel) d.burst_search(**bs_kws) th1 = 30 ds = d.select_bursts(select_bursts.size, th1=30) bursts = (bext.burst_data(ds, include_bg=True, include_ph_index=True) .round({'E': 6, 'S': 6, 'bg_d': 3, 'bg_a': 3, 'bg_aa': 3, 'nd': 3, 'na': 3, 'naa': 3, 'nda': 3, 'nt': 3, 'width_ms': 4})) bursts.head() burst_fname = ('results/bursts_usALEX_{sample}_{ph_sel}_F{F:.1f}_m{m}_size{th}.csv' .format(sample=data_id, th=th1, **bs_kws)) burst_fname bursts.to_csv(burst_fname) assert d.dir_ex == 0 assert d.leakage == 0 print(d.ph_sel) dplot(d, hist_fret); # if data_id in ['7d', '27d']: # ds = d.select_bursts(select_bursts.size, th1=20) # else: # ds = d.select_bursts(select_bursts.size, th1=30) ds = d.select_bursts(select_bursts.size, add_naa=False, th1=30) n_bursts_all = ds.num_bursts[0] def select_and_plot_ES(fret_sel, do_sel): ds_fret= ds.select_bursts(select_bursts.ES, **fret_sel) ds_do = ds.select_bursts(select_bursts.ES, **do_sel) bpl.plot_ES_selection(ax, **fret_sel) bpl.plot_ES_selection(ax, **do_sel) return ds_fret, ds_do ax = dplot(ds, hist2d_alex, S_max_norm=2, scatter_alpha=0.1) if data_id == '7d': fret_sel = dict(E1=0.60, E2=1.2, S1=0.2, S2=0.9, rect=False) do_sel = dict(E1=-0.2, E2=0.5, S1=0.8, S2=2, rect=True) ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) elif data_id == '12d': fret_sel = dict(E1=0.30,E2=1.2,S1=0.131,S2=0.9, rect=False) do_sel = dict(E1=-0.4, E2=0.4, S1=0.8, S2=2, rect=False) ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) elif data_id == '17d': fret_sel = dict(E1=0.01, E2=0.98, S1=0.14, S2=0.88, rect=False) do_sel = dict(E1=-0.4, E2=0.4, S1=0.80, S2=2, rect=False) ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) elif data_id == '22d': fret_sel = dict(E1=-0.16, E2=0.6, S1=0.2, S2=0.80, rect=False) do_sel = dict(E1=-0.2, E2=0.4, S1=0.85, S2=2, rect=True) ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) elif data_id == '27d': fret_sel = dict(E1=-0.1, E2=0.5, S1=0.2, S2=0.82, rect=False) do_sel = dict(E1=-0.2, E2=0.4, S1=0.88, S2=2, rect=True) ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel) n_bursts_do = ds_do.num_bursts[0] n_bursts_fret = ds_fret.num_bursts[0] n_bursts_do, n_bursts_fret d_only_frac = 1.*n_bursts_do/(n_bursts_do + n_bursts_fret) print ('D-only fraction:', d_only_frac) dplot(ds_fret, hist2d_alex, scatter_alpha=0.1); dplot(ds_do, hist2d_alex, S_max_norm=2, scatter=False); """ Explanation: Burst search and selection End of explanation """ def hsm_mode(s): """ Half-sample mode (HSM) estimator of `s`. `s` is a sample from a continuous distribution with a single peak. Reference: Bickel, Fruehwirth (2005). 
arXiv:math/0505419 """ s = memoryview(np.sort(s)) i1 = 0 i2 = len(s) while i2 - i1 > 3: n = (i2 - i1) // 2 w = [s[n-1+i+i1] - s[i+i1] for i in range(n)] i1 = w.index(min(w)) + i1 i2 = i1 + n if i2 - i1 == 3: if s[i1+1] - s[i1] < s[i2] - s[i1 + 1]: i2 -= 1 elif s[i1+1] - s[i1] > s[i2] - s[i1 + 1]: i1 += 1 else: i1 = i2 = i1 + 1 return 0.5*(s[i1] + s[i2]) E_pr_do_hsm = hsm_mode(ds_do.E[0]) print ("%s: E_peak(HSM) = %.2f%%" % (ds.ph_sel, E_pr_do_hsm*100)) """ Explanation: Donor Leakage fit Half-Sample Mode Fit peak usng the mode computed with the half-sample algorithm (Bickel 2005). End of explanation """ E_fitter = bext.bursts_fitter(ds_do, weights=None) E_fitter.histogram(bins=np.arange(-0.2, 1, 0.03)) E_fitter.fit_histogram(model=mfit.factory_gaussian()) E_fitter.params res = E_fitter.fit_res[0] res.params.pretty_print() E_pr_do_gauss = res.best_values['center'] E_pr_do_gauss """ Explanation: Gaussian Fit Fit the histogram with a gaussian: End of explanation """ bandwidth = 0.03 E_range_do = (-0.1, 0.15) E_ax = np.r_[-0.2:0.401:0.0002] E_fitter.calc_kde(bandwidth=bandwidth) E_fitter.find_kde_max(E_ax, xmin=E_range_do[0], xmax=E_range_do[1]) E_pr_do_kde = E_fitter.kde_max_pos[0] E_pr_do_kde """ Explanation: KDE maximum End of explanation """ mfit.plot_mfit(ds_do.E_fitter, plot_kde=True, plot_model=False) plt.axvline(E_pr_do_hsm, color='m', label='HSM') plt.axvline(E_pr_do_gauss, color='k', label='Gauss') plt.axvline(E_pr_do_kde, color='r', label='KDE') plt.xlim(0, 0.3) plt.legend() print('Gauss: %.2f%%\n KDE: %.2f%%\n HSM: %.2f%%' % (E_pr_do_gauss*100, E_pr_do_kde*100, E_pr_do_hsm*100)) """ Explanation: Leakage summary End of explanation """ nt_th1 = 50 dplot(ds_fret, hist_size, which='all', add_naa=False) xlim(-0, 250) plt.axvline(nt_th1) Th_nt = np.arange(35, 120) nt_th = np.zeros(Th_nt.size) for i, th in enumerate(Th_nt): ds_nt = ds_fret.select_bursts(select_bursts.size, th1=th) nt_th[i] = (ds_nt.nd[0] + ds_nt.na[0]).mean() - th plt.figure() plot(Th_nt, nt_th) plt.axvline(nt_th1) nt_mean = nt_th[np.where(Th_nt == nt_th1)][0] nt_mean """ Explanation: Burst size distribution End of explanation """ E_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, bandwidth=bandwidth, weights='size') E_fitter = ds_fret.E_fitter E_fitter.histogram(bins=np.r_[-0.1:1.1:0.03]) E_fitter.fit_histogram(mfit.factory_gaussian(center=0.5)) E_fitter.fit_res[0].params.pretty_print() fig, ax = plt.subplots(1, 2, figsize=(14, 4.5)) mfit.plot_mfit(E_fitter, ax=ax[0]) mfit.plot_mfit(E_fitter, plot_model=False, plot_kde=True, ax=ax[1]) print('%s\nKDE peak %.2f ' % (ds_fret.ph_sel, E_pr_fret_kde*100)) display(E_fitter.params*100) """ Explanation: Fret fit Max position of the Kernel Density Estimation (KDE): End of explanation """ ds_fret.fit_E_m(weights='size') """ Explanation: Weighted mean of $E$ of each burst: End of explanation """ ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.03], weights=None) """ Explanation: Gaussian fit (no weights): End of explanation """ ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.005], weights='size') E_kde_w = E_fitter.kde_max_pos[0] E_gauss_w = E_fitter.params.loc[0, 'center'] E_gauss_w_sig = E_fitter.params.loc[0, 'sigma'] E_gauss_w_err = float(E_gauss_w_sig/np.sqrt(ds_fret.num_bursts[0])) E_gauss_w_fiterr = E_fitter.fit_res[0].params['center'].stderr E_kde_w, E_gauss_w, E_gauss_w_sig, E_gauss_w_err, E_gauss_w_fiterr """ Explanation: Gaussian fit (using burst size as weights): End of explanation """ S_pr_fret_kde = 
bext.fit_bursts_kde_peak(ds_fret, burst_data='S', bandwidth=0.03) #weights='size', add_naa=True) S_fitter = ds_fret.S_fitter S_fitter.histogram(bins=np.r_[-0.1:1.1:0.03]) S_fitter.fit_histogram(mfit.factory_gaussian(), center=0.5) fig, ax = plt.subplots(1, 2, figsize=(14, 4.5)) mfit.plot_mfit(S_fitter, ax=ax[0]) mfit.plot_mfit(S_fitter, plot_model=False, plot_kde=True, ax=ax[1]) print('%s\nKDE peak %.2f ' % (ds_fret.ph_sel, S_pr_fret_kde*100)) display(S_fitter.params*100) S_kde = S_fitter.kde_max_pos[0] S_gauss = S_fitter.params.loc[0, 'center'] S_gauss_sig = S_fitter.params.loc[0, 'sigma'] S_gauss_err = float(S_gauss_sig/np.sqrt(ds_fret.num_bursts[0])) S_gauss_fiterr = S_fitter.fit_res[0].params['center'].stderr S_kde, S_gauss, S_gauss_sig, S_gauss_err, S_gauss_fiterr """ Explanation: Stoichiometry fit Max position of the Kernel Density Estimation (KDE): End of explanation """ S = ds_fret.S[0] S_ml_fit = (S.mean(), S.std()) S_ml_fit """ Explanation: The Maximum likelihood fit for a Gaussian population is the mean: End of explanation """ weights = bl.fret_fit.get_weights(ds_fret.nd[0], ds_fret.na[0], weights='size', naa=ds_fret.naa[0], gamma=1.) S_mean = np.dot(weights, S)/weights.sum() S_std_dev = np.sqrt( np.dot(weights, (S - S_mean)**2)/weights.sum()) S_wmean_fit = [S_mean, S_std_dev] S_wmean_fit """ Explanation: Computing the weighted mean and weighted standard deviation we get: End of explanation """ sample = data_id """ Explanation: Save data to file End of explanation """ variables = ('sample n_bursts_all n_bursts_do n_bursts_fret ' 'E_kde_w E_gauss_w E_gauss_w_sig E_gauss_w_err E_gauss_w_fiterr ' 'S_kde S_gauss S_gauss_sig S_gauss_err S_gauss_fiterr ' 'E_pr_do_kde E_pr_do_hsm E_pr_do_gauss nt_mean\n') """ Explanation: The following string contains the list of variables to be saved. When saving, the order of the variables is preserved. End of explanation """ variables_csv = variables.replace(' ', ',') fmt_float = '{%s:.6f}' fmt_int = '{%s:d}' fmt_str = '{%s}' fmt_dict = {**{'sample': fmt_str}, **{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}} var_dict = {name: eval(name) for name in variables.split()} var_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\n' data_str = var_fmt.format(**var_dict) print(variables_csv) print(data_str) # NOTE: The file name should be the notebook name but with .csv extension with open('results/usALEX-5samples-PR-raw-%s.csv' % ph_sel_name, 'a') as f: f.seek(0, 2) if f.tell() == 0: f.write(variables_csv) f.write(data_str) """ Explanation: This is just a trick to format the different variables: End of explanation """
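# Sketch (SciPy only, synthetic numbers): the "position of the KDE maximum" estimate used
# above for E and S, without the FRETBursts fitter objects. The absolute bandwidth 0.03
# mirrors the value chosen in the notebook; gaussian_kde takes a relative factor, hence the
# division by the sample standard deviation. The simulated burst values are illustrative only.
import numpy as np
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
e_bursts = rng.normal(0.35, 0.08, size=2000)          # stand-in for per-burst E values

kde = gaussian_kde(e_bursts, bw_method=0.03 / e_bursts.std(ddof=1))
e_axis = np.r_[-0.2:1.2:0.0005]
e_peak = e_axis[np.argmax(kde(e_axis))]
print("KDE peak position: %.4f" % e_peak)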
mwytock/cvxpy
examples/notebooks/WWW/water_filling_BVex5.2.ipynb
gpl-3.0
#!/usr/bin/env python3 # @author: R. Gowers, S. Al-Izzi, T. Pollington, R. Hill & K. Briggs import numpy as np import cvxpy as cvx def water_filling(n,a,sum_x=1): ''' Boyd and Vandenberghe, Convex Optimization, example 5.2 page 145 Water-filling. This problem arises in information theory, in allocating power to a set of n communication channels in order to maximise the total channel capacity. The variable x_i represents the transmitter power allocated to the ith channel, and log(α_i+x_i) gives the capacity or maximum communication rate of the channel. The objective is to minimise -∑log(α_i+x_i) subject to the constraint ∑x_i = 1 ''' # Declare variables and parameters x = cvx.Variable(n) alpha = cvx.Parameter(n,sign='positive') alpha.value = a # Choose objective function. Interpret as maximising the total communication rate of all the channels obj = cvx.Maximize(cvx.sum_entries(cvx.log(alpha + x))) # Declare constraints constraints = [x >= 0, cvx.sum_entries(x) - sum_x == 0] # Solve prob = cvx.Problem(obj, constraints) prob.solve() if(prob.status=='optimal'): return prob.status,prob.value,x.value else: return prob.status,np.nan,np.nan """ Explanation: Water Filling in Communications by Robert Gowers, Roger Hill, Sami Al-Izzi, Timothy Pollington and Keith Briggs from Boyd and Vandenberghe, Convex Optimization, example 5.2 page 145 Convex optimisation can be used to solve the classic water filling problem. This problem is where a total amount of power $P$ has to be assigned to $n$ communication channels, with the objective of maximising the total communication rate. The communication rate of the $i$th channel is given by: $\log(\alpha_i + x_i)$ where $x_i$ represents the power allocated to channel $i$ and $\alpha_i$ represents the floor above the baseline at which power can be added to the channel. Since $-\log(X)$ is convex, we can write the water-filling problem as a convex optimisation problem: minimise $\sum_{i=1}^N -\log(\alpha_i + x_i)$ subject to $x_i \succeq 0$ and $\sum_{i=1}^N x_i = P$ This form is also very straightforward to put into DCP format and thus can be simply solved using CVXPY. End of explanation """ # As an example, we will solve the water filling problem with 3 buckets, each with different α np.set_printoptions(precision=3) buckets=3 alpha = np.array([0.8,1.0,1.2]) stat,prob,x=water_filling(buckets,alpha) print('Problem status: ',stat) print('Optimal communication rate = %.4g '%prob) print('Transmitter powers:\n', x) """ Explanation: Example As a simple example, we set $N = 3$, $P = 1$ and $\boldsymbol{\alpha} = (0.8,1.0,1.2)$. The function outputs whether the problem status, the maximum communication rate and the power allocation required is achieved with this maximum communication rate. 
End of explanation """ import matplotlib import matplotlib.pylab as plt %matplotlib inline matplotlib.rcParams.update({'font.size': 14}) axis = np.arange(0.5,buckets+1.5,1) index = axis+0.5 X = np.asarray(x).flatten() Y = alpha + X # to include the last data point as a step, we need to repeat it A = np.concatenate((alpha,[alpha[-1]])) X = np.concatenate((X,[X[-1]])) Y = np.concatenate((Y,[Y[-1]])) plt.xticks(index) plt.xlim(0.5,buckets+0.5) plt.ylim(0,1.5) plt.step(axis,A,where='post',label =r'$\alpha$',lw=2) plt.step(axis,Y,where='post',label=r'$\alpha + x$',lw=2) plt.legend(loc='lower right') plt.xlabel('Bucket Number') plt.ylabel('Power Level') plt.title('Water Filling Solution') plt.show() """ Explanation: To illustrate the water filling principle, we will plot $\alpha_i + x_i$ and check that this level is flat where power has been allocated: End of explanation """
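# A NumPy-only cross-check (sketch): water filling has the analytic form
# x_i = max(0, nu - alpha_i), with the level nu set so that sum_i x_i equals the power
# budget. Bisection on nu is enough because the total allocated power is monotone in nu.
# alpha and the budget match the 3-bucket example above.
import numpy as np

alpha = np.array([0.8, 1.0, 1.2])
budget = 1.0

lo, hi = alpha.min(), alpha.max() + budget        # nu is bracketed in this interval
for _ in range(60):
    nu = 0.5 * (lo + hi)
    if np.maximum(0.0, nu - alpha).sum() < budget:
        lo = nu
    else:
        hi = nu

x_closed = np.maximum(0.0, nu - alpha)
print("water level:", round(nu, 4))
print("allocation :", x_closed, "sum =", x_closed.sum())
print("total rate :", np.log(alpha + x_closed).sum())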
napsternxg/ipython-notebooks
Keras Demo.ipynb
apache-2.0
X_org, y = iris.data, iris.target print "Classes present in IRIS", iris.target_names # Convert y to one hot vector for each category enc = OneHotEncoder() y= enc.fit_transform(y[:, np.newaxis]).toarray() # **VERY IMPORTANT STEP** Scale the values so that mean is 0 and variance is 1. # If this step is not performed the Neural network will not converge. The logistic regression model might converge. X = scaler.fit_transform(X) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42) X.shape, y.shape # Implement Model in Keras model = Sequential() model.add(Dense(X.shape[1], 2, init='uniform', activation='tanh')) model.add(Dropout(0.5)) model.add(Dense(2, 2, init='uniform', activation='tanh')) model.add(Dropout(0.5)) model.add(Dense(2, y.shape[1], init='uniform', activation='softmax')) #sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True) sgd = SGD() # Compile the model using theano model.compile(loss='categorical_crossentropy', optimizer="rmsprop") """ Use this cell only if you want to reset the weights of the model. """ """ #print model.get_weights() model.set_weights(np.array([np.random.uniform(size=k.shape) for k in model.get_weights()])) print model.to_yaml() model.optimizer.lr = 0.01 model.optimizer.decay = 0. model.optimizer.momentum = 0. model.optimizer.nesterov = False """ # Done # Perform cross validated training cond = (y[:,0] == 1) | (y[:,1] == 1) | (y[:,2] == 1) kf = cross_validation.KFold(X[cond].shape[0], n_folds=10, shuffle=True) scores = [] for train_index, test_index in kf: model.fit(X[cond][train_index], y[cond][train_index], nb_epoch=10, batch_size=200, verbose=0) scores.append(model.evaluate(X[cond][test_index], y[cond][test_index], show_accuracy=1)) model.fit(X, y, nb_epoch=100, batch_size=200, verbose=0) scores.append(model.evaluate(X, y, show_accuracy=1)) print scores print np.mean(np.array(scores), axis=0) print model.predict_classes(X[cond][test_index]), np.argmax(y[cond][test_index], axis=1) print set(model.predict_classes(X)) logit = Sequential() logit.add(Dense(X.shape[1], y.shape[1], init='uniform', activation='softmax')) logit_sgd = SGD() logit.compile(loss='categorical_crossentropy', optimizer=logit_sgd) scores = [] kf = cross_validation.KFold(X.shape[0], n_folds=10) for train_index, test_index in kf: logit.fit(X[train_index], y[train_index], nb_epoch=100, batch_size=200, verbose=0) scores.append(logit.evaluate(X[test_index], y[test_index], show_accuracy=1)) print scores print np.mean(np.array(scores), axis=0) """ Explanation: Data Preperation We will work with the Iris data set which is a standard toy dataset for machine learning model evaluation. One Hot Encoding Iris data set consists of 3 class of Iris flowers. The data comes in the format where each element of y corresponds to one integer which uniquely identifies the flower class. Since, the numerical values of y are of no use to us as y is a categorical variable hence it will be more approapriate to convert it into one hot vectors as followes: | Class | One Hot Vector | | ------------- |:-------------:| | 0 | [1, 0, 0] | | 1 | [0, 1, 0] | | 2 | [0, 0, 1] | Feature Scaling Feature scaling is a very important step when training neural network models. Feature scaling ensures that no feature dominates the prediction process because of the high range of values which it acquires for every instance. A common way of using feature scaling is by processing the features so that they have mean = 0.0 and standard deviation = 1.0. 
Without this step the neural network in most cases will not converge. PS: I tried the neural network without feature scaling and was getting very bad accuracy sometimes as close to 0. End of explanation """ pca = PCA(n_components=2) X_t = pca.fit_transform(X) h = 0.1 x_min, x_max = X_t[:, 0].min() - 1, X_t[:, 0].max() + 1 y_min, y_max = X_t[:, 1].min() - 1, X_t[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) fig, ax = plt.subplots(1,2, figsize=(20,10)) for i, v in enumerate({"Neural Net": model, "Logistic": logit}.items()): # here "model" is your model's prediction (classification) function Z = v[1].predict_classes(pca.inverse_transform(np.c_[xx.ravel(), yy.ravel()])) # Put the result into a color plot Z = Z.reshape(xx.shape) ax[i].contourf(xx, yy, Z, cmap=plt.cm.Paired) #ax[i].set_axis('off') # Plot also the training points ax[i].scatter(X_t[:, 0], X_t[:, 1], c=np.argmax(y, axis=1), cmap=plt.cm.Paired) ax[i].set_title(v[0]) """ Explanation: Plotting decision boundaries We would like to see what decision boundaries is the model learning. However, one issue with plotting our data is that X consists of $4$ dimensional feature vectors. Hence, we transform each feature vector in X to a $2d$ vector using Principal Component Analysis (PCA). The vectors obtained from PCA are then used for showing the points in a $2d$ plane and the decision boundaries of each classifier are shown as well. End of explanation """
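# A version-stable baseline sketch (scikit-learn only): the same "scale, then classify"
# recipe as above, expressed as a Pipeline with 10-fold cross-validation. It uses
# sklearn.model_selection rather than the older cross_validation module imported earlier;
# the max_iter value is an arbitrary choice to guarantee convergence.
from sklearn.datasets import load_iris
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

X_iris, y_iris = load_iris(return_X_y=True)
baseline = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))
cv_scores = cross_val_score(baseline, X_iris, y_iris, cv=10)
print("mean 10-fold accuracy: %.3f (+/- %.3f)" % (cv_scores.mean(), cv_scores.std()))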
lamastex/scalable-data-science
_360-in-525/2018/02/SimonLindgren/MeTooInJupyterIpythonNBAction/Simon_MetooStep1.ipynb
unlicense
from IPython.display import HTML import os """ Explanation: Simon #metoo step 1 End of explanation """ HTML(""" <video width="320" height="240" controls> <source src="btf.m4v" type="video/mp4"> </video> """) """ Explanation: Data was collected using this method. It uses the Twitter API to go some days back in time, and then continuously streaming forward in real-time. The metoo data was collected around the time when the campaign took off, but today we wouldn't be able to get data further back than about a week from now. The video below shows the setup process for the collector. End of explanation """ import sqlite3 import pandas as pd pd.set_option('display.max_colwidth', -1) ## Read sqlite query results into a pandas DataFrame # conn = sqlite3.connect("tweets.db") # tweets_df = pd.read_sql_query("SELECT * from tweets", conn) ## Do some cleanupb #tweets_df = tweets_df.replace({'\n': ' '}, regex=True) # remove linebreaks in the dataframe #tweets_df = tweets_df.replace({'\t': ' '}, regex=True) # remove tabs in the dataframe #tweets_df = tweets_df.replace({'\r': ' '}, regex=True) # remove carriage return in the dataframe """ Explanation: I had collected around 3 million metoo tweets, and they were in sqlite3 database format. We won't run the parsing process live here but the code below shows how it is done. End of explanation """ # Read the csv into pandas metoo_df = pd.DataFrame.from_csv("backup_metoo_171221.csv") len(metoo_df) metoo_df.head() """ Explanation: For the continued analysis, I start from a csv-file that was created in this way. End of explanation """ newlist = [] for tweet in metoo_df['text']: #print(tweet) tweet = [word.lower() for word in tweet.split() if not word.startswith('#')] tweet = [word for word in tweet if not word.startswith('htt')] tweet = [word for word in tweet if not word.startswith('@')] tweet = [word for word in tweet if not '/' in word] tweet = [word for word in tweet if word.isalpha()] tweet = [word for word in tweet if not word == "rt"] tweet = [word.strip(".") for word in tweet] tweet = [word.strip() for word in tweet] tweet = [word for word in tweet if not word == "
"] tweet = " ".join(tweet) newlist.append(tweet) tweets = pd.Series(newlist) tweets metoo_df = metoo_df.reset_index(drop=True) metoo_df['tweets'] = tweets metoo_df.head() """ Explanation: Now, let's clean the tweets, and add the clean version of the tweets as a column. End of explanation """ daylist = [day[:10] for day in metoo_df.created_at] daylist days = pd.Series(daylist) metoo_df['day'] = days metoo_df.head() """ Explanation: We want to group the tweets by day, so add a column that says which day they were posted. End of explanation """ tweets_by_day = metoo_df['tweets'].groupby(metoo_df['day']) """ Explanation: Group the tweets (tweets) by day. End of explanation """ groups = list(tweets_by_day) """ Explanation: View the grouping. End of explanation """ groups[0][0] groups[0][1] """ Explanation: Now groups is a list of tuples where element [0] is the group name, and element [1] is a pandas series of all text belonging to the group. End of explanation """ for i in groups[41][1]: print(i) """ Explanation: The series is iterable. End of explanation """ os.getcwd() try: os.mkdir("tmfiles") except: print("Directory already exists") os.chdir("tmfiles") # run once os.getcwd() for c,i in enumerate(groups): o = open(str(groups[c][0]) + ".txt", "w") for line in i[1]: o.write(line + "\n") """ Explanation: For topic modelling, we create one text document per group (day in this case). End of explanation """ os.chdir('..') os.getcwd() """ Explanation: Now we have the topicmodelling documents. Go back to the previous working directory. End of explanation """
njtwomey/ADS
03_data_transformation_and_integration/04_survey_demo.ipynb
mit
import pandas as pd import numpy as np from datetime import datetime import matplotlib.pyplot as plt """ Explanation: Joining data from Google forms questionnaires. End of explanation """ columns = ['Datetime', 'ID', 'Course', 'Python_Experience', 'Favourite_Language'] df1 = pd.read_csv('Data Fusion.csv', names=columns, skiprows=1) df1.set_index('ID', inplace=True) df1.head() len(df1) df1.dtypes #type(df1.Datetime[0]) df1['Datetime'] = pd.to_datetime(df1['Datetime']) df1.index.is_unique ## if non-unique idex we can test inspect the duplicate indicies indices = df1.groupby(level=0) duplicate_indices = [] for index, gi in indices: if len(gi) > 1: print("Repeated index: ", index) duplicate_indices.append(gi) duplicate_indices[0] df1 = df1.loc[~df1.index.duplicated(keep='first')] print(df1.index.is_unique) df1.head() df1.index.is_unique """ Explanation: Let's get the data from the first survey: End of explanation """ colums = ['Datetime', 'ID', 'Course', 'Ethics_Importance', 'Female_Developers', 'Favourite_Movie', 'Fusion_versus_Wrangling'] df2 = pd.read_csv('Data Fusion 2.csv', names=colums, skiprows=1) df2.set_index('ID', inplace=True) df2['Datetime'] = pd.to_datetime(df2['Datetime']) df2.head() df2.index.is_unique ## if not unique...as above resultset1_ids = set(df1.index.unique()) resultset2_ids = set(df2.index.unique()) try: assert resultset1_ids == resultset2_ids except AssertionError: print("Warning: ID columns do not match exactly.") intersection = resultset1_ids.intersection(resultset2_ids) difference = resultset1_ids.difference(resultset2_ids) print(intersection) print(difference) df2.loc[df2.index.isin(intersection)] df1.loc[~df1.index.isin(intersection)].head() df2.loc[~df2.index.isin(intersection)].head() """ Explanation: Now let's get the results from the second survey: End of explanation """ dodgy_ids = ['MM'] replacement_ids = ['MJM'] replace = lambda x: x if x not in dodgy_ids else replacement_ids[dodgy_ids.index(x)] df1_corrected = df1.assign(NID = [replace(ii) for ii in df1.index]).set_index('NID') df2_corrected = df2.assign(NID = [replace(ii) for ii in df2.index]).set_index('NID') #df1.drop(columns=['NID'], inplace=True) #df2_corrected = df2 #df1_corrected = df1 resultset1_ids = set(df1_corrected.index.unique()) resultset2_ids = set(df2_corrected.index.unique()) intersection = resultset1_ids.intersection(resultset2_ids) difference = resultset1_ids.difference(resultset2_ids) print(intersection) print(difference) """ Explanation: It seems there are four people who filled out the first form but not the second. And one person who used a different ID on the second form 'MM' instead of 'MJM'...So we can fix that discrepancy and then either omit the four respondess (by using outer join) or included them but with the data from the second form missing (by using inner join). 
End of explanation """ df1_corrected.Course.value_counts() df2_corrected.Course.value_counts() set(df1_corrected.Course.unique()) == set(df2_corrected.Course.unique()) """ Explanation: Before we join the two dataframes let's check out the duplicated Course column: End of explanation """ #df1_corrected.drop(columns=['Datetime'], inplace=True) #df2_corrected.drop(columns=['Datetime', 'Course'], inplace=True) #joined = df1_corrected.loc[df1_corrected.index.isin(intersection)].merge(df2_corrected.loc[df2_corrected.index.isin(intersection)], left_index=True, right_index=True) joined = df1_corrected.merge(df2_corrected, left_index=True, right_index=True, how='outer') joined.head() ## To see the rows with missing data because they did not fill in the second form: joined.loc[~joined.index.isin(intersection)] """ Explanation: We see the problem of inconsistent encoding of categorical variables that was disussed in the lecture! Fixing this would take some manual effort. A better solution would be to collect the data different (e.g. multiple choice), but we don't always have control over the data collection. Now that we have a nice clean index we can join the two dataframes: End of explanation """ joined.loc[joined.Course_x != joined.Course_y] most_recent = lambda row: row.Course_x if ((row.Datetime_x > row.Datetime_y) & (pd.notnull(row.Course_x))) else row.Course_y joined = joined.assign(Course = [ci if ci==cj else most_recent(joined.iloc[ii]) for ii, (ci,cj) in enumerate(zip(joined.Course_x, joined.Course_y))]) joined.drop(columns=['Course_x', 'Course_y'], inplace=True) joined.head() joined.Course.value_counts() """ Explanation: We need to deal with the duplication in the Course variable. We take the most recently input value of 'Course' (if not null). End of explanation """ plt.figure(figsize=(12,6)) plt.subplot(1,2,1) plt.scatter(joined.Python_Experience, joined.Ethics_Importance) plt.xlabel('Python_Experience') plt.ylabel('Ethics_Importance'); plt.subplot(1,2,2) plt.scatter(joined.Python_Experience, joined.Female_Developers) plt.xlabel('Python_Experience') plt.ylabel('Female_Developers'); joined.Favourite_Language.value_counts() joined.Favourite_Movie.value_counts() gi = 6 groups = list(joined.groupby('Course')) print("There are %d courses." %len(groups)) plt.title(groups[gi][0]) groups[gi][1].Favourite_Language.value_counts().plot.pie() plt.axis('off'); joined.loc[joined['Course'] == 'Digital Health'] #joined[joined[['Course']] == 'Digital Health']#[['Favourite_Movie']] def plot_movie_pie(group_on='Course', gi='Computer Science'): plt.title(gi) joined.loc[joined[group_on] == gi, 'Favourite_Movie'].value_counts().plot.pie() plt.axis('off'); plot_movie_pie() #gi='Engineering Mathematics') """ Explanation: Now that the data sets are fused we can interogate them and continue the exploration... End of explanation """
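# A toy, self-contained version (made-up IDs and dates) of the join-and-reconcile step
# above: outer-merge the two response tables on ID, then keep the most recent non-null
# Course answer. combine_first handles the "fall back to the other form" part.
import pandas as pd

s1 = pd.DataFrame({"ID": ["AB", "CD", "EF"],
                   "Course": ["Maths", "CS", "Physics"],
                   "Datetime": pd.to_datetime(["2019-01-01"] * 3)}).set_index("ID")
s2 = pd.DataFrame({"ID": ["AB", "CD"],
                   "Course": ["Maths", "Data Science"],
                   "Datetime": pd.to_datetime(["2019-02-01"] * 2)}).set_index("ID")

merged = s1.merge(s2, left_index=True, right_index=True, how="outer", suffixes=("_1", "_2"))
newer = merged["Datetime_2"] >= merged["Datetime_1"]          # False where form 2 is missing
merged["Course"] = merged["Course_2"].where(newer).combine_first(merged["Course_1"])
print(merged[["Course"]])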
f-guitart/data_mining
notes/98 - Data Storage and File Formats with Pandas.ipynb
gpl-3.0
import pandas as pd iqsize = pd.read_csv("https://raw.githubusercontent.com/f-guitart/data_mining/master/data/iqsize.csv") iqsize.head() type(iqsize) iqsize["sex"][:10] iqsize["sex"].to_csv("myseries.csv") %ls myseries.csv """ Explanation: Data reading and writting using Pandas We will focus on three formats to store our data in disk: * CSV: comma separated value. Two standard sparators: * comma: american * semicolon: eurpean * XLS: excel file (xls or xlsx) * Pickle: python serialized file format Pandas provide functions to load csv, xls and pickle files: * Reading: * pandas.read_csv(...) API Reference * pandas.read_excel(...) API Reference * pandas.read_pickle(...) API Reference * Writting: * object.to_csv(...) API Reference * object.to_excel(...) API Reference * object.to_pickle(...) API Reference Examples We will see some examples of file loading: CSV End of explanation """ iqsize.to_excel("iqsize.xlsx") %ls iqsize.xlsx xls_iqsize = pd.read_excel("iqsize.xlsx") xls_iqsize.head() """ Explanation: Excel With excel files we can start the other way round: writting first. End of explanation """ my_df = pd.DataFrame({"a" : [{"apples": [1,2,3,4,6], "pears":2}, None, None, {"bannanas":4}], "b" : [0,1,2,3]}) my_df.to_csv("mydf.csv") my_df2 = pd.read_csv("mydf.csv") type(my_df2.iloc[0,1]) my_df.to_pickle("mydf.pickle") my_df3 = pd.read_pickle("mydf.pickle") my_df3.head() type(my_df3.iloc[0,0]) train = pd.Series([1,2,3,4,5,6,7,8]) test = pd.Series([9,10,11]) pd.to_pickle({"train": train, "test" : test},"my_pickle.pickle") %ls my_pickle.pickle my_pickle = pd.read_pickle("my_pickle.pickle") my_pickle.keys() type(my_pickle['train']) """ Explanation: Pickle Why do we need serialized wirtting features? Becasue sometimes we do want to store wierd things: * DataFrames with dictionaries, lists or objects in columns * Dictionaries of dataframes End of explanation """
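# A condensed round-trip sketch (throwaway file names): CSV turns Python objects stored in
# a column into plain text, while pickle brings them back as the original objects.
import pandas as pd

df_obj = pd.DataFrame({"a": [{"apples": 3}, {"pears": 2}], "b": [0, 1]})

df_obj.to_csv("roundtrip.csv", index=False)
print(type(pd.read_csv("roundtrip.csv").loc[0, "a"]))        # <class 'str'>

df_obj.to_pickle("roundtrip.pickle")
print(type(pd.read_pickle("roundtrip.pickle").loc[0, "a"]))  # <class 'dict'>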
mbeyeler/opencv-machine-learning
notebooks/09.03-Getting-Acquainted-with-Deep-Learning.ipynb
mit
from keras.models import Sequential model = Sequential() """ Explanation: <!--BOOK_INFORMATION--> <a href="https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-opencv" target="_blank"><img align="left" src="data/cover.jpg" style="width: 76px; height: 100px; background: white; padding: 1px; border: 1px solid black; margin-right:10px;"></a> This notebook contains an excerpt from the book Machine Learning for OpenCV by Michael Beyeler. The code is released under the MIT license, and is available on GitHub. Note that this excerpt contains only the raw code - the book is rich with additional explanations and illustrations. If you find this content useful, please consider supporting the work by buying the book! <!--NAVIGATION--> < Implementing a Multi-Layer Perceptron (MLP) in OpenCV | Contents | Training an MLP in OpenCV to Classify Handwritten Digits > https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py Getting Acquainted with Deep Learning Back when deep learning didn't have a fancy name yet, it was called artificial neural networks. So you already know a great deal about it! This was a respected field in itself, but after the days of Rosenblatt's perceptron, many researchers and machine learning practitioners slowly began to lose interest in the field since no one had a good solution for training a neural network with multiple layers. With the current popularity of deep learning in both industry and academia, we are fortunate enough to have a whole range of open-source deep learning frameworks at our disposal: - Google Brain's TensorFlow: This is a machine learning library that describes computations as dataflow graphs. To date, this is one of the most commonly used deep learning libraries. Hence, it is also evolving quickly, so you might have to check back often for software updates. TensorFlow provides a whole range of user interfaces, including Python, C++, and Java interface. - Microsoft Research's Cognitive Toolkit (CNTK): This is a deep learning framework that describes neural networks as a series of computational steps via a directed graph. - UC Berkeley's Caffe: This is a pure deep learning framework written in C++, with an additional Python interface. - University of Montreal's Theano: This is a numerical computation library compiled to run efficiently on CPU and GPU architectures. Theano is more than a machine learning library; it can express any computation using a specialized computer algebra system. Hence, it is best suited for people who wish to write their machine learning algorithms from scratch. - Torch: This is a scientific computing framework based on the Lua programming language. Like Theano, Torch is more than a machine learning library, but it is heavily used for deep learning by companies such as Facebook, IBM, and Yandex. Finally, there is also Keras, which we will be using in the following sections. In contrast to the preceding frameworks, Keras understands itself as an interface rather than an end-toend deep learning framework. It allows you to specify deep neural nets using an easy-tounderstand API, which can then be run on backends, such as TensorFlow, CNTK, or Theano. Getting acquainted with Keras The core data structure of Keras is a model, which is similar to OpenCV's classifier object, except it focuses on neural networks only. 
The simplest type of model is the Sequential model, which arranges the different layers of the neural net in a linear stack, just like we did for the MLP in OpenCV: End of explanation """ from keras.layers import Dense model.add(Dense(1, activation='linear', input_dim=2, kernel_initializer='zeros')) """ Explanation: Then different layers can be added to the model one by one. In Keras, layers do not just contain neurons, they also perform a function. Some core layer types include the following: Dense: This is a densely connected layer. This is exactly what we used when we designed our MLP: a layer of neurons that is connected to every neuron in the previous layer. Activation: This applies an activation function to an output. Keras provides a whole range of activation functions, including OpenCV's identify function (linear), the hyperbolic tangent (tanh), a sigmoidal squashing function (sigmoid), a softmax function (softmax), and many more. Reshape: This reshapes an output to a certain shape. There are other layers that calculate arithmetic or geometric operations on their inputs: - Convolutional layers: These layers allow you to specify a kernel with which the input layer is convolved. This allows you to perform operations such as a Sobel filter or apply a Gaussian kernel in 1D, 2D, or even 3D. - Pooling layers: These layers perform a max pooling operation on their input, where the output neuron's activity is given by the maximally active input neuron. Some other layers that are popular in deep learning are as follows: - Dropout: This layer randomly sets a fraction of input units to zero at each update. This is a way to inject noise into the training process, making it more robust. - Embedding: This layer encodes categorical data, similar to some functions from scikit-learn's preprocessing module. - GaussianNoise: This layer applies additive zero-centered Gaussian noise. This is another way of injecting noise into the training process, making it more robust. A perceptron similar to the preceding one could thus be implemented using a Dense layer that has two inputs and one output. Staying true to our earlier example, we will initialize the weights to zero and use the hyperbolic tangent as an activation function: End of explanation """ model.compile(optimizer='sgd', loss='mean_squared_error', metrics=['accuracy']) """ Explanation: Finally, we want to specify the training method. Keras provides a number of optimizers, including the following: - stochastic gradient descent ('sgd'): This is what we have discussed before - root mean square propagation ('RMSprop'): This is a method in which the learning rate is adapted for each of the parameters - adaptive moment estimation ('Adam'): This is an update to root mean square propagation and many more In addition, Keras also provides a number of different loss functions: - mean squared error ('mean_squared_error'): This is what was discussed before - hinge loss ('hinge'): This is a maximum-margin classifier often used with SVM, as discussed in Chapter 6, Detecting Pedestrians with Support Vector Machines, and many more You can see that there's a whole plethora of parameters to be specified and methods to choose from. 
To stay true to our aforementioned perceptron implementation, we will choose stochastic gradient descent as an optimizer, the mean squared error as a cost function, and accuracy as a scoring function: End of explanation """ from sklearn.datasets.samples_generator import make_blobs X, y = make_blobs(n_samples=100, centers=2, cluster_std=2.2, random_state=42) """ Explanation: In order to compare the performance of the Keras implementation to our home-brewed version, we will apply the classifier to the same dataset: End of explanation """ model.fit(X, y, epochs=400, batch_size=100, shuffle=False, verbose=0) """ Explanation: Finally, a Keras model is fit to the data with a very familiar syntax. Here, we can also choose how many iterations to train for (epochs), how many samples to present before we calculate the error gradient (batch_size), whether to shuffle the dataset (shuffle), and whether to output progress updates (verbose): End of explanation """ model.evaluate(X, y) """ Explanation: After the training completes, we can evaluate the classifier as follows: End of explanation """ import numpy as np np.random.seed(1337) # for reproducibility """ Explanation: Here, the first reported value is the mean squared error, whereas the second value denotes accuracy. This means that the final mean squared error was 0.04, and we had 100% accuracy. Way better than our own implementation! End of explanation """
sdpython/actuariat_python
_doc/notebooks/sessions/2017_session6.ipynb
mit
from jyquickhelper import add_notebook_menu add_notebook_menu() """ Explanation: Session 26/6/2017 - machine learning Découverte des trois problÚmes de machine learning exposé dans l'article Machine Learning - session 6. End of explanation """ import pandas df = pandas.read_csv("data/housing.data", delim_whitespace=True, header=None) df.head() cols = "CRIM ZN INDUS CHAS NOX RM AGE DIS RAD TAX PTRATIO B LSTAT MEDV".split() df.columns = cols df.head() X = df.drop("MEDV", axis=1) y = df["MEDV"] from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33) """ Explanation: ProblÚme 1 : comparaison random forest, linéaire C'est un problÚme de régression. On cherche à comparer une random forest avec un modÚle linéaire. Comparaison des tests de coefficients pour un modÚle linéaire OLS et des features importance Résultat au niveau d'une observation treeinterpreter Données : Housing, Forest Fire Données End of explanation """ from sklearn.ensemble import RandomForestRegressor clr = RandomForestRegressor() clr.fit(X, y) importances = clr.feature_importances_ importances """ Explanation: Random Forest End of explanation """ %matplotlib inline import matplotlib.pyplot as plt import numpy as np plt.figure(figsize=(12,4)) indices = np.argsort(importances)[::-1] std = np.std([tree.feature_importances_ for tree in clr.estimators_], axis=0) plt.title("Feature importances") plt.bar(range(X.shape[1]), importances[indices], color="r", yerr=std[indices], align="center") xlabels = list(df.columns[:-1]) xlabels = [xlabels[i] for i in indices] plt.xticks(range(X.shape[1]), xlabels) plt.xlim([-1, X.shape[1]]) plt.show() from sklearn.metrics import r2_score r2_score(y_train, clr.predict(X_train)) r2_score(y_test, clr.predict(X_test)) """ Explanation: On s'inspire de l'exemple Feature importances with forests of trees. End of explanation """ import statsmodels.api as sm model = sm.OLS(y_train, X_train) results = model.fit() results.params results.summary() model = sm.OLS(y,X.drop("LSTAT", axis=1)) results = model.fit() results.summary() """ Explanation: ModÚle linéaire End of explanation """ try: from tpot import TPOTRegressor except ImportError: # for sklearn 0.22 import sklearn.preprocessing from sklearn.impute import SimpleImputer sklearn.preprocessing.Imputer = SimpleImputer from tpot import TPOTRegressor tpot = TPOTRegressor(generations=2, population_size=50, verbosity=2) tpot.fit(X_train, y_train) print(tpot.score(X_test, y_test)) tpot.export('tpot_boston_pipeline.py') """ Explanation: TPOT TPOT est un module d'apprentissage automatique. End of explanation """ r2_score(y_train, tpot.predict(X_train)) r2_score(y_test, tpot.predict(X_test)) """ Explanation: Le module optimise les hyperparamÚtres, parfois un peu trop à en juger la mauvaise performance obtenue sur la base de test. End of explanation """ clr = RandomForestRegressor() clr.fit(X, y) from treeinterpreter import treeinterpreter as ti prediction, bias, contributions = ti.predict(clr, X_test) for i in range(min(2, X_train.shape[0])): print("Instance", i) print("Bias (trainset mean)", bias[i]) print("Feature contributions:") for c, feature in sorted(zip(contributions[i], df.columns), key=lambda x: -abs(x[0])): print(feature, round(c, 2)) print( "-"*20) """ Explanation: Feature importance pour une observations On reprend la premiÚre random forest et on utilise le module treeinterpreter. 
End of explanation """ import pandas df = pandas.read_csv("data/multiTimeline.csv", skiprows=1) df.columns= ["Semaine", "compte"] df["SemaineDt"] = pandas.to_datetime(df.Semaine) df=df.set_index("SemaineDt") df["compte"] = df["compte"].astype(float) df.head() %matplotlib inline df.plot() from statsmodels.tsa.arima_model import ARIMA arma_mod = ARIMA(df["compte"].values, order=(6 ,1, 1)) res = arma_mod.fit() res.params res.summary() """ Explanation: ProblÚme 2 : série temporelle On prend une série sur Google Trends, dans notre cas, c'est la requête tennis live. On compare une approche linéaire et une approche non linéaire. Approche linéaire End of explanation """ from statsmodels.tsa.tsatools import lagmat lag = 8 X = lagmat(df["compte"], lag) lagged = df.copy() for c in range(1,lag+1): lagged["lag%d" % c] = X[:, c-1] pandas.concat([lagged.head(), lagged.tail()]) xc = ["lag%d" % i for i in range(1,lag+1)] split = 0.66 isplit = int(len(lagged) * split) xt = lagged[10:][xc] yt = lagged[10:]["compte"] X_train, y_train, X_test, y_test = xt[:isplit], yt[:isplit], xt[isplit:], yt[isplit:] from sklearn.ensemble import RandomForestRegressor clr = RandomForestRegressor() clr.fit(X_train, y_train) from sklearn.metrics import r2_score r2 = r2_score(y_test.values, clr.predict(X_test)) r2 plt.scatter(y_test.values, clr.predict(X_test)); """ Explanation: Méthode non linéaire On construire la matrice des séries décalées. Cette méthode permet de sortir du cadre linéaire et d'ajouter d'autres variables. End of explanation """ from ensae_teaching_cs.data import twitter_zip df = twitter_zip(as_df=True) df.head(n=2).T """ Explanation: Texte On cherche à comparer une LDA avec word2vec et kmeans et les données qui sont sur ensae_teaching_cs/src/ensae_teaching_cs/data/data_web/. End of explanation """ keep = df.text.dropna().index dfnonan = df.iloc[keep, :] dfnonan.shape from sklearn.feature_extraction.text import TfidfVectorizer tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=1000) tfidf = tfidf_vectorizer.fit_transform(dfnonan["text"]) tfidf[:2, :] tfidf[:2, :].todense() """ Explanation: Des mots aux coordonnées - tf-idf End of explanation """ from sklearn.decomposition import LatentDirichletAllocation lda = LatentDirichletAllocation(n_components=10, max_iter=5, learning_method='online', learning_offset=50., random_state=0) lda.fit(tfidf) tf_feature_names = tfidf_vectorizer.get_feature_names() tf_feature_names[100:103] lda.components_.shape """ Explanation: LDA End of explanation """ def print_top_words(model, feature_names, n_top_words): for topic_idx, topic in enumerate(model.components_): print("Topic #%d:" % topic_idx) print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]])) print() print_top_words(lda, tf_feature_names, 10) """ Explanation: On obtient dix vecteurs qui représentent les dix vecteurs associés aux dix clusters. Chaque dimension relié au fait que le mot appartient ou non au cluster. End of explanation """ from sklearn.cluster import KMeans km = KMeans(n_clusters=10) km.fit(tfidf) km.cluster_centers_.shape def print_top_words(model, feature_names, n_top_words): for topic_idx, topic in enumerate(model.cluster_centers_): print("Topic #%d:" % topic_idx) print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]])) print() print_top_words(km, tf_feature_names, 10) """ Explanation: Clustering End of explanation """
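# A self-contained miniature (made-up snippets) of the TF-IDF + KMeans pipeline above,
# showing how the cluster centers give the "top words" per cluster. get_feature_names_out
# assumes a recent scikit-learn; older versions expose get_feature_names instead.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans

docs = ["machine learning avec python", "python et pandas pour les donnees",
        "match de tennis en direct", "resultat du tennis en direct"]
vec = TfidfVectorizer()
tfidf_small = vec.fit_transform(docs)
km_small = KMeans(n_clusters=2, n_init=10, random_state=0).fit(tfidf_small)

terms = vec.get_feature_names_out()
for k, center in enumerate(km_small.cluster_centers_):
    top = center.argsort()[::-1][:3]
    print("cluster", k, ":", ", ".join(terms[i] for i in top))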
stuser/temp
AI_Academy/trend_micro_basic_data_intro.ipynb
mit
import pandas as pd import numpy as np import matplotlib.pyplot as plt plt.rcParams['font.family']='SimHei' #顯瀺䞭文 %matplotlib inline import warnings warnings.filterwarnings('ignore') # Load in the train datasets train = pd.read_csv('input/training-set.csv', encoding = "utf-8", header=None) test = pd.read_csv('input/testing-set.csv', encoding = "utf-8", header=None) #query_log裡面被官方排陀的 FileID train_exc = pd.read_csv('input/exception/exception_train.txt', encoding = "utf-8", header=None) test_exc = pd.read_csv('input/exception/exception_testing.txt', encoding = "utf-8", header=None) test_exc.head(2) # from sklearn.preprocessing import Imputer # imputer = Imputer(missing_values='NaN', axis=0, strategy='mean') # imputer.fit_transform(X[:,[1,3]]) # training set - label: 0:非惡意皋匏, 1:惡意皋匏 train.columns=['FileID','label'] train.head(2) # testing set - AUC: Area Under ROC Curve test.columns=['FileID','AUC'] test.head(2) #確認排陀的FileID圚training set裡面扟䞍到 train[train['FileID'].isin(train_exc[0])] #取0301當倩的query log䟆查看 query_0301 = pd.read_csv('input/query_log/0301.csv', encoding = "utf-8", header=None) query_0301.columns=['FileID','CustomerID','QueryTs','ProductID'] query_0301['times'] = 1 query_0301.head(2) query_0301.describe() query_0301.info(memory_usage='deep') """ Explanation: 資料集簡介 <p>欄䜍說明:</p> <p>FileID: 檔案識別ID</p> <p>CustomerID: 䜿甚者裝眮識別ID</p> <p>QueryTs: 該筆資料癌生時間</p> <p>ProductID: 䜿甚者裝眮的產品代碌</p> End of explanation """ query_0301.pivot_table(values='times',index=['FileID'],columns='ProductID',aggfunc='sum') """ Explanation: 暞杻分析 End of explanation """ query_0301.groupby(['FileID','CustomerID','ProductID'])[['times']].sum() """ Explanation: 聚合凜敞 count個敞, sum加瞜, mean平均, median䞭䜍敞, std暙準差, var變異敞, first第䞀個非NA, last最埌䞀個非NA End of explanation """ #AUC蚈算範䟋 import numpy as np from sklearn import metrics y = np.array([1, 1, 2, 2]) pred = np.array([0.5, 1, 0.9, 1]) fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2) metrics.auc(fpr, tpr) #timestamp蜉換 import datetime print( datetime.datetime.fromtimestamp( int("1488326402") ).strftime('%Y-%m-%d %H:%M:%S')) """ Explanation: 其它會甚到的工具 End of explanation """
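#以 pandas 批次蜉換 QueryTs 䞊統蚈每日查詢次敞 (a minimal sketch)
# Assuming QueryTs holds Unix epochs in seconds (as in the datetime example above),
# the whole column can be converted at once and aggregated per FileID per day.
query_0301['QueryDate'] = pd.to_datetime(query_0301['QueryTs'], unit='s').dt.date
daily_counts = query_0301.groupby(['FileID', 'QueryDate'])['times'].sum().reset_index()
daily_counts.head()
"""
Explanation: 批次時間蜉換與每日統蚈
A minimal sketch extending the single-timestamp example above: it assumes QueryTs stores Unix epochs in seconds and uses pandas to convert the whole column, then counts queries per FileID per day.
End of explanation
"""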
LSST-Supernova-Workshops/Pittsburgh-2016
Tutorials/QuickMC/Topic5_workbook.ipynb
mit
%matplotlib inline import sys, platform, os from matplotlib import pyplot as plt import numpy as np import astropy as ap import pylab as pl # we start by setting the cosmological parameters of interest, and reading in our data cosmoparams_orig = [70., 0.3, 0.7, -0.9, 0.2] redshift=np.arange(0.001,1.3,0.01) # a redshift vector for plotting etc. plot=False root = 'large_error' sndata = np.loadtxt('workshop_data_' + root+'.txt', unpack=True) sndata[2]=sndata[2] cov = np.diag(sndata[2]**2) # We will start by defining some functions to generate Ia data (for our model computation later) def gen_ia(cosmoparams, redshift=np.arange(0.01,1,0.1), plot=True): '''Code to simulate the SNeIa, taking input of cosmology params, redshift vector and a plotting flag''' from astropy.cosmology import w0waCDM import pylab as pl cosmo = w0waCDM(H0=cosmoparams[0], Om0=cosmoparams[1], Ode0=cosmoparams[2], w0=cosmoparams[3], wa=cosmoparams[4]) mu = cosmo.distmod(redshift).value if plot: pl.figure(figsize=(8,6)) pl.plot(redshift, mu, '-') pl.xlabel(r'redshift $z$', fontsize=20) pl.ylabel(r'$\mu(z)$', fontsize=20) pl.show() return mu # To check this works we generate some theory curve to match the data cosmoparams_orig = [70., 0.3, 0.7, -0.9, 0.2] mulcdm = gen_ia(cosmoparams_orig, redshift, plot) pl.figure(figsize=(8,6)) pl.xlabel(r'redshift $z$', fontsize=20) pl.ylabel(r'$\mu(z)$', fontsize=20) pl.errorbar(sndata[0], sndata[1],sndata[2],marker='.', color='m', linestyle='None') pl.plot(redshift, mulcdm) """ Explanation: Cosmological Analysis with SNe (Topic Five) Renée Hloşek Preparing for SN Science in the LSST era: a kick off workshop We are going to do a very rough example of an MCMC, using the <a href=" https://en.wikipedia.org/wiki/Metropolis%E2%80%93Hastings_algorithm"> Metroplis Hastings algorithm, </a> so that when you run more complicated code (eg. emcee) it makes sense intuitively! We will then do a comparison with a <a href="https://en.wikipedia.org/wiki/Fisher_information"> Fisher matrix technique </a> so we can compare them mentally. We will make sure that we have astropy installed: use pip install astropy MCMC End of explanation """ wvals = np.arange(-0.94,-0.84,0.03) pl.figure(figsize=(8,6)) cosmoparams=list(cosmoparams_orig) # changing oonly the wvalue for now for wval in wvals: cosmoparams[3] = wval mu = gen_ia(cosmoparams, redshift, plot=False) pl.plot(redshift, (mu-mulcdm)/mulcdm, label=r'$w_0=%s$'%wval) pl.xlabel(r'redshift $z$', fontsize=20) pl.ylabel(r'$\Delta \mu(z)$', fontsize=20) leg = pl.legend(loc='best') leg.draw_frame(False) """ Explanation: Now that we have called our distance modulus once and know how to do it, we can also compute it over a range of parameters. 
End of explanation """ ## Fake likelihood for LSST SN data def sn_likelihood(cosmoparams, loaddata=True): if loaddata: # if it is the first time, load the data data = np.loadtxt('workshop_data_' + root+'.txt', unpack=True) redshift = data[0] # we are just assuming that the distance modulus is at the same redshift as the binned value modelmu = gen_ia(cosmoparams, redshift, plot=False) num_sn=len(data[0]) loglike = (data[1]-modelmu)**2/(2.*data[2]**2) loglike=-np.sum(loglike,axis=0) return loglike, num_sn ## Define a prior while you are sampling so you don't go to weird places (ie negative Omega_m) def snprior(cosmoparams): p = -np.ones(len(cosmoparams)) # Gaussian priors #p[0] = -((cosmoparams[0])-70.)**2/(2.*5)**2 p[1] = -((cosmoparams[1])-0.3)**2/(2.*0.02)**2 #p[2] = -((cosmoparams[2])-0.7)**2/(2.*0.02)**2 p[3] = -((cosmoparams[2])+0.9)**2/(2.*0.7)**2 # hard cuts if ((cosmoparams[0]< 50) or (cosmoparams[0]> 100)): p[0] = -3000 if (cosmoparams[1] < 0): p[1] = -3000 if (cosmoparams[2]< 0): p[2] =-3000 if (cosmoparams[3]< -2): p[3] = -3000 if ((cosmoparams[4]< -2) or (cosmoparams[4]> 2)): p[4] = -3000 pp = sum(p) return pp """ Explanation: Now let's make a fake likelihood for the SN data. Of course this is over-simplified, we know that the errors are correlated, and that we need to account for the LC parameters too. End of explanation """ # Your code here """ Explanation: The first thing we notice is that we shouldn't just be taking the model spectrum at that bin, but we should be binning the theory. <font color='red'>EXCERCISE if interested: </font> Write a module to bin the theory over the same redshift range as the binned data. End of explanation """ # Let's call the module for the spectrum we have above. model = list(cosmoparams_orig) loglike, num_sn = sn_likelihood(model) print loglike, num_sn, loglike/num_sn """ Explanation: Your discussion here. End of explanation """ # Using this code above, we can take a gaussian step specified by the step vector below stepvec = np.array([0.0,0.0001, 0.02, 0.005, 0.001]) nsteps = 2 loglike = np.zeros(nsteps) for i in range(nsteps): if i==0: # First step step = list(cosmoparams_orig) else: # Take a Gaussian step from the previous position step = step+np.random.randn(len(cosmoparams))*stepvec model=step loglike[i], num_sn = sn_likelihood(model) print 'loglike vector =', 2*loglike """ Explanation: We want to take a step in this 5-D parameter space specified by the step vector. End of explanation """ def mcmc_mh(ratln): accept=False r1 = np.random.rand() # If the step is definitely better, we want to accept it. 
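    # ratln is a difference of log-posteriors, so np.exp(ratln) is the posterior ratio
    # p_new/p_old: ratios of 1 or more always pass the test below, while smaller ratios
    # are accepted with probability equal to the ratio via the uniform draw r1.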
# If it isn't necessarily better, we want to throw a random number and step if we exceed it if np.exp(ratln) > r1: accept=True return accept # Using this code above, we can take a gaussian step specified by the step vector below if (root=='large_error'): stepvec = np.array([0.0,0.03, 0.0, 0.005, 0.0]) else: stepvec = np.array([0.0,0.005, 0.0, 0.001, 0.0]) paramsvec=np.array(cosmoparams_orig) steps = 10000 loglike = np.zeros(steps) prior = np.zeros(steps) post = np.zeros(steps) stepskeep = np.zeros((steps,len(paramsvec)+1)) accept_count=0 for i in range(steps): if i==0: step = np.array(paramsvec) accept=True model=list(step) loglike[i], num_sn = sn_likelihood(model) prior[i] = snprior(model) post[i] = loglike[i]+prior[i] stepskeep[i,0:len(paramsvec)] = np.array(step) stepskeep[i,len(paramsvec)]= loglike[i] else: step = stepskeep[i-1,0:len(paramsvec)]+np.random.randn(len(paramsvec))*stepvec model=list(step) prior[i] = snprior(model) if (prior[i]>-3000): loglike[i], num_sn = sn_likelihood(model) post[i] = loglike[i]+prior[i] rat = post[i]-post[i-1] accept = mcmc_mh(rat) else: accept=False if accept: stepskeep[i,0:len(paramsvec)] = np.array(step) stepskeep[i,len(paramsvec)] = loglike[i] accept_count+=1 else: stepskeep[i,0:len(paramsvec)] = stepskeep[i-1,0:len(paramsvec)] loglike[i] = loglike[i-1] stepskeep[i,len(paramsvec)] = loglike[i] if (steps%i ==0): print 'acceptance ratio = ', accept_count/float(i), 'steps taken = ', i np.savetxt('chain_'+root+'.txt', stepskeep, delimiter=' ', fmt='%.3e') print 'we are done' """ Explanation: MCMC We are now ready to do the MCMC. We'll define the simplest/ugliest version of the Metropolis Hastings algorithm: End of explanation """ ## Your code here """ Explanation: We don't actually want to read in the data every time. <font color='red'>EXCERCISE if interested: </font> Change the likelihood function to only read in the data the first time it is called. 
End of explanation """ # Read in the chain import corner chain = np.loadtxt('chain_'+root+'.txt', unpack=True) burn = np.int(0.5*len(chain[0,:])) # burn off some initial part of the chain pl.figure(2) sigma = 1.0 chain_colour = '#5AB1BB' binnum=40 newchain = np.zeros((len(chain[0,burn:]),2)) newchain[:,0] = chain[1,burn:] newchain[:,1] = chain[3,burn:] fig1 = corner.corner(newchain, color=chain_colour,smooth1d=2,smooth=2,plot_datapoints=False,levels=(1-np.exp(-0.5),1-np.exp(-2.)), density=True,bins=binnum, labels=[r'$\Omega_m$', r'$w_0$']) fig1.figsize=[20,20] #rcParams["figure.figsize"] = [10,10] pl.savefig(root+'.png') print 'mean for om:', np.mean(chain[1,burn:]), np.std(chain[1,burn:]) print 'mean for w0:', np.mean(chain[3,burn:]), np.std(chain[3,burn:]) """ Explanation: Now that we've run the chain, let's analyse it to see what the constraints look like End of explanation """ # We start by having a model that will change the cosmology within the Fisher matrix def assign_cosmo(cosmo,model=[70, 0.3,0.7, -0.9, 0.2]): import astropy as ap from astropy.cosmology import Planck15, Flatw0waCDM ob0=0.022 om0=model[1] ode0 =model[2] newcosmo = cosmo.clone(name='temp cosmo', H0=model[0], Ob0=ob0, Om0=om0, Ode0=ode0, w0=model[3], wa=model[4]) #print newcosmo.Ok0 return newcosmo # Define code that returns the mu and the Fisher matrix def fish_deriv_m(redshift, model, step): "takes the model vector - for now [h0,om,ok,w0,wa], step vector (0 if not step) \ data vector and gives back the derivs and the base function value at those \ redshifts" from astropy.cosmology import w0waCDM from astropy import constants as const import pylab as pl Ob0=0.022 Om0=model[1] Ode0 =model[2] cosmo = w0waCDM(model[0], Ob0, Om0, Ode0, model[3],model[4]) cosmo=assign_cosmo(cosmo, model) #print cosmo.Ok0 m = [] m_deriv = [] c = const.c.to('km/s') base_theory = cosmo.distmod(redshift) m = base_theory.value step_inds = np.where(step)[0] # look for non-zero step indices deriv = np.zeros((len(base_theory), len(model))) if (step_inds.size==0): print 'No steps taken, abort' exit else: print '\n' print 'Computing Fisher derivatives...' for i, stepp in enumerate(step_inds): print 'we are stepping in :', model[stepp], ' with step size', step[stepp] cosmo = assign_cosmo(cosmo, model) theory = np.zeros((len(base_theory),2)) for count,j in enumerate([-1,1]): tempmodel = list(model) tempmodel[stepp] = model[stepp] + j*step[stepp] #print tempmodel c = const.c.to('km/s') cosmo = assign_cosmo(cosmo, tempmodel) tmp = cosmo.distmod(redshift) theory[:,count] = tmp.value deriv[:,stepp] = (theory[:,1] - theory[:,0])/(2.*step[stepp]) m_deriv = deriv return m, m_deriv stepvec = np.array([0, 0.001, 0.00, 0.1, 0.0]) model = [70., 0.3, 0.7, -0.9, 0.2] names = ['hubble', 'omega_m', 'omega_de', 'w0', 'wa'] step_inds = np.where(stepvec)[0] fishermu, deriv = fish_deriv_m(sndata[0], model, stepvec) pl.errorbar(sndata[0],sndata[1], sndata[2], marker='.', linestyle='None') pl.plot(sndata[0], fishermu, marker='*', color='r', linestyle='None') # lets plot the Fisher derivaties for interest for i in step_inds: pl.plot(sndata[0], deriv[:,i]/fishermu, label=names[i],marker='.', linestyle='None') leg = pl.legend(loc='best', numpoints=1) leg.draw_frame(False) # We are setting up the covariance data for the Fishermatrix cov = np.diag(sndata[2]**2) inv_cov = np.diag(1./sndata[2]**2.) 
# Initialising the Fisher Matrix FM = np.zeros((len(step_inds), len(step_inds), len(sndata[2]) )) # Compute the Fisher matrix for i in range(len(step_inds)): # loop over variables for j in range(len(step_inds)): # loop over variables for k in range(len(sndata[0])): # loop over redshifts invcov = inv_cov[k,k] FM[i,j,k] = np.dot(np.dot(deriv[k,step_inds[i]], invcov), deriv[k,step_inds[j]]) # sum over the redshift direction fishmat = np.sum(FM,axis=2) # Compute the prior matrix prior_vec = np.array([0.1, 0.02, 0.0006, 0.2, 0.2]) priormat = np.diag(1./prior_vec[step_inds]**2.) final_FM = fishmat + priormat covmat = np.linalg.inv(final_FM) sigma = np.sqrt(covmat.diagonal()) print 'Fisher matrix results' print 'error for om:', sigma[0] print 'error for w0:', sigma[1] print 'MCMC results' print 'error for om:', np.std(chain[1,:]) print 'error for w0:', np.std(chain[3,:]) """ Explanation: Fisher matrix In this case we want to compute the Fisher derivatives for a given parameter of interest, and include the errors that are simulated and then forecast the constraints on the parameters around an assumed model. <img src="fisher.png"> End of explanation """ ## Print differece print np.std(chain[1,:])/sigma[0] print np.std(chain[3,:])/sigma[1] """ Explanation: In general the Cramer-Rao bound states that the Fisher matrix will always be smaller than the MCMC bound. The smaller the errorbars - the more Gaussian the contours and the more the FM contours agree with the MCMC ones! End of explanation """
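# A quick cross-check (a minimal sketch): the Fisher covariance also predicts the
# Om-w0 correlation, which can be compared with the correlation measured directly
# from the chain (columns 1 and 3 hold Omega_m and w0, after burn-in).
fisher_corr = covmat[0,1]/np.sqrt(covmat[0,0]*covmat[1,1])
chain_corr = np.corrcoef(chain[1,burn:], chain[3,burn:])[0,1]
print 'Fisher Om-w0 correlation:', fisher_corr
print 'MCMC Om-w0 correlation:', chain_corr
"""
Explanation: Beyond the marginalised errors, the Fisher covariance also fixes the correlation between the two parameters; a minimal sketch is to compare that prediction with the correlation measured from the chain, which should agree when the posterior is close to Gaussian.
End of explanation
"""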
semio/ddf_utils
examples/etl/migrant.ipynb
mit
import numpy as np import pandas as pd # from ddf_utils.dsl import * source = '../source/UN_MigrantStockByOriginAndDestination_2019.xlsx' """ Explanation: Create DDF dataset from UN International migrant stock 2019 dataset In this notebook we are going to demonstrate how to create a DDF dataset with ddf_utils. We will use International migrant stock: The 2019 revision as source and convert the migrant stock By destination and origin. Below are screenshots for this source file. Data Tables: ANNX sheet: The plan from the excel tables we can see there is a "ANNEX" sheet, which contains all country/region information. We will convert it to a geo domain. 3 data Tables contains total/male/female data. We will create a domain for gender. origin and destination are both regions/countries. So they will be roles in the region/country domain. there is only one indicator in source: migrant_stock. But when origin = total or destination = total, we can have immigrant_stock or emmigrant_stock. So we will have 3 indicators in the DDF dataset. Also each indicator will have 2 variants: one with gender domain and one without gender domain. We will not keep Notes/Type of Data/Sort Order columns ETL process Note: This notebook assumed you create a ddf dataset project with ddf new command and place the source file in etl/source and this notebook in etl/notebooks End of explanation """ # NOTE: this function will be re-written below. def read_source(sheet, skip=14, header=[0, 1]): return pd.read_excel(source, skiprows=skip, sheet_name=sheet, header=header, na_values=['..', ''], keep_default_na=False) # now try to load Table 1 table1 = read_source('Table 1', 14) table1.head() # function to fix column names def fix_column_names(df_): df = df_.copy() new_cols = np.r_[ df.columns.get_level_values(0).values[:6], # column 6 is `Total` df.columns.get_level_values(1).values[6:]] df.columns = new_cols return df fix_column_names(table1).head() table1 = fix_column_names(table1) # see data types. table1.dtypes['Total':] """ Explanation: first of all, let's create a data reader to load a table into pandas DataFrame with correct data types. i.e. data starts from the Total column should all be numbers(float64) We can see there are a few things we should process: we should skip a few lines at the beginning. there are .. inside number columns. we should consider .. as N/A values. the headers in data table span over 2 rows End of explanation """ def isnumeric(x): """check if a value is numeric value""" if isinstance(x, (np.floating, np.int, int, float)): return True else: return False alb = table1['Albania'].dropna() alb.loc[~alb.map(isnumeric)] """ Explanation: Now there is a problem. The Albania column have object type which is not the desired data type. We need to double check what goes wrong. Note Besides checking data types, we should also check if the numbers are loaded correctly ("153,011,473" in excel table should be 153011473 in the DataFrame). We can check this by viewing the source excel and the DataFrame side by side. Depends on the actual data you are working on, other kind of double checking might be required. 
End of explanation """ # redefine the read_source function def read_source(sheet, skip=14, header=[0, 1]): return pd.read_excel(source, skiprows=skip, sheet_name=sheet, header=header, na_values=['..', '', '-'], keep_default_na=False) table1 = fix_column_names(read_source('Table 1', 14)) all([x == 'float64' for x in table1.dtypes['Total':]]) table2 = fix_column_names(read_source('Table 2', 14)) all([x == 'float64' for x in table2.dtypes['Total':]]) table3 = fix_column_names(read_source('Table 3', 14)) all([x == 'float64' for x in table3.dtypes['Total':]]) """ Explanation: This means that there are - in the data table. We will treat it as N/A. End of explanation """ from ddf_utils.model.ddf import Entity, EntityDomain from ddf_utils.str import to_concept_id country = read_source('ANNEX', 15, 0) country.head() country.columns # load all entities ents = [] current_set = None sets_in_col = ['More Developed Regions', 'Less Developed Regions', 'Least developed countries', 'High-income Countries', 'Middle-income Countries', 'Upper-middle-income Countries', 'Lower-middle-income Countries', 'Low-income Countries', 'No income group available', 'Sub-Saharan Africa'] for i, row in country.iterrows(): name = row['Region, subregion, country or area'] if pd.isnull(row['Code']): # it's a set, and rows below this row belong to this set. current_set = to_concept_id(name) continue else: sets = set() for s in sets_in_col: if not pd.isnull(row[s]): sets.add(to_concept_id(s)) if current_set: sets.add(current_set) ents.append(Entity(id=str(int(row['Code'])), domain='geo', sets=list(sets), props={'name': name})) # Noticed that in data table there are "other south" and "other north", which are not in the ANNEX table. # So we append these 2 entity too ents.append( Entity(id='other_south', domain='geo', sets=[], props=dict(name='Other South'))) ents.append( Entity(id='other_north', domain='geo', sets=[], props=dict(name='Other North'))) domain = EntityDomain('geo', []) for e in ents: domain.add_entity(e) country_df = pd.DataFrame.from_dict(domain.to_dict()) country_df.head() # check: if origin and destination share same entities origin_cols = table1.columns[9:] for c in origin_cols: if c not in country_df['name'].values: print(f"missing {c}") else: print("all countries in Table 1 are in country domain") # save to file. # country_df.to_csv('../../ddf--entities--geo.csv', index=False) """ Explanation: geo domain Now let's load the ANNEX table and create geo domain: we will use code as identifier. Now the column values are numbers, we will convert to str. rows where code is empty are group names (for example UN development groups). We will treat them as entity sets columns are indicators for entity set membership too. In ddf_utils there are data classes for Entity and EntityDomain. End of explanation """ # gender domain sexdf = pd.DataFrame([ ['male', 'Male'], ['female', 'Female'] ], columns=['gender', 'name']) # sexdf.to_csv('../../ddf--entities--gender.csv', index=False) """ Explanation: Gender domain the gender domain is quite simple. We would just create a DataFrame manually. End of explanation """ # create a mapping from name to code. 
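# The same entities could also be produced with the Entity/EntityDomain helpers used
# for the geo domain above; this is only a sketch for symmetry, and the manual
# DataFrame above is what actually gets saved.
gender_domain = EntityDomain('gender', [])
for gid, gname in [('male', 'Male'), ('female', 'Female')]:
    gender_domain.add_entity(Entity(id=gid, domain='gender', sets=[], props={'name': gname}))
pd.DataFrame.from_dict(gender_domain.to_dict())
"""
Explanation: The gender entities could equivalently be built with the Entity/EntityDomain classes used for the geo domain; this is just an illustrative sketch, and the manually created sexdf is the one saved to file.
End of explanation
"""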
def _trans(x): if not pd.isnull(x): return str(int(x)) return x name_id_map = country.set_index('Region, subregion, country or area')['Code'].map(_trans).to_dict() # there are some area doesn't have codes, set here name_id_map['Total'] = '900' name_id_map['Other South'] = 'other_south' name_id_map['Other North'] = 'other_north' dp_cols = ['Code', 'Year'] for c in origin_cols: dp_cols.append(c) pd.Series(dp_cols) dp = table1[dp_cols].copy() dp = dp.dropna(subset=['Code', 'Year'], how='any') dp['Code'] = dp['Code'].map(int) dp['Year'] = dp['Year'].map(int) dp = dp.set_index(['Code', 'Year']) # rename remaining columns to geo id dp.columns = dp.columns.map(name_id_map) # create new dimension dp = dp.stack() dp.index.names = ['destination', 'year', 'origin'] dp.name = 'migrant_stock' dp.head() # double check: if there are duplicated index dp.index.has_duplicates # seems something goes wrong. digging in... dp[dp.index.duplicated(keep=False)].sort_index() dp[dp.index.duplicated(keep=False)].reset_index()['destination'].unique() # so only country id 909 has duplication. let's see what 909 means country[country['Code'] == 909] table1[table1['Code'] == 909] # so Oceania appear twice, and data values are same for both. We can safely drop these duplicates. # create a function for data tables. def create_datapoints(table): origin_cols = table.columns[6:] for c in origin_cols: # double check: if the country/region in the geo domain if c not in country_df['name'].values: print(f'column {c} is not in geo domain!') dp_cols = ['Code', 'Year'] [dp_cols.append(x) for x in origin_cols] dp = table[dp_cols].copy() dp = dp.dropna(subset=['Code', 'Year'], how='any') dp['Code'] = dp['Code'].map(int) dp['Year'] = dp['Year'].map(int) dp = dp.set_index(['Code', 'Year']) dp.columns = dp.columns.map(name_id_map) dp = dp.stack().reset_index() # print(dp.columns) dp.columns = ['destination', 'year', 'origin', 'migrant_stock'] dp = dp.drop_duplicates() return dp[['destination', 'origin', 'year', 'migrant_stock']] # also function for emrgrant def create_emgire_datapoints(dp): dp_emg = dp[(dp.destination == 900)].copy() dp_emg = dp_emg[['origin', 'year', 'migrant_stock']] dp_emg.columns = ['geo', 'year', 'emigrant_stock'] return dp_emg # and immigrant def create_imgire_datapoints(dp): dp_img = dp[(dp.origin == '900')].copy() dp_img = dp_img[['destination', 'year', 'migrant_stock']] dp_img.columns = ['geo', 'year', 'immigrant_stock'] return dp_img df = create_datapoints(table1) df_emg = create_emgire_datapoints(df) df_img = create_imgire_datapoints(df) df.head() # check: if there are still duplicated index np.all(df.duplicated(subset=['destination', 'origin', 'year'])) df_emg.head() df_img.head() # save to files # df.to_csv('../../ddf--datapoints--migrant_stock--by--destination--origin--year.csv', index=False) # df_emg.to_csv('../../ddf--datapoints--emigrant_stock--by--geo--year.csv', index=False) # df_img.to_csv('../../ddf--datapoints--immigrant_stock--by--geo--year.csv', index=False) # Table 2 and Table 3 # We will create gender dimension for them and merge them together. 
df2 = create_datapoints(table2) df2_emg = create_emgire_datapoints(df2) df2_img = create_imgire_datapoints(df2) df2['gender'] = 'male' df2 = df2[['destination', 'origin', 'gender', 'year', 'migrant_stock']] df2_emg['gender'] = 'male' df2_emg = df2_emg[['geo', 'gender', 'year', 'emigrant_stock']] df2_img['gender'] = 'male' df2_img = df2_img[['geo', 'gender', 'year', 'immigrant_stock']] df3 = create_datapoints(table3) df3_emg = create_emgire_datapoints(df3) df3_img = create_imgire_datapoints(df3) df3['gender'] = 'female' df3 = df3[['destination', 'origin', 'gender', 'year', 'migrant_stock']] df3_emg['gender'] = 'female' df3_emg = df3_emg[['geo', 'gender', 'year', 'emigrant_stock']] df3_img['gender'] = 'female' df3_img = df3_img[['geo', 'gender', 'year', 'immigrant_stock']] df_sex = pd.concat([df2, df3], ignore_index=True) df_sex_emg = pd.concat([df2_emg, df3_emg], ignore_index=True) df_sex_img = pd.concat([df2_img, df3_img], ignore_index=True) # save to files # df_sex.to_csv('../../ddf--datapoints--migrant_stock--by--destination--origin--gender--year.csv', index=False) # df_sex_emg.to_csv('../../ddf--datapoints--emigrant_stock--by--geo--gender--year.csv', index=False) # df_sex_img.to_csv('../../ddf--datapoints--immigrant_stock--by--geo--gender--year.csv', index=False) """ Explanation: datapoints Table 1-3 are loaded into DataFrame, we need to do some transformation to make them datapoints: drop unneeded columns converted origins/destinations names to codes origins are in columns, but should be in rows as a dimension for table 2 and table 3, add the gender column and merge them together filter origin = total (destination = total) to create immigrant (emigrant) data End of explanation """ from ddf_utils.model.ddf import Concept strings = [ ['name', 'Name'], ['domain', 'Domain'], ] measures = [ ['migration_stock', 'Migration Stock'], ['emigrant_stock', 'Emigrant Stock'], ['immigrant_stock', 'Immigrant Stock'] ] entity_domains = [ ['geo', 'Geo Location'], ['gender', 'Gender'] ] entity_sets_geo_ids = list(map(lambda x: x[4:], filter(lambda x: x.startswith('is--'), country_df.columns))) entity_sets_geo_names = list(map(lambda x: x.replace('_', ' ').title(), entity_sets_geo_ids)) entity_sets_geo = list(zip(entity_sets_geo_ids, entity_sets_geo_names)) roles = [ ['destination', 'Destination'], ['origin', 'Origin'] ] concepts = list() for e, n in strings: concepts.append(Concept(e, 'string', dict(name=n))) for e, n in measures: concepts.append(Concept(e, 'measure', dict(name=n))) for e, n in entity_domains: concepts.append(Concept(e, 'entity_domain', dict(name=n))) for e, n in entity_sets_geo: concepts.append(Concept(e, 'entity_set', dict(name=n, domain='geo'))) for e, n in roles: concepts.append(Concept(e, 'role', dict(name=n, domain='geo'))) concepts.append(Concept('year', 'time', dict(name='Time'))) cdf = pd.DataFrame.from_records([x.to_dict() for x in concepts]) # cdf.to_csv('../../ddf--concepts.csv', index=False) # create datapackage # run below ddf command in a terminal !ddf create_datapackage --update ../../ """ Explanation: Concepts The source file doesn't come with the properties of concepts so we need to create it manually. End of explanation """
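# A couple of quick consistency checks before packaging (a minimal sketch, not
# something the ddf command requires): primary keys should be unique and the
# numeric columns should not contain negative values.
for frame, keys in [(df, ['destination', 'origin', 'year']),
                    (df_sex, ['destination', 'origin', 'gender', 'year']),
                    (df_img, ['geo', 'year']),
                    (df_emg, ['geo', 'year'])]:
    assert not frame.duplicated(subset=keys).any()
    assert (frame.select_dtypes('number').dropna() >= 0).all().all()
print('basic datapoint checks passed')
"""
Explanation: Sanity checks
Before or after generating the datapackage it can be useful to run a couple of quick checks on the datapoint tables; this is only a minimal sketch of such checks.
End of explanation
"""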
BrownDwarf/ApJdataFrames
notebooks/Rebull2016b.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import pandas as pd pd.options.display.max_columns = 150 %config InlineBackend.figure_format = 'retina' import astropy from astropy.table import Table from astropy.io import ascii import numpy as np """ Explanation: ApJdataFrames Rebull 2016 a & b and Stauffer 2016 Titles: a.: Rotation in the Pleiades with K2: I. Data and First Results b.: ROTATION IN THE PLEIADES WITH K2. II. MULTIPERIOD STARS c.: ROTATION IN THE PLEIADES WITH K2. III. SPECULATIONS ON ORIGINS AND EVOLUTION Authors: L. M. Rebull, J. R. Stauffer, J. Bouvier, A. M. Cody, L. A. Hillenbrand, et al. Data are from multiple papers, including: http://iopscience.iop.org/article/10.3847/0004-6256/152/5/114/meta End of explanation """ #! wget http://iopscience.iop.org/1538-3881/152/5/113/suppdata/ajaa2e04t2_mrt.txt #! mv ajaa2e04t2_mrt.txt ../data/Rebull2016/ a_tab2 = ascii.read('../data/Rebull2016/ajaa2e04t2_mrt.txt') a_tab2.show_in_notebook(display_length=5) a_tab2.write('../data/Rebull2016/a_tab2.csv', format='ascii.csv', overwrite=True) """ Explanation: Rebull et al. 2016a, Table 2: Periods, Supporting Data, and Light Curve Categories for Periodic Pleiades End of explanation """ #! wget http://iopscience.iop.org/1538-3881/152/5/114/suppdata/ajaa2e05t2_mrt.txt #! mv ajaa2e05t2_mrt.txt ../data/Rebull2016/ b_tab2 = ascii.read('../data/Rebull2016/ajaa2e05t2_mrt.txt') b_tab2.show_in_notebook(display_length=5) b_tab2.write('../data/Rebull2016/b_tab2.csv', format='ascii.csv', overwrite=True) """ Explanation: The 759 element sample is the "Periodic Sample": The subset of all high-confidence ("best") plus lower-confidence ("ok") members that are neither too bright nor too faint (6 < ${K}_{{\rm{s}}}$ < 14.5) and are found to be periodic by us in these K2 data. The rows in this table are repeated in 2016b, table 2, but with some extra features. Rebull et al. 2016b, Table 2: Periods, Supporting Data, and Light Curve Categories for Periodic Pleiades End of explanation """ #! wget http://iopscience.iop.org/1538-3881/152/5/113/suppdata/ajaa2e04t7_mrt.txt #! mv ajaa2e04t7_mrt.txt ../data/Rebull2016/ b_tab7 = ascii.read('../data/Rebull2016/ajaa2e04t7_mrt.txt') """ Explanation: Rebull 2016b Table 7. Online Cross-identifications list End of explanation """ b_tab7.show_in_notebook(display_length=5) """ Explanation: The last column is a string of references, which can be pretty long. Let's ignore it for better display. End of explanation """ b_tab7.to_pandas().to_csv('../data/Rebull2016/Rebull2016b_tbl7.csv', index=False) """ Explanation: A useful table containing the various alternative names for objects. End of explanation """ #! wget http://iopscience.iop.org/1538-3881/152/5/115/suppdata/ajaa2dfet3_mrt.txt #! mv ajaa2dfet3_mrt.txt ../data/Rebull2016/ajaa2dfet3_mrt.txt c_tab3 = ascii.read('../data/Rebull2016/ajaa2dfet3_mrt.txt') c_tab3.show_in_notebook(display_length=5) """ Explanation: Stauffer et al. 2016 End of explanation """ df_a = a_tab2.to_pandas() df_b = b_tab2.to_pandas() df_c = c_tab3.to_pandas() """ Explanation: Convert everything to pandas, merge, save. It is convenient to merge the datasets into a single flat file of features and rows. 
End of explanation """ #df_a.head() for col in ['memb', 'LC']: df_a[col] = df_a[col].astype('category') """ Explanation: Clean the datasets: - Replace nonstandard NaN representations with IEEE standard NaN - Rename columns if necessary - Map categorical values to a pandas Categorical End of explanation """ new_in_paper_II = ['Per', 'dd', 'ddm', 'shch', 'beat', 'cpeak', 'resc', 'resd', 'dscu', 'cloud'] df_b = df_b[['EPIC'] + new_in_paper_II] df_b.columns #df_b.head() for col in ['Per', 'dd', 'ddm','shch', 'beat', 'cpeak', 'resc', 'resd', 'dscu', 'cloud']: df_b[col] = df_b[col].astype('category') """ Explanation: That one looks good. Let's keep only the columns that were new in paper II. End of explanation """ df_b.columns df_ab = pd.merge(df_a.rename(columns={'Per':'Per_lit'}), df_b.rename(columns={'Per':'Per_cat'}), how='outer', on='EPIC') """ Explanation: Try to merge these two dataframes to see if the periods match up. End of explanation """ #df_ab.head() """ Explanation: Spot check: Did the merge produce any rows in only one or the other dataframes? Nope! :) End of explanation """ df_c.head() df_c[df_c.isin([-9.99, -9.0])] = np.NaN df_c['Name'][df_c['Name']=='x'] = np.NaN """ Explanation: Clean the Stauffer et al. table. End of explanation """ df_abc = pd.merge(df_ab.rename(columns={'Name':'Name_IAU', 'Ksmag':'Ksmag_R16'}), df_c.rename(columns={'Name':'Name_adopt','Ksmag':'Ksmag_S16'}), how='left', on='EPIC') df_abc.columns #plt.plot(df_abc['(V-K)0'], df_abc['(V-K)'], '.') """ Explanation: We only want to keep information for the periodic sources from Rebull et al. 2016b. End of explanation """ df_abc.to_csv('../data/Rebull2016/Rebull_Stauffer_merge.csv', index=False) """ Explanation: Pretty much the same, except for a few values. Save the dataframe. End of explanation """
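# A quick read-back check (a minimal sketch): reload the merged CSV and look at its
# shape and at whether EPIC is still a unique key after the outer merges.
check = pd.read_csv('../data/Rebull2016/Rebull_Stauffer_merge.csv')
check.shape, check['EPIC'].is_unique
"""
Explanation: As a sanity check, the merged file can be read back to confirm its shape and whether EPIC remains a unique identifier after the merges.
End of explanation
"""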
BrainIntensive/OnlineBrainIntensive
resources/nipype/nipype_tutorial/notebooks/basic_interfaces.ipynb
mit
%pylab inline
from nilearn.plotting import plot_anat
plot_anat('/data/ds102/sub-01/anat/sub-01_T1w.nii.gz', title='original',
          display_mode='ortho', dim=-1, draw_cross=False, annotate=False)
"""
Explanation: Interfaces
In Nipype, interfaces are Python modules that allow you to use various external packages (e.g. FSL, SPM or FreeSurfer), even if they themselves are written in another programming language than Python. Such an interface knows what sort of options an external program has and how to execute it.
To illustrate why interfaces are so useful, let's have a look at the brain extraction algorithm BET from FSL, first in its original framework and then in the Nipype framework.
BET in the original framework
Let's take a look at our T1 image on which we want to run BET.
End of explanation
"""
%%bash
FILENAME=/data/ds102/sub-01/anat/sub-01_T1w
bet ${FILENAME}.nii.gz ${FILENAME}_bet.nii.gz
"""
Explanation: In its simplest form, you can run BET by just specifying the input image and tell it what to name the output image: bet <input> <output>
End of explanation """ skullstrip = BET(in_file="/data/ds102/sub-01/anat/sub-01_T1w.nii.gz", out_file="/data/ds102/sub-01/anat/T1w_nipype_bet.nii.gz", mask=True) res = skullstrip.run() """ Explanation: Another way to set the inputs on an interface object is to use them as keyword arguments when you construct the interface instance. Let's write the Nipype code from above in this way, but let's also add the option to create a brain mask. End of explanation """ plot_anat('/data/ds102/sub-01/anat/T1w_nipype_bet_mask.nii.gz', title='original', display_mode='ortho', dim=-1, draw_cross=False, annotate=False) """ Explanation: Now if we plot this, we see again that this worked exactly as before. No surprise there. End of explanation """ BET.help() """ Explanation: Help Function But how did we know what the names of the input parameters are? In the original framework we were able to just run BET, without any additional parameters to get an information page. In the Nipype framework we can achieve the same thing by using the help() function on an interface class. For the BET example, this is: End of explanation """ print res.outputs.mask_file """ Explanation: As you can see, we get three different informations. First, a general explanation of the class. Wraps command **bet** Use FSL BET command for skull stripping. For complete details, see the `BET Documentation. &lt;http://www.fmrib.ox.ac.uk/fsl/bet2/index.html&gt;`_ Examples -------- &gt;&gt;&gt; from nipype.interfaces import fsl &gt;&gt;&gt; from nipype.testing import example_data &gt;&gt;&gt; btr = fsl.BET() &gt;&gt;&gt; btr.inputs.in_file = example_data('structural.nii') &gt;&gt;&gt; btr.inputs.frac = 0.7 &gt;&gt;&gt; res = btr.run() # doctest: +SKIP Second, a list of all possible input parameters. Inputs:: [Mandatory] in_file: (an existing file name) input file to skull strip flag: %s, position: 0 [Optional] args: (a string) Additional parameters to the command flag: %s center: (a list of at most 3 items which are an integer (int or long)) center of gravity in voxels flag: -c %s environ: (a dictionary with keys which are a value of type 'str' and with values which are a value of type 'str', nipype default value: {}) Environment variables frac: (a float) fractional intensity threshold flag: -f %.2f functional: (a boolean) apply to 4D fMRI data flag: -F mutually_exclusive: functional, reduce_bias, robust, padding, remove_eyes, surfaces, t2_guided ignore_exception: (a boolean, nipype default value: False) Print an error message instead of throwing an exception in case the interface fails to run mask: (a boolean) create binary mask image flag: -m mesh: (a boolean) generate a vtk mesh brain surface flag: -e no_output: (a boolean) Don't generate segmented output flag: -n out_file: (a file name) name of output skull stripped image flag: %s, position: 1 outline: (a boolean) create surface outline image flag: -o output_type: ('NIFTI_PAIR' or 'NIFTI_PAIR_GZ' or 'NIFTI_GZ' or 'NIFTI') FSL output type padding: (a boolean) improve BET if FOV is very small in Z (by temporarily padding end slices) flag: -Z mutually_exclusive: functional, reduce_bias, robust, padding, remove_eyes, surfaces, t2_guided radius: (an integer (int or long)) head radius flag: -r %d reduce_bias: (a boolean) bias field and neck cleanup flag: -B mutually_exclusive: functional, reduce_bias, robust, padding, remove_eyes, surfaces, t2_guided remove_eyes: (a boolean) eye &amp; optic nerve cleanup (can be useful in SIENA) flag: -S mutually_exclusive: functional, reduce_bias, robust, padding, 
remove_eyes, surfaces, t2_guided robust: (a boolean) robust brain centre estimation (iterates BET several times) flag: -R mutually_exclusive: functional, reduce_bias, robust, padding, remove_eyes, surfaces, t2_guided skull: (a boolean) create skull image flag: -s surfaces: (a boolean) run bet2 and then betsurf to get additional skull and scalp surfaces (includes registrations) flag: -A mutually_exclusive: functional, reduce_bias, robust, padding, remove_eyes, surfaces, t2_guided t2_guided: (a file name) as with creating surfaces, when also feeding in non-brain-extracted T2 (includes registrations) flag: -A2 %s mutually_exclusive: functional, reduce_bias, robust, padding, remove_eyes, surfaces, t2_guided terminal_output: ('stream' or 'allatonce' or 'file' or 'none') Control terminal output: `stream` - displays to terminal immediately (default), `allatonce` - waits till command is finished to display output, `file` - writes output to file, `none` - output is ignored threshold: (a boolean) apply thresholding to segmented brain image and mask flag: -t vertical_gradient: (a float) vertical gradient in fractional intensity threshold (-1, 1) flag: -g %.2f And third, a list of all possible output parameters. Outputs:: inskull_mask_file: (a file name) path/name of inskull mask (if generated) inskull_mesh_file: (a file name) path/name of inskull mesh outline (if generated) mask_file: (a file name) path/name of binary brain mask (if generated) meshfile: (a file name) path/name of vtk mesh file (if generated) out_file: (a file name) path/name of skullstripped file (if generated) outline_file: (a file name) path/name of outline file (if generated) outskin_mask_file: (a file name) path/name of outskin mask (if generated) outskin_mesh_file: (a file name) path/name of outskin mesh outline (if generated) outskull_mask_file: (a file name) path/name of outskull mask (if generated) outskull_mesh_file: (a file name) path/name of outskull mesh outline (if generated) skull_mask_file: (a file name) path/name of skull mask (if generated) So here we see that Nipype also has output parameters. This is very practical. Because instead of typing the full path name to the mask volume, we can also more directly use the mask_file parameter. End of explanation """ skullstrip2 = BET() skullstrip2.run() """ Explanation: Interface errors To execute any interface class we use the run method on that object. For FSL, Freesurfer, and other programs, this will just make a system call with the command line we saw above. For MATLAB-based programs like SPM, it will actually generate a .m file and run a MATLAB process to execute it. All of that is handled in the background. But what happens if we didn't specify all necessary inputs? For instance, you need to give BET a file to work on. If you try and run it without setting the input in_file, you'll get a Python exception before anything actually gets executed: End of explanation """ skullstrip.inputs.mask = "mask_file.nii" """ Explanation: Nipype also knows some things about what sort of values should get passed to the inputs, and will raise (hopefully) informative exceptions when they are violated -- before anything gets processed. For example, BET just lets you say "create a mask," it doesn't let you name it. You may forget this, and try to give it a name. 
In this case, Nipype will raise a TraitError telling you what you did wrong: End of explanation """ skullstrip.inputs.in_file = "/data/oops_a_typo.nii" """ Explanation: Additionally, Nipype knows that, for inputs corresponding to files you are going to process, they should exist in your file system. If you pass a string that doesn't correspond to an existing file, it will error and let you know: End of explanation """ skullstrip = BET(in_file="/data/ds102/sub-01/anat/sub-01_T1w.nii.gz") print(skullstrip.cmdline) """ Explanation: It turns out that for default output files, you don't even need to specify a name. Nipype will know what files are going to be created and will generate a name for you: End of explanation """ res = skullstrip.run() print(res.outputs) """ Explanation: Note that it is going to write the output file to the local directory. What if you just ran this interface and wanted to know what it called the file that was produced? As you might have noticed before, calling the run method returned an object called InterfaceResult that we saved under the variable res. Let's inspect that object: End of explanation """ res2 = skullstrip.run(mask=True) print(res2.outputs) """ Explanation: We see that four possible files can be generated by BET. Here we ran it in the most simple way possible, so it just generated an out_file, which is the skull-stripped image. Let's see what happens when we generate a mask. By the way, you can also set inputs at runtime by including them as arguments to the run method: End of explanation """
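# Since the InterfaceResult records where BET wrote each file, the new mask can be
# displayed directly from res2.outputs instead of typing its path by hand (a small
# sketch reusing the plotting call from above).
plot_anat(res2.outputs.mask_file, title='mask from res2.outputs',
          display_mode='ortho', dim=-1, draw_cross=False, annotate=False)
"""
Explanation: Because the outputs object stores the paths of everything BET produced, the generated mask can be located and plotted without hard-coding its filename.
End of explanation
"""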
ybao2016/tf-slim-model
slim_walkthrough.ipynb
apache-2.0
import matplotlib %matplotlib inline import matplotlib.pyplot as plt import math import numpy as np import tensorflow as tf import time from datasets import dataset_utils # Main slim library slim = tf.contrib.slim """ Explanation: TF-Slim Walkthrough This notebook will walk you through the basics of using TF-Slim to define, train and evaluate neural networks on various tasks. It assumes a basic knowledge of neural networks. Table of contents <a href="#Install">Installation and setup</a><br> <a href='#MLP'>Creating your first neural network with TF-Slim</a><br> <a href='#ReadingTFSlimDatasets'>Reading Data with TF-Slim</a><br> <a href='#CNN'>Training a convolutional neural network (CNN)</a><br> <a href='#Pretained'>Using pre-trained models</a><br> Installation and setup <a id='Install'></a> As of 8/28/16, the latest stable release of TF is r0.10, which does not contain the latest version of slim. To obtain the latest version of TF-Slim, please install the most recent nightly build of TF as explained here. To use TF-Slim for image classification (as we do in this notebook), you also have to install the TF-Slim image models library from here. Let's suppose you install this into a directory called TF_MODELS. Then you should change directory to TF_MODELS/slim before running this notebook, so that these files are in your python path. To check you've got these two steps to work, just execute the cell below. If it complains about unknown modules, restart the notebook after moving to the TF-Slim models directory. End of explanation """ def regression_model(inputs, is_training=True, scope="deep_regression"): """Creates the regression model. Args: inputs: A node that yields a `Tensor` of size [batch_size, dimensions]. is_training: Whether or not we're currently training the model. scope: An optional variable_op scope for the model. Returns: predictions: 1-D `Tensor` of shape [batch_size] of responses. end_points: A dict of end points representing the hidden layers. """ with tf.variable_scope(scope, 'deep_regression', [inputs]): end_points = {} # Set the default weight _regularizer and acvitation for each fully_connected layer. with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.relu, weights_regularizer=slim.l2_regularizer(0.01)): # Creates a fully connected layer from the inputs with 32 hidden units. net = slim.fully_connected(inputs, 32, scope='fc1') end_points['fc1'] = net # Adds a dropout layer to prevent over-fitting. net = slim.dropout(net, 0.8, is_training=is_training) # Adds another fully connected layer with 16 hidden units. net = slim.fully_connected(net, 16, scope='fc2') end_points['fc2'] = net # Creates a fully-connected layer with a single hidden unit. Note that the # layer is made linear by setting activation_fn=None. predictions = slim.fully_connected(net, 1, activation_fn=None, scope='prediction') end_points['out'] = predictions return predictions, end_points """ Explanation: Creating your first neural network with TF-Slim <a id='MLP'></a> Below we give some code to create a simple multilayer perceptron (MLP) which can be used for regression problems. The model has 2 hidden layers. The output is a single node. When this function is called, it will create various nodes, and silently add them to whichever global TF graph is currently in scope. When a node which corresponds to a layer with adjustable parameters (eg., a fully connected layer) is created, additional parameter variable nodes are silently created, and added to the graph. 
(We will discuss how to train the parameters later.) We use variable scope to put all the nodes under a common name, so that the graph has some hierarchical structure. This is useful when we want to visualize the TF graph in tensorboard, or if we want to query related variables. The fully connected layers all use the same L2 weight decay and ReLu activations, as specified by arg_scope. (However, the final layer overrides these defaults, and uses an identity activation function.) We also illustrate how to add a dropout layer after the first fully connected layer (FC1). Note that at test time, we do not drop out nodes, but instead use the average activations; hence we need to know whether the model is being constructed for training or testing, since the computational graph will be different in the two cases (although the variables, storing the model parameters, will be shared, since they have the same name/scope). End of explanation """ with tf.Graph().as_default(): # Dummy placeholders for arbitrary number of 1d inputs and outputs inputs = tf.placeholder(tf.float32, shape=(None, 1)) outputs = tf.placeholder(tf.float32, shape=(None, 1)) # Build model predictions, end_points = regression_model(inputs) # Print name and shape of each tensor. print "Layers" for k, v in end_points.iteritems(): print 'name = {}, shape = {}'.format(v.name, v.get_shape()) # Print name and shape of parameter nodes (values not yet initialized) print "\n" print "Parameters" for v in slim.get_model_variables(): print 'name = {}, shape = {}'.format(v.name, v.get_shape()) """ Explanation: Let's create the model and examine its structure. We create a TF graph and call regression_model(), which adds nodes (tensors) to the graph. We then examine their shape, and print the names of all the model variables which have been implicitly created inside of each layer. We see that the names of the variables follow the scopes that we specified. End of explanation """ def produce_batch(batch_size, noise=0.3): xs = np.random.random(size=[batch_size, 1]) * 10 ys = np.sin(xs) + 5 + np.random.normal(size=[batch_size, 1], scale=noise) return [xs.astype(np.float32), ys.astype(np.float32)] x_train, y_train = produce_batch(200) x_test, y_test = produce_batch(200) plt.scatter(x_train, y_train) """ Explanation: Let's create some 1d regression data . We will train and test the model on some noisy observations of a nonlinear function. End of explanation """ def convert_data_to_tensors(x, y): inputs = tf.constant(x) inputs.set_shape([None, 1]) outputs = tf.constant(y) outputs.set_shape([None, 1]) return inputs, outputs # The following snippet trains the regression model using a mean_squared_error loss. ckpt_dir = '/tmp/regression_model/' with tf.Graph().as_default(): tf.logging.set_verbosity(tf.logging.INFO) inputs, targets = convert_data_to_tensors(x_train, y_train) # Make the model. predictions, nodes = regression_model(inputs, is_training=True) # Add the loss function to the graph. loss = tf.losses.mean_squared_error(labels=targets, predictions=predictions) # The total loss is the uers's loss plus any regularization losses. total_loss = slim.losses.get_total_loss() # Specify the optimizer and create the train op: optimizer = tf.train.AdamOptimizer(learning_rate=0.005) train_op = slim.learning.create_train_op(total_loss, optimizer) # Run the training inside a session. final_loss = slim.learning.train( train_op, logdir=ckpt_dir, number_of_steps=5000, save_summaries_secs=5, log_every_n_steps=500) print("Finished training. 
Last batch loss:", final_loss) print("Checkpoint saved in %s" % ckpt_dir) """ Explanation: Let's fit the model to the data The user has to specify the loss function and the optimizer, and slim does the rest. In particular, the slim.learning.train function does the following: For each iteration, evaluate the train_op, which updates the parameters using the optimizer applied to the current minibatch. Also, update the global_step. Occasionally store the model checkpoint in the specified directory. This is useful in case your machine crashes - then you can simply restart from the specified checkpoint. End of explanation """ with tf.Graph().as_default(): inputs, targets = convert_data_to_tensors(x_train, y_train) predictions, end_points = regression_model(inputs, is_training=True) # Add multiple loss nodes. mean_squared_error_loss = tf.losses.mean_squared_error(labels=targets, predictions=predictions) absolute_difference_loss = slim.losses.absolute_difference(predictions, targets) # The following two ways to compute the total loss are equivalent regularization_loss = tf.add_n(slim.losses.get_regularization_losses()) total_loss1 = mean_squared_error_loss + absolute_difference_loss + regularization_loss # Regularization Loss is included in the total loss by default. # This is good for training, but not for testing. total_loss2 = slim.losses.get_total_loss(add_regularization_losses=True) init_op = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init_op) # Will initialize the parameters with random weights. total_loss1, total_loss2 = sess.run([total_loss1, total_loss2]) print('Total Loss1: %f' % total_loss1) print('Total Loss2: %f' % total_loss2) print('Regularization Losses:') for loss in slim.losses.get_regularization_losses(): print(loss) print('Loss Functions:') for loss in slim.losses.get_losses(): print(loss) """ Explanation: Training with multiple loss functions. Sometimes we have multiple objectives we want to simultaneously optimize. In slim, it is easy to add more losses, as we show below. (We do not optimize the total loss in this example, but we show how to compute it.) End of explanation """ with tf.Graph().as_default(): inputs, targets = convert_data_to_tensors(x_test, y_test) # Create the model structure. (Parameters will be loaded below.) predictions, end_points = regression_model(inputs, is_training=False) # Make a session which restores the old parameters from a checkpoint. sv = tf.train.Supervisor(logdir=ckpt_dir) with sv.managed_session() as sess: inputs, predictions, targets = sess.run([inputs, predictions, targets]) plt.scatter(inputs, targets, c='r'); plt.scatter(inputs, predictions, c='b'); plt.title('red=true, blue=predicted') """ Explanation: Let's load the saved model and use it for prediction. End of explanation """ with tf.Graph().as_default(): inputs, targets = convert_data_to_tensors(x_test, y_test) predictions, end_points = regression_model(inputs, is_training=False) # Specify metrics to evaluate: names_to_value_nodes, names_to_update_nodes = slim.metrics.aggregate_metric_map({ 'Mean Squared Error': slim.metrics.streaming_mean_squared_error(predictions, targets), 'Mean Absolute Error': slim.metrics.streaming_mean_absolute_error(predictions, targets) }) # Make a session which restores the old graph parameters, and then run eval. 
sv = tf.train.Supervisor(logdir=ckpt_dir) with sv.managed_session() as sess: metric_values = slim.evaluation.evaluation( sess, num_evals=1, # Single pass over data eval_op=names_to_update_nodes.values(), final_op=names_to_value_nodes.values()) names_to_values = dict(zip(names_to_value_nodes.keys(), metric_values)) for key, value in names_to_values.iteritems(): print('%s: %f' % (key, value)) """ Explanation: Let's compute various evaluation metrics on the test set. In TF-Slim termiology, losses are optimized, but metrics (which may not be differentiable, e.g., precision and recall) are just measured. As an illustration, the code below computes mean squared error and mean absolute error metrics on the test set. Each metric declaration creates several local variables (which must be initialized via tf.initialize_local_variables()) and returns both a value_op and an update_op. When evaluated, the value_op returns the current value of the metric. The update_op loads a new batch of data, runs the model, obtains the predictions and accumulates the metric statistics appropriately before returning the current value of the metric. We store these value nodes and update nodes in 2 dictionaries. After creating the metric nodes, we can pass them to slim.evaluation.evaluation, which repeatedly evaluates these nodes the specified number of times. (This allows us to compute the evaluation in a streaming fashion across minibatches, which is usefulf for large datasets.) Finally, we print the final value of each metric. End of explanation """ import tensorflow as tf from datasets import dataset_utils url = "http://download.tensorflow.org/data/flowers.tar.gz" flowers_data_dir = '/tmp/flowers' if not tf.gfile.Exists(flowers_data_dir): tf.gfile.MakeDirs(flowers_data_dir) dataset_utils.download_and_uncompress_tarball(url, flowers_data_dir) """ Explanation: Reading Data with TF-Slim <a id='ReadingTFSlimDatasets'></a> Reading data with TF-Slim has two main components: A Dataset and a DatasetDataProvider. The former is a descriptor of a dataset, while the latter performs the actions necessary for actually reading the data. Lets look at each one in detail: Dataset A TF-Slim Dataset contains descriptive information about a dataset necessary for reading it, such as the list of data files and how to decode them. It also contains metadata including class labels, the size of the train/test splits and descriptions of the tensors that the dataset provides. For example, some datasets contain images with labels. Others augment this data with bounding box annotations, etc. The Dataset object allows us to write generic code using the same API, regardless of the data content and encoding type. TF-Slim's Dataset works especially well when the data is stored as a (possibly sharded) TFRecords file, where each record contains a tf.train.Example protocol buffer. TF-Slim uses a consistent convention for naming the keys and values inside each Example record. DatasetDataProvider A DatasetDataProvider is a class which actually reads the data from a dataset. It is highly configurable to read the data in various ways that may make a big impact on the efficiency of your training process. For example, it can be single or multi-threaded. If your data is sharded across many files, it can read each files serially, or from every file simultaneously. 
Demo: The Flowers Dataset For convenience, we've include scripts to convert several common image datasets into TFRecord format and have provided the Dataset descriptor files necessary for reading them. We demonstrate how easy it is to use these dataset via the Flowers dataset below. Download the Flowers Dataset <a id='DownloadFlowers'></a> We've made available a tarball of the Flowers dataset which has already been converted to TFRecord format. End of explanation """ from datasets import flowers import tensorflow as tf slim = tf.contrib.slim with tf.Graph().as_default(): dataset = flowers.get_split('train', flowers_data_dir) data_provider = slim.dataset_data_provider.DatasetDataProvider( dataset, common_queue_capacity=32, common_queue_min=1) image, label = data_provider.get(['image', 'label']) with tf.Session() as sess: with slim.queues.QueueRunners(sess): for i in xrange(4): np_image, np_label = sess.run([image, label]) height, width, _ = np_image.shape class_name = name = dataset.labels_to_names[np_label] plt.figure() plt.imshow(np_image) plt.title('%s, %d x %d' % (name, height, width)) plt.axis('off') plt.show() """ Explanation: Display some of the data. End of explanation """ def my_cnn(images, num_classes, is_training): # is_training is not used... with slim.arg_scope([slim.max_pool2d], kernel_size=[3, 3], stride=2): net = slim.conv2d(images, 64, [5, 5]) net = slim.max_pool2d(net) net = slim.conv2d(net, 64, [5, 5]) net = slim.max_pool2d(net) net = slim.flatten(net) net = slim.fully_connected(net, 192) net = slim.fully_connected(net, num_classes, activation_fn=None) return net """ Explanation: Convolutional neural nets (CNNs). <a id='CNN'></a> In this section, we show how to train an image classifier using a simple CNN. Define the model. Below we define a simple CNN. Note that the output layer is linear function - we will apply softmax transformation externally to the model, either in the loss function (for training), or in the prediction function (during testing). End of explanation """ import tensorflow as tf with tf.Graph().as_default(): # The model can handle any input size because the first layer is convolutional. # The size of the model is determined when image_node is first passed into the my_cnn function. # Once the variables are initialized, the size of all the weight matrices is fixed. # Because of the fully connected layers, this means that all subsequent images must have the same # input size as the first image. batch_size, height, width, channels = 3, 28, 28, 3 images = tf.random_uniform([batch_size, height, width, channels], maxval=1) # Create the model. num_classes = 10 logits = my_cnn(images, num_classes, is_training=True) probabilities = tf.nn.softmax(logits) # Initialize all the variables (including parameters) randomly. init_op = tf.global_variables_initializer() with tf.Session() as sess: # Run the init_op, evaluate the model outputs and print the results: sess.run(init_op) probabilities = sess.run(probabilities) print('Probabilities Shape:') print(probabilities.shape) # batch_size x num_classes print('\nProbabilities:') print(probabilities) print('\nSumming across all classes (Should equal 1):') print(np.sum(probabilities, 1)) # Each row sums to 1 """ Explanation: Apply the model to some randomly generated images. End of explanation """ from preprocessing import inception_preprocessing import tensorflow as tf slim = tf.contrib.slim def load_batch(dataset, batch_size=32, height=299, width=299, is_training=False): """Loads a single batch of data. 
Args: dataset: The dataset to load. batch_size: The number of images in the batch. height: The size of each image after preprocessing. width: The size of each image after preprocessing. is_training: Whether or not we're currently training or evaluating. Returns: images: A Tensor of size [batch_size, height, width, 3], image samples that have been preprocessed. images_raw: A Tensor of size [batch_size, height, width, 3], image samples that can be used for visualization. labels: A Tensor of size [batch_size], whose values range between 0 and dataset.num_classes. """ data_provider = slim.dataset_data_provider.DatasetDataProvider( dataset, common_queue_capacity=32, common_queue_min=8) image_raw, label = data_provider.get(['image', 'label']) # Preprocess image for usage by Inception. image = inception_preprocessing.preprocess_image(image_raw, height, width, is_training=is_training) # Preprocess the image for display purposes. image_raw = tf.expand_dims(image_raw, 0) image_raw = tf.image.resize_images(image_raw, [height, width]) image_raw = tf.squeeze(image_raw) # Batch it up. images, images_raw, labels = tf.train.batch( [image, image_raw, label], batch_size=batch_size, num_threads=1, capacity=2 * batch_size) return images, images_raw, labels from datasets import flowers # This might take a few minutes. train_dir = '/tmp/tfslim_model/' print('Will save model to %s' % train_dir) with tf.Graph().as_default(): tf.logging.set_verbosity(tf.logging.INFO) dataset = flowers.get_split('train', flowers_data_dir) images, _, labels = load_batch(dataset) # Create the model: logits = my_cnn(images, num_classes=dataset.num_classes, is_training=True) # Specify the loss function: one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes) slim.losses.softmax_cross_entropy(logits, one_hot_labels) total_loss = slim.losses.get_total_loss() # Create some summaries to visualize the training process: tf.summary.scalar('losses/Total Loss', total_loss) # Specify the optimizer and create the train op: optimizer = tf.train.AdamOptimizer(learning_rate=0.01) train_op = slim.learning.create_train_op(total_loss, optimizer) # Run the training: final_loss = slim.learning.train( train_op, logdir=train_dir, number_of_steps=1, # For speed, we just do 1 epoch save_summaries_secs=1) print('Finished training. Final batch loss %d' % final_loss) """ Explanation: Train the model on the Flowers dataset. Before starting, make sure you've run the code to <a href="#DownloadFlowers">Download the Flowers</a> dataset. Now, we'll get a sense of what it looks like to use TF-Slim's training functions found in learning.py. First, we'll create a function, load_batch, that loads batches of dataset from a dataset. Next, we'll train a model for a single step (just to demonstrate the API), and evaluate the results. End of explanation """ from datasets import flowers # This might take a few minutes. 
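# Evaluate the checkpoint written by the training run above: restore it, run the
# streaming metric updates, and report accuracy and recall@5 for a batch from the
# 'train' split.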
with tf.Graph().as_default(): tf.logging.set_verbosity(tf.logging.DEBUG) dataset = flowers.get_split('train', flowers_data_dir) images, _, labels = load_batch(dataset) logits = my_cnn(images, num_classes=dataset.num_classes, is_training=False) predictions = tf.argmax(logits, 1) # Define the metrics: names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({ 'eval/Accuracy': slim.metrics.streaming_accuracy(predictions, labels), 'eval/Recall@5': slim.metrics.streaming_recall_at_k(logits, labels, 5), }) print('Running evaluation Loop...') checkpoint_path = tf.train.latest_checkpoint(train_dir) metric_values = slim.evaluation.evaluate_once( master='', checkpoint_path=checkpoint_path, logdir=train_dir, eval_op=names_to_updates.values(), final_op=names_to_values.values()) names_to_values = dict(zip(names_to_values.keys(), metric_values)) for name in names_to_values: print('%s: %f' % (name, names_to_values[name])) """ Explanation: Evaluate some metrics. As we discussed above, we can compute various metrics besides the loss. Below we show how to compute prediction accuracy of the trained model, as well as top-5 classification accuracy. (The difference between evaluation and evaluation_loop is that the latter writes the results to a log directory, so they can be viewed in tensorboard.) End of explanation """ from datasets import dataset_utils url = "http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz" checkpoints_dir = '/tmp/checkpoints' if not tf.gfile.Exists(checkpoints_dir): tf.gfile.MakeDirs(checkpoints_dir) dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir) """ Explanation: Using pre-trained models <a id='Pretrained'></a> Neural nets work best when they have many parameters, making them very flexible function approximators. However, this means they must be trained on big datasets. Since this process is slow, we provide various pre-trained models - see the list here. You can either use these models as-is, or you can perform "surgery" on them, to modify them for some other task. For example, it is common to "chop off" the final pre-softmax layer, and replace it with a new set of weights corresponding to some new set of labels. You can then quickly fine tune the new model on a small new dataset. We illustrate this below, using inception-v1 as the base model. While models like Inception V3 are more powerful, Inception V1 is used for speed purposes. Take into account that VGG and ResNet final layers have only 1000 outputs rather than 1001. The ImageNet dataset provied has an empty background class which can be used to fine-tune the model to other tasks. VGG and ResNet models provided here don't use that class. We provide two examples of using pretrained models: Inception V1 and VGG-19 models to highlight this difference. 
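The "surgery" mentioned above usually amounts to restoring every pre-trained variable except the final logits layer and training a fresh classifier on top. A minimal sketch (the scope names below are the Inception V1 ones used later in this notebook; the fine-tuning section shows the full, runnable version):
variables_to_restore = slim.get_variables_to_restore(exclude=['InceptionV1/Logits', 'InceptionV1/AuxLogits'])
init_fn = slim.assign_from_checkpoint_fn('/tmp/checkpoints/inception_v1.ckpt', variables_to_restore)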
Download the Inception V1 checkpoint End of explanation """ import numpy as np import os import tensorflow as tf import urllib2 from datasets import imagenet from nets import inception from preprocessing import inception_preprocessing slim = tf.contrib.slim image_size = inception.inception_v1.default_image_size with tf.Graph().as_default(): url = 'https://upload.wikimedia.org/wikipedia/commons/7/70/EnglishCockerSpaniel_simon.jpg' image_string = urllib2.urlopen(url).read() image = tf.image.decode_jpeg(image_string, channels=3) processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False) processed_images = tf.expand_dims(processed_image, 0) # Create the model, use the default arg scope to configure the batch norm parameters. with slim.arg_scope(inception.inception_v1_arg_scope()): logits, _ = inception.inception_v1(processed_images, num_classes=1001, is_training=False) probabilities = tf.nn.softmax(logits) init_fn = slim.assign_from_checkpoint_fn( os.path.join(checkpoints_dir, 'inception_v1.ckpt'), slim.get_model_variables('InceptionV1')) with tf.Session() as sess: init_fn(sess) np_image, probabilities = sess.run([image, probabilities]) probabilities = probabilities[0, 0:] sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])] plt.figure() plt.imshow(np_image.astype(np.uint8)) plt.axis('off') plt.show() names = imagenet.create_readable_names_for_imagenet_labels() for i in range(5): index = sorted_inds[i] print('Probability %0.2f%% => [%s]' % (probabilities[index], names[index])) """ Explanation: Apply Pre-trained Inception V1 model to Images. We have to convert each image to the size expected by the model checkpoint. There is no easy way to determine this size from the checkpoint itself. So we use a preprocessor to enforce this. End of explanation """ from datasets import dataset_utils import tensorflow as tf url = "http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz" checkpoints_dir = '/tmp/checkpoints' if not tf.gfile.Exists(checkpoints_dir): tf.gfile.MakeDirs(checkpoints_dir) dataset_utils.download_and_uncompress_tarball(url, checkpoints_dir) """ Explanation: Download the VGG-16 checkpoint End of explanation """ import numpy as np import os import tensorflow as tf import urllib2 from datasets import imagenet from nets import vgg from preprocessing import vgg_preprocessing slim = tf.contrib.slim image_size = vgg.vgg_16.default_image_size with tf.Graph().as_default(): url = 'https://upload.wikimedia.org/wikipedia/commons/d/d9/First_Student_IC_school_bus_202076.jpg' image_string = urllib2.urlopen(url).read() image = tf.image.decode_jpeg(image_string, channels=3) processed_image = vgg_preprocessing.preprocess_image(image, image_size, image_size, is_training=False) processed_images = tf.expand_dims(processed_image, 0) # Create the model, use the default arg scope to configure the batch norm parameters. with slim.arg_scope(vgg.vgg_arg_scope()): # 1000 classes instead of 1001. 
logits, _ = vgg.vgg_16(processed_images, num_classes=1000, is_training=False) probabilities = tf.nn.softmax(logits) init_fn = slim.assign_from_checkpoint_fn( os.path.join(checkpoints_dir, 'vgg_16.ckpt'), slim.get_model_variables('vgg_16')) with tf.Session() as sess: init_fn(sess) np_image, probabilities = sess.run([image, probabilities]) probabilities = probabilities[0, 0:] sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])] plt.figure() plt.imshow(np_image.astype(np.uint8)) plt.axis('off') plt.show() names = imagenet.create_readable_names_for_imagenet_labels() for i in range(5): index = sorted_inds[i] # Shift the index of a class name by one. print('Probability %0.2f%% => [%s]' % (probabilities[index], names[index+1])) """ Explanation: Apply Pre-trained VGG-16 model to Images. We have to convert each image to the size expected by the model checkpoint. There is no easy way to determine this size from the checkpoint itself. So we use a preprocessor to enforce this. Pay attention to the difference caused by 1000 classes instead of 1001. End of explanation """ # Note that this may take several minutes. import os from datasets import flowers from nets import inception from preprocessing import inception_preprocessing slim = tf.contrib.slim image_size = inception.inception_v1.default_image_size def get_init_fn(): """Returns a function run by the chief worker to warm-start the training.""" checkpoint_exclude_scopes=["InceptionV1/Logits", "InceptionV1/AuxLogits"] exclusions = [scope.strip() for scope in checkpoint_exclude_scopes] variables_to_restore = [] for var in slim.get_model_variables(): excluded = False for exclusion in exclusions: if var.op.name.startswith(exclusion): excluded = True break if not excluded: variables_to_restore.append(var) return slim.assign_from_checkpoint_fn( os.path.join(checkpoints_dir, 'inception_v1.ckpt'), variables_to_restore) train_dir = '/tmp/inception_finetuned/' with tf.Graph().as_default(): tf.logging.set_verbosity(tf.logging.INFO) dataset = flowers.get_split('train', flowers_data_dir) images, _, labels = load_batch(dataset, height=image_size, width=image_size) # Create the model, use the default arg scope to configure the batch norm parameters. with slim.arg_scope(inception.inception_v1_arg_scope()): logits, _ = inception.inception_v1(images, num_classes=dataset.num_classes, is_training=True) # Specify the loss function: one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes) slim.losses.softmax_cross_entropy(logits, one_hot_labels) total_loss = slim.losses.get_total_loss() # Create some summaries to visualize the training process: tf.summary.scalar('losses/Total Loss', total_loss) # Specify the optimizer and create the train op: optimizer = tf.train.AdamOptimizer(learning_rate=0.01) train_op = slim.learning.create_train_op(total_loss, optimizer) # Run the training: final_loss = slim.learning.train( train_op, logdir=train_dir, init_fn=get_init_fn(), number_of_steps=2) print('Finished training. Last batch loss %f' % final_loss) """ Explanation: Fine-tune the model on a different set of labels. We will fine tune the inception model on the Flowers dataset. 
End of explanation """ import numpy as np import tensorflow as tf from datasets import flowers from nets import inception slim = tf.contrib.slim image_size = inception.inception_v1.default_image_size batch_size = 3 with tf.Graph().as_default(): tf.logging.set_verbosity(tf.logging.INFO) dataset = flowers.get_split('train', flowers_data_dir) images, images_raw, labels = load_batch(dataset, height=image_size, width=image_size) # Create the model, use the default arg scope to configure the batch norm parameters. with slim.arg_scope(inception.inception_v1_arg_scope()): logits, _ = inception.inception_v1(images, num_classes=dataset.num_classes, is_training=True) probabilities = tf.nn.softmax(logits) checkpoint_path = tf.train.latest_checkpoint(train_dir) init_fn = slim.assign_from_checkpoint_fn( checkpoint_path, slim.get_variables_to_restore()) with tf.Session() as sess: with slim.queues.QueueRunners(sess): sess.run(tf.initialize_local_variables()) init_fn(sess) np_probabilities, np_images_raw, np_labels = sess.run([probabilities, images_raw, labels]) for i in xrange(batch_size): image = np_images_raw[i, :, :, :] true_label = np_labels[i] predicted_label = np.argmax(np_probabilities[i, :]) predicted_name = dataset.labels_to_names[predicted_label] true_name = dataset.labels_to_names[true_label] plt.figure() plt.imshow(image.astype(np.uint8)) plt.title('Ground Truth: [%s], Prediction [%s]' % (true_name, predicted_name)) plt.axis('off') plt.show() """ Explanation: Apply fine tuned model to some images. End of explanation """
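# Not part of the original walkthrough: a minimal sketch showing how the fine-tuned
# checkpoint in train_dir could be applied to a single image file on disk. The JPEG
# path is a placeholder; everything else reuses the modules imported above.
import numpy as np
import tensorflow as tf
from datasets import flowers
from nets import inception
from preprocessing import inception_preprocessing

slim = tf.contrib.slim
image_size = inception.inception_v1.default_image_size

with tf.Graph().as_default():
    dataset = flowers.get_split('train', flowers_data_dir)

    # Read and preprocess a single image the same way the batched pipeline does.
    image_string = tf.read_file('/tmp/my_flower.jpg')  # placeholder path
    image = tf.image.decode_jpeg(image_string, channels=3)
    processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
    processed_images = tf.expand_dims(processed_image, 0)

    # Rebuild the network and restore the fine-tuned weights.
    with slim.arg_scope(inception.inception_v1_arg_scope()):
        logits, _ = inception.inception_v1(processed_images, num_classes=dataset.num_classes, is_training=False)
    probabilities = tf.nn.softmax(logits)

    init_fn = slim.assign_from_checkpoint_fn(
        tf.train.latest_checkpoint(train_dir),
        slim.get_variables_to_restore())

    with tf.Session() as sess:
        init_fn(sess)
        np_probabilities = sess.run(probabilities)
        predicted_label = np.argmax(np_probabilities[0])
        print('Prediction: %s' % dataset.labels_to_names[predicted_label])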
shead-custom-design/pipecat
docs/battery-chargers.ipynb
gpl-3.0
# nbconvert: hide from __future__ import absolute_import, division, print_function import sys sys.path.append("../features/steps") import test serial = test.mock_module("serial") serial.serial_for_url.side_effect = test.read_file("../data/icharger208b-charging", stop=3) import serial port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000) for line in port: print(line) """ Explanation: .. _battery-chargers: Battery Chargers Many modern "smart" battery chargers have data logging capabilities that you can use with Pipecat to view the state of a battery during charging. Typically, these chargers connect to your computer via a serial port or a serial-over-USB cable that acts like a traditional serial port. Data is then sent to the port during charging. For example, data from an iCharger 208B connected to a Mac computer using a USB cable could be read using the following: End of explanation """ import pipecat.utility port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000) pipe = pipecat.utility.readline(port) for record in pipe: print(record) """ Explanation: Here, we used the pySerial (http://pyserial.readthedocs.io) library to open a serial port and read data from the charger, which sends data to the port one line at a time. Note that the device name for the port - "/dev/cu.SLAB_USBtoUART" in this case - will vary depending on your device and operating system. For example, you might use "COM1" or "COM2" on Windows, or "/dev/ttyS0" on Linux. Our first step in using Pipecat to decipher the raw device output is to turn the for-loop from the previous example into a pipe: End of explanation """ import pipecat.device.charger port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000) pipe = pipecat.utility.readline(port) pipe = pipecat.device.charger.icharger208b(pipe) for record in pipe: print(record) """ Explanation: In this case, :func:pipecat.utility.readline converts the raw data into :ref:records that store each line of data for further processing. To decode the contents of each line, we add the appropriate Pipecat device to the end of the pipe: End of explanation """ import pipecat.record port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000) pipe = pipecat.utility.readline(port) pipe = pipecat.device.charger.icharger208b(pipe) for record in pipe: pipecat.record.dump(record) """ Explanation: As you can see, :func:pipecat.device.charger.icharger208b has converted the raw data records into charger-specific records containing human-readable fields whose values have appropriate physical units. 
Let's use :func:pipecat.record.dump to make the output a little more readable: End of explanation """ port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000) pipe = pipecat.utility.readline(port) pipe = pipecat.device.charger.icharger208b(pipe) for record in pipe: print(record[("battery", "voltage")], record[("battery", "current")]) """ Explanation: Now, you can extract just the data you care about from a record: End of explanation """ port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000) pipe = pipecat.utility.readline(port) pipe = pipecat.device.charger.icharger208b(pipe) for record in pipe: print(record[("battery", "current")].to(pipecat.units.amps)) """ Explanation: And you can convert units safely and explicitly: End of explanation """ fobj = open("../data/icharger208b-charging-sample") pipe = pipecat.utility.readline(fobj) pipe = pipecat.device.charger.icharger208b(pipe) for record in pipe: print(record[("battery", "charge")]) """ Explanation: As an aside, you may be wondering at this point why it's necessary to explicitly create the serial port and connect it to readline ... why not code that functionality directly into icharger208b? The answer is flexibility: by separating Pipecat's functionality into discrete, well-defined components, those components can be easily combined in new and unexpected ways. For example, you could use icharger208b with a charger that communicated over a network socket instead of a serial port. Or you could "replay" data from a charger stored in a file: End of explanation """ # nbconvert: hide serial.serial_for_url.side_effect = test.read_file("../data/icharger208b-charging", rate=pipecat.quantity(2, pipecat.units.seconds), stop=3) port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000) pipe = pipecat.utility.readline(port) pipe = pipecat.device.charger.icharger208b(pipe) pipe = pipecat.utility.add_timestamp(pipe) for record in pipe: pipecat.record.dump(record) """ Explanation: Let's explore other things we can do with our pipe. To begin, you might want to add additional metadata to the records returned from a device. For example, you might want to append timestamps: End of explanation """ port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000) pipe = pipecat.utility.readline(port) pipe = pipecat.device.charger.icharger208b(pipe) pipe = pipecat.utility.add_timestamp(pipe) for record in pipe: print( record["timestamp"].to("local").format("YYYY-MM-DD hh:mm:ss a"), record[("battery", "voltage")], ) """ Explanation: Note that :func:pipecat.utility.add_timestamp has added a timestamp field to each record. Timestamps in Pipecat are always recorded using UTC (universal) time, so you will likely want to convert them to your local timezone before formatting them for display: End of explanation """ # nbconvert: hide serial.serial_for_url.side_effect = test.read_file("../data/icharger208b-charging") import pipecat.store port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000) pipe = pipecat.utility.readline(port) pipe = pipecat.device.charger.icharger208b(pipe) pipe = pipecat.store.cache(pipe) for record in pipe: pass print(len(pipe.table)) print(pipe.table[("battery", "voltage")]) """ Explanation: You could also use :func:pipecat.utility.add_field to append your own custom field to every record that passes through the pipe. Now let's consider calculating some simple statistics, such as the minimum and maximum battery voltage while charging. 
When we iterate over the contents of a pipe using a for loop, we receive one record at-a-time until the pipe is empty. We could keep track of a "running" minimum and maximum during iteration, and there are use-cases where that is the best way to solve the problem. However, for moderately-sized data, Pipecat provides a more convenient approach: End of explanation """ import pipecat.store port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000) pipe = pipecat.utility.readline(port) pipe = pipecat.device.charger.icharger208b(pipe) pipe = pipecat.store.cache(pipe) for record in pipe: pass print("Min:", pipe.table[("battery", "voltage")].min()) print("Max:", pipe.table[("battery", "voltage")].max()) """ Explanation: Here, :func:pipecat.store.cache creates an in-memory cache that stores every record it receives. We have a do-nothing for loop that reads data from the charger to populate the cache. Once that's complete, we can use the cache table attribute to retrieve data from the cache using the same keys and syntax we would use with a record. Unlike a record, the cache returns every value for a given key at once (using a Numpy array), which makes it easy to compute the statistics we're interested in: End of explanation """ import toyplot canvas = toyplot.Canvas() axes = canvas.cartesian(grid=(3, 1, 0), label="Battery", ylabel="Voltage (V)") axes.plot(pipe.table[("battery", "voltage")].to(pipecat.units.volt)) axes = canvas.cartesian(grid=(3, 1, 1), ylabel="Current (A)") axes.plot(pipe.table[("battery", "current")].to(pipecat.units.amp)) axes = canvas.cartesian(grid=(3, 1, 2), ylabel="Charge (mAH)") axes.plot(pipe.table[("battery", "charge")].to(pipecat.units.milliamp * pipecat.units.hour)); """ Explanation: Consolidating fields using the cache is also perfect for generating plots with a library like Toyplot (http://toyplot.readthedocs.io): End of explanation """ # nbconvert: hide import os serial.serial_for_url.side_effect = test.read_file("../data/icharger208b-charging", rate=pipecat.quantity(2, pipecat.units.seconds), stop=3) if os.path.exists("charger.pickle"): os.remove("charger.pickle") import pipecat.store.pickle port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000) pipe = pipecat.utility.readline(port) pipe = pipecat.device.charger.icharger208b(pipe) pipe = pipecat.utility.add_timestamp(pipe) pipe = pipecat.store.pickle.write(pipe, "charger.pickle") for record in pipe: pass pipe = pipecat.store.pickle.read("charger.pickle") pipe = pipecat.store.cache(pipe) for record in pipe: pipecat.record.dump(record) print("Average:", pipe.table[("battery", "voltage")].mean()) """ Explanation: Note that nothing prevents us from doing useful work in the for loop that populates the cache, and nothing prevents us from accessing the cache within the loop. For example, we might want to display field values from individual records alongside a running average computed from the cache. Or we might want to update our plot periodically as the loop progresses. Moving on, you will likely want to store records to disk for later access. Pipecat provides components to make this easy too. 
First, you can add :func:pipecat.store.pickle.write to the end of a pipe, to write records to disk using Python's pickle format: End of explanation """ # nbconvert: hide serial.serial_for_url.side_effect = test.read_file("../data/icharger208b-charging", rate=pipecat.quantity(2, pipecat.units.seconds), stop=10, block=True) """ Explanation: This is another example of the interchangeability of the Pipecat components: the pickle writer is a record consumer, and the pickle reader is a record generator. In essence, we "broke" our previous pipe into two separate pipes that communicate via the filesystem. While we won't go into detail here, a similar approach could be used to communicate between threads using a message queue or between processes over a socket. End of explanation """ import pipecat.limit port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000) pipe = pipecat.utility.readline(port) pipe = pipecat.device.charger.icharger208b(pipe) pipe = pipecat.utility.add_timestamp(pipe) pipe = pipecat.limit.count(pipe, count=2) for record in pipe: pipecat.record.dump(record) """ Explanation: There is one final issue that we've ignored so far: when to stop logging data. The :func:pipecat.utility.readline() function will read data from the serial port as long as the port is open, blocking indefinitely if no data is arriving. That means that for all the preceeding examples the for loop will never end unless the serial port is closed (i.e. the external device is turned-off or unplugged), or the code is interrupted using Control-C. While that's fine for prototyping at the command line, we need to have a way to stop collecting data and shutdown cleanly if we're going to automate data logging processes. Fortunately, Pipecat provides several easy-to-use functions to do just that: End of explanation """ import pipecat.limit port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000) pipe = pipecat.utility.readline(port) pipe = pipecat.device.charger.icharger208b(pipe) pipe = pipecat.utility.add_timestamp(pipe) pipe = pipecat.limit.duration(pipe, duration=pipecat.quantity(4, pipecat.units.seconds)) for record in pipe: pipecat.record.dump(record) """ Explanation: Here, :func:pipecat.limit.count ends the loop when count records have been received. This is often handy during development to limit the amount of data consumed from a device that produces output continuously. However, this approach is no good for devices like our charger that will produce a finite, indeterminate number of records - if the device stops sending records before the count has been reached, the loop will still block. Instead, we could use :func:pipecat.limit.duration to limit the total amount of time the loop is allowed to run instead: End of explanation """ # nbconvert: hide serial.serial_for_url.side_effect = test.read_file("../data/icharger208b-charging", rate=pipecat.quantity(2, pipecat.units.seconds), start=1285, block=True) import pipecat.limit port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000) pipe = pipecat.utility.readline(port) pipe = pipecat.device.charger.icharger208b(pipe) pipe = pipecat.utility.add_timestamp(pipe) pipe = pipecat.limit.until(pipe, key=("charger", "mode"), value="finished") for record in pipe: pipecat.record.dump(record) """ Explanation: This approach is an improvement because it puts an upper-bound on the amount of time the loop will run, whether the device has stopped sending records or not. 
However, it's still error-prone, since we don't know in advance how long charging will take - if we set the duration too low, it may stop the loop before charging is complete. If we set the duration too high, we will capture all the records we want, but we will likely waste time waiting for records that will never come. Ideally, we would like to exit the loop as soon as the charger tells us it's finished. Fortunately, the charger provides a field - charger/mode that can do just that: End of explanation """ import pipecat.limit port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000) pipe = pipecat.utility.readline(port) pipe = pipecat.device.charger.icharger208b(pipe) pipe = pipecat.utility.add_timestamp(pipe) pipe = pipecat.limit.until(pipe, key=("charger", "mode"), value="finished") pipe = pipecat.limit.timeout(pipe, timeout=pipecat.quantity(5, pipecat.units.seconds)) for record in pipe: pipecat.record.dump(record) """ Explanation: :func:pipecat.limit.until terminates the loop as soon as it receives a record with the given key and value. This approach finally gets us our desired behavior (loop ends as soon as the charger is finished), but it could use just a little more work to make it robust. For example, what happens if the charger stops sending data before the mode changes? We could combine :func:pipecat.limit.until with :func:pipecat.limit.duration, but that would still suffer from the terminate-too-soon / waste-too-much-time problem. Fortunately, we know from testing that our charger sends records every two seconds (if at all), and Pipecat provides :func:pipecat.limit.timeout, which can terminate the loop if it doesn't receive a record within a specified time interval: End of explanation """
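# Not part of the original notebook: a minimal sketch combining the pieces above into
# an unattended logging run -- decode the charger output, timestamp each record, store
# everything to disk, and stop cleanly when the charger reports it is finished or the
# data stream goes quiet. The device name and output filename are assumptions.
import pipecat.device.charger
import pipecat.limit
import pipecat.store.pickle
import pipecat.utility
import serial

port = serial.serial_for_url("/dev/cu.SLAB_USBtoUART", baudrate=128000)
pipe = pipecat.utility.readline(port)
pipe = pipecat.device.charger.icharger208b(pipe)
pipe = pipecat.utility.add_timestamp(pipe)
pipe = pipecat.store.pickle.write(pipe, "charger-log.pickle")
pipe = pipecat.limit.until(pipe, key=("charger", "mode"), value="finished")
pipe = pipecat.limit.timeout(pipe, timeout=pipecat.quantity(5, pipecat.units.seconds))
for record in pipe:
    pass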
ioos/pyoos
notebooks/NERRS.ipynb
lgpl-3.0
from datetime import datetime, timedelta import pandas as pd from pyoos.collectors.nerrs.nerrs_soap import NerrsSoap # FROM pyoos SOS handling # Convenience function to build record style time series representation def flatten_element(p): rd = {'time':p.time} for m in p.members: rd[m['standard']] = m['value'] return rd # sta.get_unique_members() serves the same function as the pyoos SOS get_unique_members method # Convenience function to extract a dict of unique members (observed properties) by standard name obsprops_bystdname = lambda sta: {m['standard']:m for m in sta.get_unique_members()} """ Explanation: https://gist.github.com/emiliom/57e84aee123ca60c4fa3 Accessing a NERRS station with Pyoos, via CDMO SOAP web services Illustrates querying all stations ("features") from a NERRS Reserve site; access to data from a NERRS station (specified by its station code); extraction of station metadata; and conversion of the returned multi-variable time series to a pandas DataFrame, followed by a time series plot from the DataFrame. Builds off the work from Dan Ramage (SECOORA), whose code is listed in the last cell, at the end. Note that this is running from a pyoos fork with some small but key changes to the nerrs collector. 2014 May 8-10. Emilio Mayorga. End of explanation """ # NERRS/CDMO access token. accesstoken = 'TOKEN STRING' # Initialize pyoos NERRS collector object nerrsData = NerrsSoap() # Access pdbpfmet station, for the last 7 days (roughly) nerrsData.filter(features=['pdbpfmet'], start=datetime.utcnow() - timedelta(days=7), end=datetime.utcnow() - timedelta(hours=12)) response = nerrsData.collect(accesstoken) sta = response.elements[0] obsprops_bystdname_dict = obsprops_bystdname(sta) # FROM pyoos SOS handling # For first (and only) station flattenedsta_0 = map(flatten_element, sta.elements) sta_0df = pd.DataFrame.from_records(flattenedsta_0, index=['time']) sta_0df.head() # Time series plot. # "wind_speed" is currently mispelled; that's in pyoos, and can be fixed easily obsprop_name = 'wind_sped' obsprop = obsprops_bystdname_dict[obsprop_name] sta_0df[obsprop_name].plot() ylabel(obsprop_name + ' ('+obsprop['unit']+')'); """ Explanation: First here's a very compact set of statements to get and plot the data for a station. No exploratory side trips. NOTE: I manually removed (commented out) the NERRS/CDMO access token after running this notebook, before uploading notebook to my github gist. Replace 'TOKEN STRING' with a token obtained from the NERRS/CDMO office End of explanation """ # pyoos NERRS collector nerrsData = NerrsSoap() """ Explanation: Now the same thing, but with lots of exploration in between End of explanation """ # Get all Padilla Bay stations (pdb) [featureid for featureid in nerrsData.list_features() if featureid.startswith('pdb')] # Access pdbpfmet station, for the last 7 days (roughly) nerrsData.filter(features=['pdbpfmet'], start=datetime.utcnow() - timedelta(days=7), end=datetime.utcnow() - timedelta(hours=12)) #nerrsData.filter(variables=["ATemp"]) response = nerrsData.collect() # The raw response (a string) is not used outside this cell. The collect method response is what's used # I'm showing the raw response here, just for reference raw = nerrsData.raw() type(raw), raw.keys() # response.elements is a one-element array with a paegan.cdm.dsg.features.station.Station element response.elements # Looks like the station in the response doesn't include any info about the Reserve it belongs to. Too bad. # Or is one of the pieces of information below the NERRS site? 
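# Pull out the single Station element returned by the collector and inspect its
# attributes (uid, name, description, type, location, properties) piece by piece.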
sta = response.elements[0] sta.__dict__.keys() sta.uid, sta.name, sta.description, sta.type, sta.location, sta.properties # 'siteid' and 'location_description' seem to refer to the NERRS reserve/site sta.get_property('siteid'), sta._properties staloc = sta.get_location() print staloc, '||', staloc.type, '||', staloc.xy obsprops_bystdname_dict = obsprops_bystdname(sta) obsprops_bystdname_dict['wind_sped'] # The individual observations are returned in the station "elements" stael = sta.elements type(stael), len(stael) stael0 = stael[0] stael0.time # See sta.get_unique_members(), above # stael0.get_member_names() returns a list of member names for this station 'element' stael0.members # From paegan: flatten Returns a Generator of Points that are part of this collection # Just exploring what this does... response.flatten # FROM pyoos SOS handling # For first (and only) station flattenedsta_0 = map(flatten_element, sta.elements) sta_0df = pd.DataFrame.from_records(flattenedsta_0, index=['time']) sta_0df.head() # Time series plot. # "wind_speed" is currently mispelled; that's in pyoos, and can be fixed easily obsprop_name = 'wind_sped' obsprop = obsprops_bystdname_dict[obsprop_name] sta_0df[obsprop_name].plot() ylabel(obsprop_name + ' ('+obsprop['unit']+')'); """ Explanation: May 10: Not sure if this will work, b/c the access token is passed via the collect method, so it hasn't been passed here yet! End of explanation """
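# Not part of the original notebook: a small follow-on sketch. With the observations in
# a pandas DataFrame (sta_0df above), ordinary pandas tools apply -- for example, summary
# statistics per observed property and export to CSV (the output filename is an assumption).
print(sta_0df.describe())
sta_0df.to_csv('pdbpfmet_observations.csv')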