SalazarPevelll committed
Commit 8fcf809 · Parent(s): 4f018ed

fe

This view is limited to 50 files because it contains too many changes. See raw diff for the full set of changes.
- saved_models/server/API_result.csv +1 -0
- saved_models/server/config.json +23 -0
- saved_models/server/iteration_structure.json +1 -0
- saved_models/server/res.json +0 -0
- saved_models/server/server.py +388 -0
- saved_models/server/timevis_backend/__init__.py +0 -0
- saved_models/server/timevis_backend/__pycache__/__init__.cpython-37.pyc +0 -0
- saved_models/server/timevis_backend/__pycache__/backend_adapter.cpython-37.pyc +0 -0
- saved_models/server/timevis_backend/__pycache__/res_logging.cpython-37.pyc +0 -0
- saved_models/server/timevis_backend/__pycache__/utils.cpython-37.pyc +0 -0
- saved_models/server/timevis_backend/backend_adapter.py +765 -0
- saved_models/server/timevis_backend/res_logging.py +12 -0
- saved_models/server/timevis_backend/utils.py +250 -0
- saved_models/tensorboard/.bazelrc +9 -0
- saved_models/tensorboard/.clang-format +1 -0
- saved_models/tensorboard/.git-blame-ignore-revs +92 -0
- saved_models/tensorboard/.github/ISSUE_TEMPLATE/bug_report.md +40 -0
- saved_models/tensorboard/.github/ISSUE_TEMPLATE/feature_request.md +11 -0
- saved_models/tensorboard/.github/ISSUE_TEMPLATE/installation_problem.md +31 -0
- saved_models/tensorboard/.github/PULL_REQUEST_TEMPLATE.md +9 -0
- saved_models/tensorboard/.github/workflows/ci.yml +160 -0
- saved_models/tensorboard/.gitignore +7 -0
- saved_models/tensorboard/.prettierrc.json +6 -0
- saved_models/tensorboard/.travis.yml +110 -0
- saved_models/tensorboard/.vscode/settings.json +3 -0
- saved_models/tensorboard/ADDING_A_PLUGIN.md +194 -0
- saved_models/tensorboard/AUTHORS +10 -0
- saved_models/tensorboard/BUILD +24 -0
- saved_models/tensorboard/BUILD-local_execution_config_python.txt +290 -0
- saved_models/tensorboard/CONTRIBUTING.md +22 -0
- saved_models/tensorboard/DEVELOPMENT.md +117 -0
- saved_models/tensorboard/LICENSE +203 -0
- saved_models/tensorboard/README.md +427 -0
- saved_models/tensorboard/RELEASE.md +716 -0
- saved_models/tensorboard/SECURITY.md +10 -0
- saved_models/tensorboard/WORKSPACE +67 -0
- saved_models/tensorboard/WORKSPACE.bak +112 -0
- saved_models/tensorboard/ci/bazelrc +34 -0
- saved_models/tensorboard/ci/download_bazel.sh +38 -0
- saved_models/tensorboard/ci/download_buildifier.sh +38 -0
- saved_models/tensorboard/ci/download_buildozer.sh +38 -0
- saved_models/tensorboard/ci/download_executable.sh +48 -0
- saved_models/tensorboard/package.json +136 -0
- saved_models/tensorboard/pyproject.toml +5 -0
- saved_models/tensorboard/python_configure.bzl.txt +313 -0
- saved_models/tensorboard/tensorboard/BUILD +39 -0
- saved_models/tensorboard/tensorboard/components/BUILD +75 -0
- saved_models/tensorboard/tensorboard/components/analytics.ts +20 -0
- saved_models/tensorboard/tensorboard/components/experimental/plugin_lib/BUILD +30 -0
- saved_models/tensorboard/tensorboard/components/experimental/plugin_lib/core.ts +25 -0
saved_models/server/API_result.csv
ADDED
@@ -0,0 +1 @@
SI,admin-e,2023-02-22-20:07:48
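The CSV above is the API usage log: each row is an action code, a username, and a timestamp, appended by timevis_backend/res_logging.py's add_line, which is called throughout server.py below. The body of res_logging.py is not rendered in this view, so the following is only a plausible sketch of add_line, inferred from its call sites (e.g. add_line(API_result_path, ['SI', username])) and the row format above; it is not part of the commit.

import csv
import time

def add_line(path, info_list):
    """Append one action record plus a timestamp as a CSV row (hypothetical reconstruction)."""
    now = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
    with open(path, "a", newline="") as f:
        csv.writer(f).writerow(list(info_list) + [now])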
saved_models/server/config.json
ADDED
@@ -0,0 +1,23 @@
{
    "embeddings": [
        {
            "tensorName": "CIFAR10 with images",
            "tensorShape": [
                10000,
                784
            ],
            "tensorPath": "https://storage.googleapis.com/embedding-projector/data/mnist_10k_784d_tensors.bytes",
            "metadataPath": "https://gist.githubusercontent.com/hzf1174/3a7e85af7d09ebdfafac3d4d3ba5e71f/raw/502ad8aedc40fab7e56db917c57b48eaf0bd28fa/metadata.csv",
            "sprite": {
                "imagePath": "cifar10.png",
                "singleImageDim": [
                    32,
                    32
                ]
            }
        }
    ],
    "modelCheckpointPath": "Demo datasets",
    "ServerIP": "localhost",
    "ServerPort": "5001"
}
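For reference (not part of the commit): the commented-out startup block in server.py's __main__ section below shows how this file would be consumed. A minimal sketch, assuming the server runs from the directory containing config.json:

import json

with open("config.json", "r") as f:
    config = json.load(f)

ip_address = config["ServerIP"]   # "localhost"
port = int(config["ServerPort"])  # note: the port is stored as the string "5001"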
saved_models/server/iteration_structure.json
ADDED
@@ -0,0 +1 @@
[{"value": 1, "name": "1", "pid": ""},{"value": 2, "name": "checkpoint", "pid": 1}, {"value": 3, "name": "checkpoint", "pid": 2}, {"value": 4, "name": "checkpoint", "pid": 3}, {"value": 5, "name": "checkpoint", "pid": 4}, {"value": 6, "name": "checkpoint", "pid": 5}, {"value": 7, "name": "checkpoint", "pid": 6}, {"value": 8, "name": "checkpoint", "pid": 7}, {"value": 9, "name": "checkpoint", "pid": 8}, {"value": 10, "name": "checkpoint", "pid": 9}, {"value": 11, "name": "checkpoint", "pid": 10}, {"value": 12, "name": "checkpoint", "pid": 11}, {"value": 13, "name": "checkpoint", "pid": 12}, {"value": 14, "name": "checkpoint", "pid": 13}, {"value": 15, "name": "checkpoint", "pid": 14}, {"value": 16, "name": "checkpoint", "pid": 15}, {"value": 17, "name": "checkpoint", "pid": 16}, {"value": 18, "name": "checkpoint", "pid": 17}, {"value": 19, "name": "checkpoint", "pid": 18}, {"value": 20, "name": "checkpoint", "pid": 19}, {"value": 21, "name": "checkpoint", "pid": 20}, {"value": 22, "name": "checkpoint", "pid": 21}, {"value": 23, "name": "checkpoint", "pid": 22}, {"value": 24, "name": "checkpoint", "pid": 23}, {"value": 25, "name": "checkpoint", "pid": 24}, {"value": 26, "name": "checkpoint", "pid": 25}, {"value": 27, "name": "checkpoint", "pid": 26}, {"value": 28, "name": "checkpoint", "pid": 27}, {"value": 29, "name": "checkpoint", "pid": 28}, {"value": 30, "name": "checkpoint", "pid": 29}, {"value": 31, "name": "checkpoint", "pid": 30}, {"value": 32, "name": "checkpoint", "pid": 31}, {"value": 33, "name": "checkpoint", "pid": 32}, {"value": 34, "name": "checkpoint", "pid": 33}, {"value": 35, "name": "checkpoint", "pid": 34}, {"value": 36, "name": "checkpoint", "pid": 35}, {"value": 37, "name": "checkpoint", "pid": 36}, {"value": 38, "name": "checkpoint", "pid": 37}, {"value": 39, "name": "checkpoint", "pid": 38}, {"value": 40, "name": "checkpoint", "pid": 39}, {"value": 41, "name": "checkpoint", "pid": 40}, {"value": 42, "name": "checkpoint", "pid": 41}, {"value": 43, "name": "checkpoint", "pid": 42}, {"value": 44, "name": "checkpoint", "pid": 43}, {"value": 45, "name": "checkpoint", "pid": 44}, {"value": 46, "name": "checkpoint", "pid": 45}, {"value": 47, "name": "checkpoint", "pid": 46}, {"value": 48, "name": "checkpoint", "pid": 47}, {"value": 49, "name": "checkpoint", "pid": 48}, {"value": 50, "name": "checkpoint", "pid": 49}, {"value": 51, "name": "checkpoint", "pid": 50}, {"value": 52, "name": "checkpoint", "pid": 51}, {"value": 53, "name": "checkpoint", "pid": 52}, {"value": 54, "name": "checkpoint", "pid": 53}, {"value": 55, "name": "checkpoint", "pid": 54}, {"value": 56, "name": "checkpoint", "pid": 55}, {"value": 57, "name": "checkpoint", "pid": 56}, {"value": 58, "name": "checkpoint", "pid": 57}, {"value": 59, "name": "checkpoint", "pid": 58}, {"value": 60, "name": "checkpoint", "pid": 59}, {"value": 61, "name": "checkpoint", "pid": 60}, {"value": 62, "name": "checkpoint", "pid": 61}, {"value": 63, "name": "checkpoint", "pid": 62}, {"value": 64, "name": "checkpoint", "pid": 63}, {"value": 65, "name": "checkpoint", "pid": 64}, {"value": 66, "name": "checkpoint", "pid": 65}, {"value": 67, "name": "checkpoint", "pid": 66}, {"value": 68, "name": "checkpoint", "pid": 67}, {"value": 69, "name": "checkpoint", "pid": 68}, {"value": 70, "name": "checkpoint", "pid": 69}, {"value": 71, "name": "checkpoint", "pid": 70}, {"value": 72, "name": "checkpoint", "pid": 71}, {"value": 73, "name": "checkpoint", "pid": 72}, {"value": 74, "name": "checkpoint", "pid": 73}, {"value": 75, "name": 
"checkpoint", "pid": 74}, {"value": 76, "name": "checkpoint", "pid": 75}, {"value": 77, "name": "checkpoint", "pid": 76}, {"value": 78, "name": "checkpoint", "pid": 77}, {"value": 79, "name": "checkpoint", "pid": 78}, {"value": 80, "name": "checkpoint", "pid": 79}, {"value": 81, "name": "checkpoint", "pid": 80}, {"value": 82, "name": "checkpoint", "pid": 81}, {"value": 83, "name": "checkpoint", "pid": 82}, {"value": 84, "name": "checkpoint", "pid": 83}, {"value": 85, "name": "checkpoint", "pid": 84}, {"value": 86, "name": "checkpoint", "pid": 85}, {"value": 87, "name": "checkpoint", "pid": 86}, {"value": 88, "name": "checkpoint", "pid": 87}, {"value": 89, "name": "checkpoint", "pid": 88}, {"value": 90, "name": "checkpoint", "pid": 89}, {"value": 91, "name": "checkpoint", "pid": 90}, {"value": 92, "name": "checkpoint", "pid": 91}, {"value": 93, "name": "checkpoint", "pid": 92}, {"value": 94, "name": "checkpoint", "pid": 93}, {"value": 95, "name": "checkpoint", "pid": 94}, {"value": 96, "name": "checkpoint", "pid": 95}, {"value": 97, "name": "checkpoint", "pid": 96}, {"value": 98, "name": "checkpoint", "pid": 97}, {"value": 99, "name": "checkpoint", "pid": 98}, {"value": 100, "name": "checkpoint", "pid": 99}, {"value": 101, "name": "checkpoint", "pid": 100}, {"value": 102, "name": "checkpoint", "pid": 101}, {"value": 103, "name": "checkpoint", "pid": 102}, {"value": 104, "name": "checkpoint", "pid": 103}, {"value": 105, "name": "checkpoint", "pid": 104}, {"value": 106, "name": "checkpoint", "pid": 105}, {"value": 107, "name": "checkpoint", "pid": 106}, {"value": 108, "name": "checkpoint", "pid": 107}, {"value": 109, "name": "checkpoint", "pid": 108}, {"value": 110, "name": "checkpoint", "pid": 109}, {"value": 111, "name": "checkpoint", "pid": 110}, {"value": 112, "name": "checkpoint", "pid": 111}, {"value": 113, "name": "checkpoint", "pid": 112}, {"value": 114, "name": "checkpoint", "pid": 113}, {"value": 115, "name": "checkpoint", "pid": 114}, {"value": 116, "name": "checkpoint", "pid": 115}, {"value": 117, "name": "checkpoint", "pid": 116}, {"value": 118, "name": "checkpoint", "pid": 117}, {"value": 119, "name": "checkpoint", "pid": 118}, {"value": 120, "name": "checkpoint", "pid": 119}, {"value": 121, "name": "checkpoint", "pid": 120}, {"value": 122, "name": "checkpoint", "pid": 121}, {"value": 123, "name": "checkpoint", "pid": 122}, {"value": 124, "name": "checkpoint", "pid": 123}, {"value": 125, "name": "checkpoint", "pid": 124}, {"value": 126, "name": "checkpoint", "pid": 125}, {"value": 127, "name": "checkpoint", "pid": 126}, {"value": 128, "name": "checkpoint", "pid": 127}, {"value": 129, "name": "checkpoint", "pid": 128}, {"value": 130, "name": "checkpoint", "pid": 129}, {"value": 131, "name": "checkpoint", "pid": 130}, {"value": 132, "name": "checkpoint", "pid": 131}, {"value": 133, "name": "checkpoint", "pid": 132}, {"value": 134, "name": "checkpoint", "pid": 133}, {"value": 135, "name": "checkpoint", "pid": 134}, {"value": 136, "name": "checkpoint", "pid": 135}, {"value": 137, "name": "checkpoint", "pid": 136}, {"value": 138, "name": "checkpoint", "pid": 137}, {"value": 139, "name": "checkpoint", "pid": 138}, {"value": 140, "name": "checkpoint", "pid": 139}, {"value": 141, "name": "checkpoint", "pid": 140}, {"value": 142, "name": "checkpoint", "pid": 141}, {"value": 143, "name": "checkpoint", "pid": 142}, {"value": 144, "name": "checkpoint", "pid": 143}, {"value": 145, "name": "checkpoint", "pid": 144}, {"value": 146, "name": "checkpoint", "pid": 145}, {"value": 147, "name": 
"checkpoint", "pid": 146}, {"value": 148, "name": "checkpoint", "pid": 147}, {"value": 149, "name": "checkpoint", "pid": 148}, {"value": 150, "name": "checkpoint", "pid": 149}, {"value": 151, "name": "checkpoint", "pid": 150}, {"value": 152, "name": "checkpoint", "pid": 151}, {"value": 153, "name": "checkpoint", "pid": 152}, {"value": 154, "name": "checkpoint", "pid": 153}, {"value": 155, "name": "checkpoint", "pid": 154}, {"value": 156, "name": "checkpoint", "pid": 155}, {"value": 157, "name": "checkpoint", "pid": 156}, {"value": 158, "name": "checkpoint", "pid": 157}, {"value": 159, "name": "checkpoint", "pid": 158}, {"value": 160, "name": "checkpoint", "pid": 159}, {"value": 161, "name": "checkpoint", "pid": 160}, {"value": 162, "name": "checkpoint", "pid": 161}, {"value": 163, "name": "checkpoint", "pid": 162}, {"value": 164, "name": "checkpoint", "pid": 163}, {"value": 165, "name": "checkpoint", "pid": 164}, {"value": 166, "name": "checkpoint", "pid": 165}, {"value": 167, "name": "checkpoint", "pid": 166}, {"value": 168, "name": "checkpoint", "pid": 167}, {"value": 169, "name": "checkpoint", "pid": 168}, {"value": 170, "name": "checkpoint", "pid": 169}, {"value": 171, "name": "checkpoint", "pid": 170}, {"value": 172, "name": "checkpoint", "pid": 171}, {"value": 173, "name": "checkpoint", "pid": 172}, {"value": 174, "name": "checkpoint", "pid": 173}, {"value": 175, "name": "checkpoint", "pid": 174}, {"value": 176, "name": "checkpoint", "pid": 175}, {"value": 177, "name": "checkpoint", "pid": 176}, {"value": 178, "name": "checkpoint", "pid": 177}, {"value": 179, "name": "checkpoint", "pid": 178}, {"value": 180, "name": "checkpoint", "pid": 179}, {"value": 181, "name": "checkpoint", "pid": 180}, {"value": 182, "name": "checkpoint", "pid": 181}, {"value": 183, "name": "checkpoint", "pid": 182}, {"value": 184, "name": "checkpoint", "pid": 183}, {"value": 185, "name": "checkpoint", "pid": 184}, {"value": 186, "name": "checkpoint", "pid": 185}, {"value": 187, "name": "checkpoint", "pid": 186}, {"value": 188, "name": "checkpoint", "pid": 187}, {"value": 189, "name": "checkpoint", "pid": 188}, {"value": 190, "name": "checkpoint", "pid": 189}, {"value": 191, "name": "checkpoint", "pid": 190}, {"value": 192, "name": "checkpoint", "pid": 191}, {"value": 193, "name": "checkpoint", "pid": 192}, {"value": 194, "name": "checkpoint", "pid": 193}, {"value": 195, "name": "checkpoint", "pid": 194}, {"value": 196, "name": "checkpoint", "pid": 195}, {"value": 197, "name": "checkpoint", "pid": 196}, {"value": 198, "name": "checkpoint", "pid": 197}, {"value": 199, "name": "checkpoint", "pid": 198}]
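This file is a flat, parent-pointer encoding of the checkpoint tree: each node's "pid" names its parent's "value", and the root has an empty "pid". The /al_train route in server.py below appends new nodes in the same shape. A minimal sketch (hypothetical helper, not part of the commit) that recovers the children of each node:

import json
from collections import defaultdict

with open("iteration_structure.json") as f:
    nodes = json.load(f)

children = defaultdict(list)
for node in nodes:
    children[node["pid"]].append(node["value"])

# children[""] -> [1] (the root); children[1] -> [2]; here the tree is a single chain.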
saved_models/server/res.json
ADDED
The diff for this file is too large to render.
See raw diff
saved_models/server/server.py
ADDED
@@ -0,0 +1,388 @@
from flask import request, Flask, jsonify, make_response
from flask_cors import CORS, cross_origin
import base64
import os
import sys
import json
import pickle  # used by /all_result_list below (may also be re-exported by the star import)
import socket  # used by check_port_inuse(); previously imported only inside __main__
import numpy as np
import gc
import shutil

from timevis_backend.utils import *
from timevis_backend.res_logging import add_line

# flask for API server
app = Flask(__name__)
cors = CORS(app, supports_credentials=True)
app.config['CORS_HEADERS'] = 'Content-Type'

session = 5
API_result_path = "./API_result.csv"

@app.route('/updateProjection', methods=["POST", "GET"])
@cross_origin()
def update_projection():
    res = request.get_json()
    CONTENT_PATH = os.path.normpath(res['path'])
    iteration = int(res['iteration'])
    predicates = res["predicates"]
    # username = res['username']

    sys.path.append(CONTENT_PATH)
    timevis = initialize_backend(CONTENT_PATH)
    EPOCH = (iteration - 1) * timevis.data_provider.p + timevis.data_provider.s

    embedding_2d, grid, decision_view, label_name_dict, label_color_list, label_list, max_iter, training_data_index, \
        testing_data_index, eval_new, prediction_list, selected_points, properties = update_epoch_projection(timevis, EPOCH, predicates)

    sys.path.remove(CONTENT_PATH)
    # add_line(API_result_path, ['TT', username])
    return make_response(jsonify({'result': embedding_2d, 'grid_index': grid,
                                  'grid_color': 'data:image/png;base64,' + decision_view,
                                  'label_name_dict': label_name_dict,
                                  'label_color_list': label_color_list, 'label_list': label_list,
                                  'maximum_iteration': max_iter,
                                  'training_data': training_data_index,
                                  'testing_data': testing_data_index, 'evaluation': eval_new,
                                  'prediction_list': prediction_list,
                                  "selectedPoints": selected_points.tolist(),
                                  "properties": properties.tolist()}), 200)

@app.route('/query', methods=["POST"])
@cross_origin()
def filter():
    res = request.get_json()
    CONTENT_PATH = os.path.normpath(res['content_path'])
    iteration = int(res['iteration'])
    predicates = res["predicates"]
    username = res['username']

    sys.path.append(CONTENT_PATH)
    timevis = initialize_backend(CONTENT_PATH)
    EPOCH = (iteration - 1) * timevis.data_provider.p + timevis.data_provider.s

    training_data_number = timevis.hyperparameters["TRAINING"]["train_num"]
    testing_data_number = timevis.hyperparameters["TRAINING"]["test_num"]

    current_index = timevis.get_epoch_index(EPOCH)
    selected_points = np.arange(training_data_number)[current_index]
    selected_points = np.concatenate((selected_points, np.arange(training_data_number, training_data_number + testing_data_number, 1)), axis=0)
    # selected_points = np.arange(training_data_number + testing_data_number)
    for key in predicates.keys():
        if key == "label":
            tmp = np.array(timevis.filter_label(predicates[key], int(EPOCH)))
        elif key == "type":
            tmp = np.array(timevis.filter_type(predicates[key], int(EPOCH)))
        elif key == "confidence":
            tmp = np.array(timevis.filter_conf(predicates[key][0], predicates[key][1], int(EPOCH)))
        else:
            tmp = np.arange(training_data_number + testing_data_number)
        selected_points = np.intersect1d(selected_points, tmp)
    sys.path.remove(CONTENT_PATH)
    add_line(API_result_path, ['SQ', username])
    return make_response(jsonify({"selectedPoints": selected_points.tolist()}), 200)


# base64
@app.route('/sprite', methods=["POST", "GET"])
@cross_origin()
def sprite_image():
    path = request.args.get("path")
    index = request.args.get("index")
    username = request.args.get("username")

    CONTENT_PATH = os.path.normpath(path)
    print('index', index)
    idx = int(index)
    pic_save_dir_path = os.path.join(CONTENT_PATH, "sprites", "{}.png".format(idx))
    img_stream = ''
    with open(pic_save_dir_path, 'rb') as img_f:
        img_stream = img_f.read()
        img_stream = base64.b64encode(img_stream).decode()
    add_line(API_result_path, ['SI', username])
    return make_response(jsonify({"imgUrl": 'data:image/png;base64,' + img_stream}), 200)


@app.route('/spriteList', methods=["POST"])
@cross_origin()
def sprite_list_image():
    data = request.get_json()
    indices = data["index"]
    path = data["path"]

    CONTENT_PATH = os.path.normpath(path)
    length = len(indices)
    urlList = {}

    for i in range(length):
        idx = indices[i]
        pic_save_dir_path = os.path.join(CONTENT_PATH, "sprites", "{}.png".format(idx))
        img_stream = ''
        with open(pic_save_dir_path, 'rb') as img_f:
            img_stream = img_f.read()
            img_stream = base64.b64encode(img_stream).decode()
            urlList[idx] = 'data:image/png;base64,' + img_stream
    # urlList.append('data:image/png;base64,' + img_stream)
    return make_response(jsonify({"urlList": urlList}), 200)


@app.route('/al_query', methods=["POST"])
@cross_origin()
def al_query():
    data = request.get_json()
    CONTENT_PATH = os.path.normpath(data['content_path'])
    iteration = data["iteration"]
    strategy = data["strategy"]
    budget = int(data["budget"])
    acc_idxs = data["accIndices"]
    rej_idxs = data["rejIndices"]
    user_name = data["username"]
    isRecommend = data["isRecommend"]
    # TODO dense_al parameter from frontend

    sys.path.append(CONTENT_PATH)
    timevis = initialize_backend(CONTENT_PATH, dense_al=True)
    # TODO add new sampling rule
    indices, labels, scores = timevis.al_query(iteration, budget, strategy, np.array(acc_idxs).astype(np.int64), np.array(rej_idxs).astype(np.int64))

    sort_i = np.argsort(-scores)
    indices = indices[sort_i]
    labels = labels[sort_i]
    scores = scores[sort_i]

    sys.path.remove(CONTENT_PATH)
    if not isRecommend:
        add_line(API_result_path, ['Feedback', user_name])
    else:
        add_line(API_result_path, ['Recommend', user_name])
    return make_response(jsonify({"selectedPoints": indices.tolist(), "scores": scores.tolist(), "suggestLabels": labels.tolist()}), 200)

@app.route('/anomaly_query', methods=["POST"])
@cross_origin()
def anomaly_query():
    data = request.get_json()
    CONTENT_PATH = os.path.normpath(data['content_path'])
    budget = int(data["budget"])
    strategy = data["strategy"]
    acc_idxs = data["accIndices"]
    rej_idxs = data["rejIndices"]
    user_name = data["username"]
    isRecommend = data["isRecommend"]

    sys.path.append(CONTENT_PATH)

    timevis = initialize_backend(CONTENT_PATH)
    timevis.save_acc_and_rej(acc_idxs, rej_idxs, user_name)
    indices, scores, labels = timevis.suggest_abnormal(strategy, np.array(acc_idxs).astype(np.int64), np.array(rej_idxs).astype(np.int64), budget)
    clean_list, _ = timevis.suggest_normal(strategy, np.array(acc_idxs).astype(np.int64), np.array(rej_idxs).astype(np.int64), 1)

    sort_i = np.argsort(-scores)
    indices = indices[sort_i]
    labels = labels[sort_i]
    scores = scores[sort_i]

    sys.path.remove(CONTENT_PATH)
    if not isRecommend:
        add_line(API_result_path, ['Feedback', user_name])
    else:
        add_line(API_result_path, ['Recommend', user_name])
    return make_response(jsonify({"selectedPoints": indices.tolist(), "scores": scores.tolist(), "suggestLabels": labels.tolist(), "cleanList": clean_list.tolist()}), 200)

@app.route('/al_train', methods=["POST"])
@cross_origin()
def al_train():
    data = request.get_json()
    CONTENT_PATH = os.path.normpath(data['content_path'])
    acc_idxs = data["accIndices"]
    rej_idxs = data["rejIndices"]
    iteration = data["iteration"]
    user_name = data["username"]
    sys.path.append(CONTENT_PATH)

    # by default al_train is the light version: we only save the last epoch

    timevis = initialize_backend(CONTENT_PATH, dense_al=False)
    timevis.save_acc_and_rej(iteration, acc_idxs, rej_idxs, user_name)
    timevis.al_train(iteration, acc_idxs)

    from config import config
    NEW_ITERATION = timevis.get_max_iter()
    timevis.vis_train(NEW_ITERATION, **config)

    # update iteration projection
    embedding_2d, grid, decision_view, label_name_dict, label_color_list, label_list, _, training_data_index, \
        testing_data_index, eval_new, prediction_list, selected_points, properties = update_epoch_projection(timevis, NEW_ITERATION, dict())

    # rewrite json =========
    res_json_path = os.path.join(CONTENT_PATH, "iteration_structure.json")
    with open(res_json_path, encoding='utf8') as fp:
        json_data = json.load(fp)

    json_data.append({'value': NEW_ITERATION, 'name': 'iteration', 'pid': iteration})
    print('json_data', json_data)
    with open(res_json_path, 'w') as r:
        json.dump(json_data, r)
    # rewrite json =========

    del config
    gc.collect()

    sys.path.remove(CONTENT_PATH)

    add_line(API_result_path, ['al_train', user_name])
    return make_response(jsonify({'result': embedding_2d, 'grid_index': grid,
                                  'grid_color': 'data:image/png;base64,' + decision_view,
                                  'label_name_dict': label_name_dict,
                                  'label_color_list': label_color_list, 'label_list': label_list,
                                  'maximum_iteration': NEW_ITERATION, 'training_data': training_data_index,
                                  'testing_data': testing_data_index, 'evaluation': eval_new,
                                  'prediction_list': prediction_list,
                                  "selectedPoints": selected_points.tolist(),
                                  "properties": properties.tolist()}), 200)

def clear_cache(con_paths):
    for CONTENT_PATH in con_paths.values():
        ac_flag = False
        target_path = os.path.join(CONTENT_PATH, "Model")
        dir_list = os.listdir(target_path)
        for dir in dir_list:
            if "Iteration_" in dir:
                ac_flag = True
                i = int(dir.replace("Iteration_", ""))
                if i > 2:
                    shutil.rmtree(os.path.join(target_path, dir))
        if ac_flag:
            iter_structure_path = os.path.join(CONTENT_PATH, "iteration_structure.json")
            with open(iter_structure_path, "r") as f:
                i_s = json.load(f)
            new_is = list()
            for item in i_s:
                value = item["value"]
                if value < 3:
                    new_is.append(item)
            with open(iter_structure_path, "w") as f:
                json.dump(new_is, f)
        print("Successfully remove cache data!")


# mock
@app.route('/login', methods=["POST"])
@cross_origin()
def login():
    data = request.get_json()
    username = data["username"]
    password = data["password"]
    content_path = data["content_path"]

    # Verify username and password
    # if they pass, return normal_content_path and anomaly_content_path
    if username == 'admin-e' and password == '123qwe':
        # con_paths = {"normal_content_path": content_path, "unormaly_content_path": content_path}
        # clear_cache(con_paths)
        return make_response(jsonify({"normal_content_path": content_path, "unormaly_content_path": content_path}), 200)
    else:
        return make_response(jsonify({"message": "username or password is wrong"}), 200)

@app.route('/boundingbox_record', methods=["POST"])
@cross_origin()
def record_bb():
    data = request.get_json()
    username = data['username']
    add_line(API_result_path, ['boundingbox', username])
    return make_response(jsonify({}), 200)

@app.route('/all_result_list', methods=["POST"])
@cross_origin()
def get_res():
    data = request.get_json()
    CONTENT_PATH = os.path.normpath(data['content_path'])
    # iteration_s = data["iteration_start"]
    # iteration_e = data["iteration_end"]
    predicates = dict()  # placeholder

    results = dict()
    imglist = dict()
    gridlist = dict()

    sys.path.append(CONTENT_PATH)

    username = data["username"]

    from config import config
    EPOCH_START = config["EPOCH_START"]
    EPOCH_PERIOD = config["EPOCH_PERIOD"]
    EPOCH_END = config["EPOCH_END"]

    # TODO Interval to be decided
    epoch_num = (EPOCH_END - EPOCH_START) // EPOCH_PERIOD + 1

    for i in range(1, epoch_num + 1, 1):
        EPOCH = (i - 1) * EPOCH_PERIOD + EPOCH_START

        timevis = initialize_backend(CONTENT_PATH)

        # detect whether we have queried before
        fname = "Epoch" if timevis.data_provider.mode == "normal" or timevis.data_provider.mode == "abnormal" else "Iteration"
        bgimg_path = os.path.join(timevis.data_provider.model_path, "{}_{}".format(fname, EPOCH), "bgimg.png")
        embedding_path = os.path.join(timevis.data_provider.model_path, "{}_{}".format(fname, EPOCH), "embedding.npy")
        grid_path = os.path.join(timevis.data_provider.model_path, "{}_{}".format(fname, EPOCH), "grid.pkl")
        if os.path.exists(bgimg_path) and os.path.exists(embedding_path) and os.path.exists(grid_path):
            path = os.path.join(timevis.data_provider.model_path, "{}_{}".format(fname, EPOCH))
            result_path = os.path.join(path, "embedding.npy")
            results[str(i)] = np.load(result_path).tolist()
            with open(os.path.join(path, "grid.pkl"), "rb") as f:
                grid = pickle.load(f)
            gridlist[str(i)] = grid
        else:
            embedding_2d, grid, _, _, _, _, _, _, _, _, _, _, _ = update_epoch_projection(timevis, EPOCH, predicates)
            results[str(i)] = embedding_2d
            gridlist[str(i)] = grid
        # read background img
        with open(bgimg_path, 'rb') as img_f:
            img_stream = img_f.read()
            img_stream = base64.b64encode(img_stream).decode()
            imglist[str(i)] = 'data:image/png;base64,' + img_stream
        # imglist[str(i)] = "http://{}{}".format(ip_adress, bgimg_path)
    sys.path.remove(CONTENT_PATH)

    del config
    gc.collect()

    add_line(API_result_path, ['animation', username])
    return make_response(jsonify({"results": results, "bgimgList": imglist, "grid": gridlist}), 200)

@app.route('/get_itertaion_structure', methods=["POST", "GET"])  # note: route path spelling kept as committed
@cross_origin()
def get_tree():
    CONTENT_PATH = request.args.get("path")
    res_json_path = os.path.join(CONTENT_PATH, "iteration_structure.json")
    # mock
    # res_json_path = os.path.join("./iteration_structure.json")
    with open(res_json_path, encoding='utf8') as fp:
        json_data = json.load(fp)
    return make_response(jsonify({"structure": json_data}), 200)

def check_port_inuse(port, host):
    s = None  # guard so the finally block is safe even if socket() itself fails
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(1)
        s.connect((host, port))
        return True
    except socket.error:
        return False
    finally:
        if s:
            s.close()

if __name__ == "__main__":
    hostname = socket.gethostname()
    ip_address = socket.gethostbyname(hostname)
    # with open('config.json', 'r') as f:
    #     config = json.load(f)
    # ip_address = config["ServerIP"]
    # port = config["ServerPort"]
    port = 5000
    while check_port_inuse(port, ip_address):
        port = port + 1
    app.run(host=ip_address, port=int(port))
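Example client call (illustrative only, not part of the commit): the handlers map the frontend's 1-based iteration to a checkpoint epoch via EPOCH = (iteration - 1) * p + s, so with EPOCH_START s = 1 and EPOCH_PERIOD p = 1, iteration 3 maps to epoch 3. A minimal request against /updateProjection, assuming the requests package and a server that came up on port 5000; the content path is a placeholder:

import requests

resp = requests.post(
    "http://localhost:5000/updateProjection",
    json={"path": "/path/to/content", "iteration": 1, "predicates": {}},
)
print(resp.json()["maximum_iteration"])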
saved_models/server/timevis_backend/__init__.py
ADDED
File without changes

saved_models/server/timevis_backend/__pycache__/__init__.cpython-37.pyc
ADDED
Binary file (191 Bytes)

saved_models/server/timevis_backend/__pycache__/backend_adapter.cpython-37.pyc
ADDED
Binary file (23 kB)

saved_models/server/timevis_backend/__pycache__/res_logging.cpython-37.pyc
ADDED
Binary file (574 Bytes)

saved_models/server/timevis_backend/__pycache__/utils.cpython-37.pyc
ADDED
Binary file (7.07 kB)
saved_models/server/timevis_backend/backend_adapter.py
ADDED
@@ -0,0 +1,765 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
'''This class serves as a intermediate layer for tensorboard frontend and timeVis backend'''
|
2 |
+
import os
|
3 |
+
import sys
|
4 |
+
import json
|
5 |
+
import time
|
6 |
+
import torch
|
7 |
+
import numpy as np
|
8 |
+
import pickle
|
9 |
+
import shutil
|
10 |
+
|
11 |
+
import torch.nn
|
12 |
+
from torch.utils.data import DataLoader
|
13 |
+
from torch.utils.data import WeightedRandomSampler
|
14 |
+
import torchvision
|
15 |
+
|
16 |
+
from scipy.special import softmax
|
17 |
+
|
18 |
+
# timevis_path = "D:\\code-space\\DLVisDebugger" #limy
|
19 |
+
timevis_path = "../../DLVisDebugger" #xianglin#yvonne
|
20 |
+
sys.path.append(timevis_path)
|
21 |
+
from singleVis.utils import *
|
22 |
+
from singleVis.custom_weighted_random_sampler import CustomWeightedRandomSampler
|
23 |
+
from singleVis.edge_dataset import DataHandler, HybridDataHandler
|
24 |
+
from singleVis.spatial_edge_constructor import SingleEpochSpatialEdgeConstructor
|
25 |
+
# kcHybridDenseALSpatialEdgeConstructor,GlobalTemporalEdgeConstructor
|
26 |
+
from singleVis.trajectory_manager import Recommender
|
27 |
+
from singleVis.eval.evaluator import ALEvaluator
|
28 |
+
from singleVis.segmenter import DenseALSegmenter
|
29 |
+
|
30 |
+
|
31 |
+
# active_learning_path = "D:\\code-space\\ActiveLearning" # limy
|
32 |
+
active_learning_path = "../../ActiveLearning"
|
33 |
+
sys.path.append(active_learning_path)
|
34 |
+
|
35 |
+
class TimeVisBackend:
|
36 |
+
def __init__(self, data_provider, projector, vis, evaluator, **hyperparameters) -> None:
|
37 |
+
self.data_provider = data_provider
|
38 |
+
self.projector = projector
|
39 |
+
self.vis = vis
|
40 |
+
self.evaluator = evaluator
|
41 |
+
self.hyperparameters = hyperparameters
|
42 |
+
#################################################################################################################
|
43 |
+
# #
|
44 |
+
# data Panel #
|
45 |
+
# #
|
46 |
+
#################################################################################################################
|
47 |
+
|
48 |
+
def batch_inv_preserve(self, epoch, data):
|
49 |
+
"""
|
50 |
+
get inverse confidence for a single point
|
51 |
+
:param epoch: int
|
52 |
+
:param data: numpy.ndarray
|
53 |
+
:return l: boolean, whether reconstruction data have the same prediction
|
54 |
+
:return conf_diff: float, (0, 1), confidence difference
|
55 |
+
"""
|
56 |
+
embedding = self.projector.batch_project(epoch, data)
|
57 |
+
recon = self.projector.batch_inverse(epoch, embedding)
|
58 |
+
|
59 |
+
ori_pred = self.data_provider.get_pred(epoch, data)
|
60 |
+
new_pred = self.data_provider.get_pred(epoch, recon)
|
61 |
+
ori_pred = softmax(ori_pred, axis=1)
|
62 |
+
new_pred = softmax(new_pred, axis=1)
|
63 |
+
|
64 |
+
old_label = ori_pred.argmax(-1)
|
65 |
+
new_label = new_pred.argmax(-1)
|
66 |
+
l = old_label == new_label
|
67 |
+
|
68 |
+
old_conf = [ori_pred[i, old_label[i]] for i in range(len(old_label))]
|
69 |
+
new_conf = [new_pred[i, old_label[i]] for i in range(len(old_label))]
|
70 |
+
old_conf = np.array(old_conf)
|
71 |
+
new_conf = np.array(new_conf)
|
72 |
+
|
73 |
+
conf_diff = old_conf - new_conf
|
74 |
+
return l, conf_diff
|
75 |
+
|
76 |
+
#################################################################################################################
|
77 |
+
# #
|
78 |
+
# Search Panel #
|
79 |
+
# #
|
80 |
+
#################################################################################################################
|
81 |
+
|
82 |
+
# TODO: fix bugs accroding to new api
|
83 |
+
# customized features
|
84 |
+
def filter_label(self, label, epoch_id):
|
85 |
+
try:
|
86 |
+
index = self.data_provider.classes.index(label)
|
87 |
+
except:
|
88 |
+
index = -1
|
89 |
+
train_labels = self.data_provider.train_labels(epoch_id)
|
90 |
+
test_labels = self.data_provider.test_labels(epoch_id)
|
91 |
+
labels = np.concatenate((train_labels, test_labels), 0)
|
92 |
+
idxs = np.argwhere(labels == index)
|
93 |
+
idxs = np.squeeze(idxs)
|
94 |
+
return idxs
|
95 |
+
|
96 |
+
def filter_type(self, type, epoch_id):
|
97 |
+
if type == "train":
|
98 |
+
res = self.get_epoch_index(epoch_id)
|
99 |
+
elif type == "test":
|
100 |
+
train_num = self.data_provider.train_num
|
101 |
+
test_num = self.data_provider.test_num
|
102 |
+
res = list(range(train_num, train_num+ test_num, 1))
|
103 |
+
elif type == "unlabel":
|
104 |
+
labeled = np.array(self.get_epoch_index(epoch_id))
|
105 |
+
train_num = self.data_provider.train_num
|
106 |
+
all_data = np.arange(train_num)
|
107 |
+
unlabeled = np.setdiff1d(all_data, labeled)
|
108 |
+
res = unlabeled.tolist()
|
109 |
+
else:
|
110 |
+
# all data
|
111 |
+
train_num = self.data_provider.train_num
|
112 |
+
test_num = self.data_provider.test_num
|
113 |
+
res = list(range(0, train_num + test_num, 1))
|
114 |
+
return res
|
115 |
+
|
116 |
+
def filter_conf(self, conf_min, conf_max, epoch_id):
|
117 |
+
train_data = self.data_provider.train_representation(epoch_id)
|
118 |
+
test_data =self.data_provider.test_representation(epoch_id)
|
119 |
+
data = np.concatenate((train_data, test_data), axis=0)
|
120 |
+
pred = self.data_provider.get_pred(epoch_id, data)
|
121 |
+
scores = np.amax(softmax(pred, axis=1), axis=1)
|
122 |
+
res = np.argwhere(np.logical_and(scores<=conf_max, scores>=conf_min)).squeeze().tolist()
|
123 |
+
return res
|
124 |
+
|
125 |
+
|
126 |
+
#################################################################################################################
|
127 |
+
# #
|
128 |
+
# Helper Functions #
|
129 |
+
# #
|
130 |
+
#################################################################################################################
|
131 |
+
|
132 |
+
def save_acc_and_rej(self, acc_idxs, rej_idxs, file_name):
|
133 |
+
d = {
|
134 |
+
"acc_idxs": acc_idxs,
|
135 |
+
"rej_idxs": rej_idxs
|
136 |
+
}
|
137 |
+
path = os.path.join(self.data_provider.content_path, "{}_acc_rej.json".format(file_name))
|
138 |
+
with open(path, "w") as f:
|
139 |
+
json.dump(d, f)
|
140 |
+
print("Successfully save the acc and rej idxs selected by user...")
|
141 |
+
|
142 |
+
def get_epoch_index(self, epoch_id):
|
143 |
+
"""get the training data index for an epoch"""
|
144 |
+
index_file = os.path.join(self.data_provider.model_path, "Epoch_{:d}".format(epoch_id), "index.json")
|
145 |
+
index = load_labelled_data_index(index_file)
|
146 |
+
return index
|
147 |
+
|
148 |
+
def reset(self):
|
149 |
+
return
|
150 |
+
|
151 |
+
|
152 |
+
class ActiveLearningTimeVisBackend(TimeVisBackend):
|
153 |
+
def __init__(self, data_provider, projector, trainer, vis, evaluator, dense, **hyperparameters) -> None:
|
154 |
+
super().__init__(data_provider, projector, vis, evaluator, **hyperparameters)
|
155 |
+
self.trainer = trainer
|
156 |
+
self.dense = dense
|
157 |
+
|
158 |
+
def save_acc_and_rej(self, iteration, acc_idxs, rej_idxs, file_name):
|
159 |
+
d = {
|
160 |
+
"acc_idxs": acc_idxs,
|
161 |
+
"rej_idxs": rej_idxs
|
162 |
+
}
|
163 |
+
path = os.path.join(self.data_provider.content_path, "Model", "Iteration_{}".format(iteration), "{}_acc_rej.json".format(file_name))
|
164 |
+
with open(path, "w") as f:
|
165 |
+
json.dump(d, f)
|
166 |
+
print("Successfully save the acc and rej idxs selected by user at Iteration {}...".format(iteration))
|
167 |
+
|
168 |
+
def reset(self, iteration):
|
169 |
+
# delete [iteration,...)
|
170 |
+
max_i = self.get_max_iter()
|
171 |
+
for i in range(iteration, max_i+1, 1):
|
172 |
+
path = os.path.join(self.data_provider.content_path, "Model", "Iteration_{}".format(i))
|
173 |
+
shutil.rmtree(path)
|
174 |
+
iter_structure_path = os.path.join(self.data_provider.content_path, "iteration_structure.json")
|
175 |
+
with open(iter_structure_path, "r") as f:
|
176 |
+
i_s = json.load(f)
|
177 |
+
new_is = list()
|
178 |
+
for item in i_s:
|
179 |
+
value = item["value"]
|
180 |
+
if value < iteration:
|
181 |
+
new_is.append(item)
|
182 |
+
with open(iter_structure_path, "w") as f:
|
183 |
+
json.dump(new_is, f)
|
184 |
+
print("Successfully remove cache data!")
|
185 |
+
|
186 |
+
def get_epoch_index(self, iteration):
|
187 |
+
"""get the training data index for an epoch"""
|
188 |
+
index_file = os.path.join(self.data_provider.model_path, "Iteration_{:d}".format(iteration), "index.json")
|
189 |
+
index = load_labelled_data_index(index_file)
|
190 |
+
return index
|
191 |
+
|
192 |
+
def al_query(self, iteration, budget, strategy, acc_idxs, rej_idxs):
|
193 |
+
"""get the index of new selection from different strategies"""
|
194 |
+
CONTENT_PATH = self.data_provider.content_path
|
195 |
+
NUM_QUERY = budget
|
196 |
+
GPU = self.hyperparameters["GPU"]
|
197 |
+
NET = self.hyperparameters["TRAINING"]["NET"]
|
198 |
+
DATA_NAME = self.hyperparameters["DATASET"]
|
199 |
+
sys.path.append(CONTENT_PATH)
|
200 |
+
|
201 |
+
# record output information
|
202 |
+
now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
|
203 |
+
sys.stdout = open(os.path.join(CONTENT_PATH, now+".txt"), "w")
|
204 |
+
|
205 |
+
# loading neural network
|
206 |
+
import Model.model as subject_model
|
207 |
+
task_model = eval("subject_model.{}()".format(NET))
|
208 |
+
task_model_type = "pytorch"
|
209 |
+
# start experiment
|
210 |
+
n_pool = self.hyperparameters["TRAINING"]["train_num"] # 50000
|
211 |
+
n_test = self.hyperparameters["TRAINING"]['test_num'] # 10000
|
212 |
+
|
213 |
+
resume_path = os.path.join(CONTENT_PATH, "Model", "Iteration_{}".format(iteration))
|
214 |
+
|
215 |
+
idxs_lb = np.array(json.load(open(os.path.join(resume_path, "index.json"), "r")))
|
216 |
+
|
217 |
+
state_dict = torch.load(os.path.join(resume_path, "subject_model.pth"), map_location=torch.device('cpu'))
|
218 |
+
task_model.load_state_dict(state_dict)
|
219 |
+
NUM_INIT_LB = len(idxs_lb)
|
220 |
+
|
221 |
+
print('resume from iteration {}'.format(iteration))
|
222 |
+
print('number of labeled pool: {}'.format(NUM_INIT_LB))
|
223 |
+
print('number of unlabeled pool: {}'.format(n_pool - NUM_INIT_LB))
|
224 |
+
print('number of testing pool: {}'.format(n_test))
|
225 |
+
|
226 |
+
# here the training handlers and testing handlers are different
|
227 |
+
complete_dataset = torchvision.datasets.CIFAR10(root="..//data//CIFAR10", download=True, train=True, transform=self.hyperparameters["TRAINING"]['transform_te'])
|
228 |
+
|
229 |
+
if strategy == "Random":
|
230 |
+
from query_strategies.random import RandomSampling
|
231 |
+
idxs_selected = np.concatenate((acc_idxs.astype(np.int64), rej_idxs.astype(np.int64)), axis=0)
|
232 |
+
curr_lb = np.concatenate((idxs_lb, idxs_selected), axis=0)
|
233 |
+
q_strategy = RandomSampling(task_model, task_model_type, n_pool, curr_lb, 10, DATA_NAME, NET, gpu=GPU, **self.hyperparameters["TRAINING"])
|
234 |
+
# print information
|
235 |
+
print(DATA_NAME)
|
236 |
+
print(type(q_strategy).__name__)
|
237 |
+
print('================Round {:d}==============='.format(iteration+1))
|
238 |
+
# query new samples
|
239 |
+
t0 = time.time()
|
240 |
+
new_indices, scores = q_strategy.query(NUM_QUERY)
|
241 |
+
t1 = time.time()
|
242 |
+
print("Query time is {:.2f}".format(t1-t0))
|
243 |
+
elif strategy == "Uncertainty":
|
244 |
+
from query_strategies.LeastConfidence import LeastConfidenceSampling
|
245 |
+
idxs_selected = np.concatenate((acc_idxs.astype(np.int64), rej_idxs.astype(np.int64)), axis=0)
|
246 |
+
curr_lb = np.concatenate((idxs_lb, idxs_selected), axis=0)
|
247 |
+
q_strategy = LeastConfidenceSampling(task_model, task_model_type, n_pool, curr_lb, 10, DATA_NAME, NET, gpu=GPU, **self.hyperparameters["TRAINING"])
|
248 |
+
# print information
|
249 |
+
print(DATA_NAME)
|
250 |
+
print(type(q_strategy).__name__)
|
251 |
+
print('================Round {:d}==============='.format(iteration+1))
|
252 |
+
# query new samples
|
253 |
+
t0 = time.time()
|
254 |
+
new_indices, scores = q_strategy.query(complete_dataset, NUM_QUERY, idxs_selected)
|
255 |
+
t1 = time.time()
|
256 |
+
print("Query time is {:.2f}".format(t1-t0))
|
257 |
+
# elif strategy == "Diversity":
|
258 |
+
# from query_strategies.coreset import CoreSetSampling
|
259 |
+
# q_strategy = CoreSetSampling(task_model, task_model_type, n_pool, 512, idxs_lb, DATA_NAME, NET, gpu=GPU, **self.hyperparameters["TRAINING"])
|
260 |
+
# # print information
|
261 |
+
# print(DATA_NAME)
|
262 |
+
# print(type(q_strategy).__name__)
|
263 |
+
# print('================Round {:d}==============='.format(iteration+1))
|
264 |
+
# embedding = q_strategy.get_embedding(complete_dataset)
|
265 |
+
# # query new samples
|
266 |
+
# t0 = time.time()
|
267 |
+
# new_indices, scores = q_strategy.query(embedding, NUM_QUERY)
|
268 |
+
# t1 = time.time()
|
269 |
+
# print("Query time is {:.2f}".format(t1-t0))
|
270 |
+
|
271 |
+
# elif strategy == "Hybrid":
|
272 |
+
# from query_strategies.badge import BadgeSampling
|
273 |
+
# q_strategy = BadgeSampling(task_model, task_model_type, n_pool, 512, idxs_lb, 10, DATA_NAME, NET, gpu=GPU, **self.hyperparameters["TRAINING"])
|
274 |
+
# # print information
|
275 |
+
# print(DATA_NAME)
|
276 |
+
# print(type(q_strategy).__name__)
|
277 |
+
# print('================Round {:d}==============='.format(iteration+1))
|
278 |
+
# # query new samples
|
279 |
+
# t0 = time.time()
|
280 |
+
# new_indices, scores = q_strategy.query(complete_dataset, NUM_QUERY)
|
281 |
+
# t1 = time.time()
|
282 |
+
# print("Query time is {:.2f}".format(t1-t0))
|
283 |
+
elif strategy == "TBSampling":
|
284 |
+
# TODO hard coded parameters...
|
285 |
+
period = 80
|
286 |
+
print(DATA_NAME)
|
287 |
+
print("TBSampling")
|
288 |
+
print('================Round {:d}==============='.format(iteration+1))
|
289 |
+
t0 = time.time()
|
290 |
+
new_indices, scores = self._suggest_abnormal(strategy, iteration, idxs_lb, acc_idxs, rej_idxs, budget, period)
|
291 |
+
t1 = time.time()
|
292 |
+
print("Query time is {:.2f}".format(t1-t0))
|
293 |
+
|
294 |
+
elif strategy == "Feedback":
|
295 |
+
# TODO hard coded parameters...suggest_abnormal
|
296 |
+
period = 80
|
297 |
+
print(DATA_NAME)
|
298 |
+
print("Feedback")
|
299 |
+
print('================Round {:d}==============='.format(iteration+1))
|
300 |
+
t0 = time.time()
|
301 |
+
new_indices, scores = self._suggest_abnormal(strategy, iteration, idxs_lb, acc_idxs, rej_idxs, budget, period)
|
302 |
+
t1 = time.time()
|
303 |
+
print("Query time is {:.2f}".format(t1-t0))
|
304 |
+
else:
|
305 |
+
raise NotImplementedError
|
306 |
+
|
307 |
+
# TODO return the suggest labels, need to develop pesudo label generation technique in the future
|
308 |
+
true_labels = self.data_provider.train_labels(iteration)
|
309 |
+
|
310 |
+
return new_indices, true_labels[new_indices], scores
|
311 |
+
|
312 |
+
def al_train(self, iteration, indices):
|
313 |
+
CONTENT_PATH = self.data_provider.content_path
|
314 |
+
# record output information
|
315 |
+
now = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
|
316 |
+
sys.stdout = open(os.path.join(CONTENT_PATH, now+".txt"), "w")
|
317 |
+
|
318 |
+
# for reproduce purpose
|
319 |
+
print("New indices:\t{}".format(len(indices)))
|
320 |
+
self.save_human_selection(iteration, indices)
|
321 |
+
lb_idx = self.get_epoch_index(iteration)
|
322 |
+
train_idx = np.hstack((lb_idx, indices))
|
323 |
+
print("Training indices:\t{}".format(len(train_idx)))
|
324 |
+
print("Valid indices:\t{}".format(len(set(train_idx))))
|
325 |
+
|
326 |
+
TOTAL_EPOCH = self.hyperparameters["TRAINING"]["total_epoch"]
|
327 |
+
NET = self.hyperparameters["TRAINING"]["NET"]
|
328 |
+
DEVICE = self.data_provider.DEVICE
|
329 |
+
NEW_ITERATION = self.get_max_iter() + 1
|
330 |
+
GPU = self.hyperparameters["GPU"]
|
331 |
+
DATA_NAME = self.hyperparameters["DATASET"]
|
332 |
+
sys.path.append(CONTENT_PATH)
|
333 |
+
|
334 |
+
# loading neural network
|
335 |
+
from Model.model import resnet18
|
336 |
+
task_model = resnet18()
|
337 |
+
resume_path = os.path.join(CONTENT_PATH, "Model", "Iteration_{}".format(iteration))
|
338 |
+
state_dict = torch.load(os.path.join(resume_path, "subject_model.pth"), map_location=torch.device("cpu"))
|
339 |
+
task_model.load_state_dict(state_dict)
|
340 |
+
|
341 |
+
self.save_iteration_index(NEW_ITERATION, train_idx)
|
342 |
+
task_model_type = "pytorch"
|
343 |
+
# start experiment
|
344 |
+
n_pool = self.hyperparameters["TRAINING"]["train_num"] # 50000
|
345 |
+
save_path = os.path.join(CONTENT_PATH, "Model", "Iteration_{}".format(NEW_ITERATION))
|
346 |
+
os.makedirs(save_path, exist_ok=True)
|
347 |
+
|
348 |
+
from query_strategies.random import RandomSampling
|
349 |
+
q_strategy = RandomSampling(task_model, task_model_type, n_pool, lb_idx, 10, DATA_NAME, NET, gpu=GPU, **self.hyperparameters["TRAINING"])
|
350 |
+
# print information
|
351 |
+
print('================Round {:d}==============='.format(NEW_ITERATION))
|
352 |
+
# update
|
353 |
+
q_strategy.update_lb_idxs(train_idx)
|
354 |
+
resnet_model = resnet18()
|
355 |
+
train_dataset = torchvision.datasets.CIFAR10(root="..//data//CIFAR10", download=True, train=True, transform=self.hyperparameters["TRAINING"]['transform_tr'])
|
356 |
+
test_dataset = torchvision.datasets.CIFAR10(root="..//data//CIFAR10", download=True, train=False, transform=self.hyperparameters["TRAINING"]['transform_te'])
|
357 |
+
t1 = time.time()
|
358 |
+
q_strategy.train(total_epoch=TOTAL_EPOCH, task_model=resnet_model, complete_dataset=train_dataset,save_path=None)
|
359 |
+
t2 = time.time()
|
360 |
+
print("Training time is {:.2f}".format(t2-t1))
|
361 |
+
self.save_subject_model(NEW_ITERATION, q_strategy.task_model.state_dict())
|
362 |
+
|
363 |
+
# compute accuracy at each round
|
364 |
+
accu = q_strategy.test_accu(test_dataset)
|
365 |
+
print('Accuracy {:.3f}'.format(100*accu))
|
366 |
+
|
367 |
+
|
368 |
+
def get_max_iter(self):
|
369 |
+
path = os.path.join(self.data_provider.content_path, "Model")
|
370 |
+
dir_list = os.listdir(path)
|
371 |
+
max_iter = -1
|
372 |
+
for dir in dir_list:
|
373 |
+
if "Iteration_" in dir:
|
374 |
+
i = int(dir.replace("Iteration_",""))
|
375 |
+
max_iter = max(max_iter, i)
|
376 |
+
return max_iter
|
377 |
+
|
378 |
+
def save_human_selection(self, iteration, indices):
|
379 |
+
"""
|
380 |
+
save the selected index message from DVI frontend
|
381 |
+
:param epoch_id:
|
382 |
+
:param indices: list, selected indices
|
383 |
+
:return:
|
384 |
+
"""
|
385 |
+
save_location = os.path.join(self.data_provider.model_path, "Iteration_{}".format(iteration), "human_select.json")
|
386 |
+
with open(save_location, "w") as f:
|
387 |
+
json.dump(indices, f)
|
388 |
+
|
389 |
+
def save_iteration_index(self, iteration, idxs):
|
390 |
+
new_iteration_dir = os.path.join(self.data_provider.content_path, "Model", "Iteration_{}".format(iteration))
|
391 |
+
os.makedirs(new_iteration_dir, exist_ok=True)
|
392 |
+
save_location = os.path.join(new_iteration_dir, "index.json")
|
393 |
+
with open(save_location, "w") as f:
|
394 |
+
json.dump(idxs.tolist(), f)
|
395 |
+
|
396 |
+
def save_subject_model(self, iteration, state_dict):
|
397 |
+
new_iteration_dir = os.path.join(self.data_provider.content_path, "Model", "Iteration_{}".format(iteration))
|
398 |
+
model_path = os.path.join(new_iteration_dir, "subject_model.pth")
|
399 |
+
torch.save(state_dict, model_path)
|
400 |
+
|
401 |
+
|
402 |
+
    def vis_train(self, iteration, **config):
        # preprocess
        PREPROCESS = config["VISUALIZATION"]["PREPROCESS"]
        B_N_EPOCHS = config["VISUALIZATION"]["BOUNDARY"]["B_N_EPOCHS"]
        L_BOUND = config["VISUALIZATION"]["BOUNDARY"]["L_BOUND"]
        if PREPROCESS:
            self.data_provider._meta_data(iteration)
            if B_N_EPOCHS != 0:
                LEN = len(self.data_provider.train_labels(iteration))
                self.data_provider._estimate_boundary(iteration, LEN//10, l_bound=L_BOUND)

        # train visualization model
        CLASSES = config["CLASSES"]
        DATASET = config["DATASET"]
        # DEVICE = torch.device("cuda:{:}".format(GPU_ID) if torch.cuda.is_available() else "cpu")
        ################################################# VISUALIZATION PARAMETERS ########################################
        LAMBDA = config["VISUALIZATION"]["LAMBDA"]
        HIDDEN_LAYER = config["VISUALIZATION"]["HIDDEN_LAYER"]
        N_NEIGHBORS = config["VISUALIZATION"]["N_NEIGHBORS"]
        MAX_EPOCH = config["VISUALIZATION"]["MAX_EPOCH"]
        S_N_EPOCHS = config["VISUALIZATION"]["S_N_EPOCHS"]
        PATIENT = config["VISUALIZATION"]["PATIENT"]
        VIS_MODEL_NAME = config["VISUALIZATION"]["VIS_MODEL_NAME"]
        RESOLUTION = config["VISUALIZATION"]["RESOLUTION"]
        EVALUATION_NAME = config["VISUALIZATION"]["EVALUATION_NAME"]
        NET = config["TRAINING"]["NET"]

        if self.dense:
            # TODO test this part
            raise NotImplementedError
            # NOTE: the code below is unreachable until the dense path is tested
            epoch_num = config["TRAINING"]["total_epoch"]
            INIT_NUM = config["VISUALIZATION"]["INIT_NUM"]
            MAX_HAUSDORFF = config["VISUALIZATION"]["MAX_HAUSDORFF"]
            ALPHA = config["VISUALIZATION"]["ALPHA"]
            BETA = config["VISUALIZATION"]["BETA"]
            T_N_EPOCHS = config["VISUALIZATION"]["T_N_EPOCHS"]

            segmenter = DenseALSegmenter(data_provider=self.data_provider, threshold=78.5, epoch_num=epoch_num)
            # segment epochs
            t0 = time.time()
            SEGMENTS = segmenter.segment(iteration)
            t1 = time.time()
            print(SEGMENTS)

            segment_path = os.path.join(self.data_provider.content_path, "Model", "Iteration_{}".format(iteration), "segments.json")
            with open(segment_path, "w") as f:
                json.dump(SEGMENTS, f)

            LEN = self.data_provider.label_num(iteration)
            prev_selected = np.random.choice(np.arange(LEN), size=INIT_NUM, replace=False)
            prev_embedding = None
            start_point = len(SEGMENTS) - 1
            c0 = None
            d0 = None

            # train one visualization model per segment, from the last segment backwards
            for seg in range(start_point, -1, -1):
                epoch_start, epoch_end = SEGMENTS[seg]
                self.data_provider.update_interval(epoch_s=epoch_start, epoch_e=epoch_end)

                optimizer = torch.optim.Adam(model.parameters(), lr=.01, weight_decay=1e-5)
                lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=4, gamma=.1)

                t2 = time.time()
                spatial_cons = kcHybridDenseALSpatialEdgeConstructor(data_provider=self.data_provider, init_num=INIT_NUM, s_n_epochs=S_N_EPOCHS, b_n_epochs=B_N_EPOCHS, n_neighbors=N_NEIGHBORS, MAX_HAUSDORFF=MAX_HAUSDORFF, ALPHA=ALPHA, BETA=BETA, iteration=iteration, init_idxs=prev_selected, init_embeddings=prev_embedding, c0=c0, d0=d0)
                s_edge_to, s_edge_from, s_probs, feature_vectors, embedded, coefficient, time_step_nums, time_step_idxs_list, knn_indices, sigmas, rhos, attention, (c0, d0) = spatial_cons.construct()

                temporal_cons = GlobalTemporalEdgeConstructor(X=feature_vectors, time_step_nums=time_step_nums, sigmas=sigmas, rhos=rhos, n_neighbors=N_NEIGHBORS, n_epochs=T_N_EPOCHS)
                t_edge_to, t_edge_from, t_probs = temporal_cons.construct()
                t3 = time.time()

                edge_to = np.concatenate((s_edge_to, t_edge_to), axis=0)
                edge_from = np.concatenate((s_edge_from, t_edge_from), axis=0)
                probs = np.concatenate((s_probs, t_probs), axis=0)
                probs = probs / (probs.max() + 1e-3)
                eliminate_zeros = probs > 1e-3
                edge_to = edge_to[eliminate_zeros]
                edge_from = edge_from[eliminate_zeros]
                probs = probs[eliminate_zeros]

                # save complex-construction time
                save_dir = os.path.join(self.data_provider.model_path, "Iteration_{}".format(iteration), "SV_time_al_hybrid.json")
                if not os.path.exists(save_dir):
                    evaluation = dict()
                else:
                    with open(save_dir, "r") as f:
                        evaluation = json.load(f)
                if "complex_construction" not in evaluation.keys():
                    evaluation["complex_construction"] = dict()
                evaluation["complex_construction"][str(seg)] = round(t3-t2, 3)
                with open(save_dir, 'w') as f:
                    json.dump(evaluation, f)
                print("constructing timeVis complex for {}-th segment in {:.1f} seconds.".format(seg, t3-t2))

                dataset = HybridDataHandler(edge_to, edge_from, feature_vectors, attention, embedded, coefficient)
                n_samples = int(np.sum(S_N_EPOCHS * probs) // 1)
                # choose sampler based on the size of the edge set: torch's
                # WeightedRandomSampler relies on torch.multinomial, which
                # supports at most 2**24 categories
                if len(edge_to) > 2 ** 24:
                    sampler = CustomWeightedRandomSampler(probs, n_samples, replacement=True)
                else:
                    sampler = WeightedRandomSampler(probs, n_samples, replacement=True)

                edge_loader = DataLoader(dataset, batch_size=1000, sampler=sampler)

                self.trainer.update_vis_model(model)
                self.trainer.update_optimizer(optimizer)
                self.trainer.update_lr_scheduler(lr_scheduler)
                self.trainer.update_edge_loader(edge_loader)

                t2 = time.time()
                self.trainer.train(PATIENT, MAX_EPOCH)
                t3 = time.time()
                # save training time
                save_dir = os.path.join(self.data_provider.model_path, "Iteration_{}".format(iteration), "SV_time_al_hybrid.json")
                if not os.path.exists(save_dir):
                    evaluation = dict()
                else:
                    with open(save_dir, "r") as f:
                        evaluation = json.load(f)

                if "training" not in evaluation.keys():
                    evaluation["training"] = dict()
                evaluation["training"][str(seg)] = round(t3-t2, 3)
                with open(save_dir, 'w') as f:
                    json.dump(evaluation, f)
                self.trainer.save(save_dir=os.path.join(self.data_provider.model_path, "Iteration_{}".format(iteration)), file_name="{}_{}".format(VIS_MODEL_NAME, seg))
                model = self.trainer.model

                # update prev_idxs and prev_embedding
                prev_selected = time_step_idxs_list[0]
                prev_data = torch.from_numpy(feature_vectors[:len(prev_selected)]).to(dtype=torch.float32, device=self.data_provider.DEVICE)
                model.to(device=self.data_provider.DEVICE)
                prev_embedding = model.encoder(prev_data).cpu().detach().numpy()
            # raise NotImplementedError
            print("Successfully trained all visualization models!")
        else:
            t0 = time.time()
            spatial_cons = SingleEpochSpatialEdgeConstructor(self.data_provider, iteration, S_N_EPOCHS, B_N_EPOCHS, 15)
            edge_to, edge_from, probs, feature_vectors, attention = spatial_cons.construct()
            t1 = time.time()

            probs = probs / (probs.max() + 1e-3)
            eliminate_zeros = probs > 1e-3
            edge_to = edge_to[eliminate_zeros]
            edge_from = edge_from[eliminate_zeros]
            probs = probs[eliminate_zeros]

            # save complex-construction time
            save_dir = os.path.join(self.data_provider.model_path, "SV_time_al.json")
            if not os.path.exists(save_dir):
                evaluation = dict()
            else:
                with open(save_dir, "r") as f:
                    evaluation = json.load(f)
            if "complex_construction" not in evaluation.keys():
                evaluation["complex_construction"] = dict()
            evaluation["complex_construction"][str(iteration)] = round(t1-t0, 3)
            with open(save_dir, 'w') as f:
                json.dump(evaluation, f)
            print("constructing timeVis complex in {:.1f} seconds.".format(t1-t0))

            dataset = DataHandler(edge_to, edge_from, feature_vectors, attention)
            n_samples = int(np.sum(S_N_EPOCHS * probs) // 1)
            # choose sampler based on the size of the edge set (see the 2**24 note above)
            if len(edge_to) > 2 ** 24:
                sampler = CustomWeightedRandomSampler(probs, n_samples, replacement=True)
            else:
                sampler = WeightedRandomSampler(probs, n_samples, replacement=True)
            edge_loader = DataLoader(dataset, batch_size=512, sampler=sampler)
            self.trainer.update_edge_loader(edge_loader)

            t2 = time.time()
            self.trainer.train(PATIENT, MAX_EPOCH)
            t3 = time.time()
            # save training time
            save_dir = os.path.join(self.data_provider.model_path, "SV_time_al.json")
            if not os.path.exists(save_dir):
                evaluation = dict()
            else:
                with open(save_dir, "r") as f:
                    evaluation = json.load(f)
            if "training" not in evaluation.keys():
                evaluation["training"] = dict()
            evaluation["training"][str(iteration)] = round(t3-t2, 3)
            with open(save_dir, 'w') as f:
                json.dump(evaluation, f)
            save_dir = os.path.join(self.data_provider.model_path, "Iteration_{}".format(iteration))
            os.makedirs(save_dir, exist_ok=True)
            self.trainer.save(save_dir=save_dir, file_name=VIS_MODEL_NAME)
            # TODO evaluate visualization model, train and test
            evaluator = ALEvaluator(self.data_provider, self.projector)
            evaluator.save_epoch_eval(iteration, file_name=EVALUATION_NAME)

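    # Summary of the non-dense path above: each call (1) builds a spatial complex over the
    # current iteration's representations (plus boundary samples when B_N_EPOCHS > 0),
    # (2) normalizes edge probabilities and drops near-zero edges, (3) draws roughly
    # S_N_EPOCHS * probs edges with a weighted sampler, and (4) trains the visualization
    # model on those edges before saving it under Iteration_{i} and running ALEvaluator.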
    #################################################################################################################
    #                                                                                                               #
    #                                              Sample Selection                                                 #
    #                                                                                                               #
    #################################################################################################################
    def _save(self, iteration, ftm):
        with open(os.path.join(self.data_provider.content_path, "Model", "Iteration_{}".format(iteration), 'sample_recommender.pkl'), 'wb') as f:
            pickle.dump(ftm, f, pickle.HIGHEST_PROTOCOL)

    def _init_detection(self, iteration, lb_idxs, period=80):
        # number of recorded epochs; needed both for trajectory extraction and for uncertainty
        epoch_num = (self.data_provider.e - self.data_provider.s) // self.data_provider.p + 1
        # prepare trajectories, shape (train_num, epoch_num, 2)
        embedding_path = os.path.join(self.data_provider.content_path, "Model", "Iteration_{}".format(iteration), 'trajectory_embeddings.npy')
        if os.path.exists(embedding_path):
            trajectories = np.load(embedding_path)
            print("Load trajectories from cache!")
        else:
            # extract samples
            train_num = self.data_provider.train_num
            embeddings_2d = np.zeros((epoch_num, train_num, 2))
            for i in range(self.data_provider.s, self.data_provider.e + 1, self.data_provider.p):
                # for i in range(self.data_provider.e - self.data_provider.p*(self.period-1), self.data_provider.e+1, self.data_provider.p):
                # id = (i-(self.data_provider.e - (self.data_provider.p-1)*self.period))//self.data_provider.p
                idx = (i - self.data_provider.s) // self.data_provider.p
                embeddings_2d[idx] = self.projector.batch_project(iteration, i, self.data_provider.train_representation(iteration, i))
            trajectories = np.transpose(embeddings_2d, [1, 0, 2])
            np.save(embedding_path, trajectories)
        # prepare uncertainty scores from the last recorded epoch
        uncertainty_path = os.path.join(self.data_provider.content_path, "Model", "Iteration_{}".format(iteration), 'uncertainties.npy')
        if os.path.exists(uncertainty_path):
            uncertainty = np.load(uncertainty_path)
        else:
            samples = self.data_provider.train_representation(iteration, epoch_num)
            pred = self.data_provider.get_pred(iteration, epoch_num, samples)
            uncertainty = 1 - np.amax(softmax(pred, axis=1), axis=1)
            np.save(uncertainty_path, uncertainty)
        ulb_idxs = self.data_provider.get_unlabeled_idx(len(uncertainty), lb_idxs)
        # prepare sampling manager
        ntd_path = os.path.join(self.data_provider.content_path, "Model", "Iteration_{}".format(iteration), 'sample_recommender.pkl')
        if os.path.exists(ntd_path):
            with open(ntd_path, 'rb') as f:
                ntd = pickle.load(f)
        else:
            ntd = Recommender(uncertainty[ulb_idxs], trajectories[ulb_idxs], 30, period=period, metric="a")
            print("Detecting abnormal....")
            ntd.clustered()
            print("Finish detection!")
            self._save(iteration, ntd)
        return ntd, ulb_idxs

    def _suggest_abnormal(self, strategy, iteration, lb_idxs, acc_idxs, rej_idxs, budget, period):
        ntd, ulb_idxs = self._init_detection(iteration, lb_idxs, period)
        # map global sample indices to positions within the unlabeled subset
        map_ulb = ulb_idxs.tolist()
        map_acc_idxs = np.array([map_ulb.index(i) for i in acc_idxs]).astype(np.int32)
        map_rej_idxs = np.array([map_ulb.index(i) for i in rej_idxs]).astype(np.int32)
        if strategy == "TBSampling":
            suggest_idxs, scores = ntd.sample_batch_init(map_acc_idxs, map_rej_idxs, budget)
        elif strategy == "Feedback":
            suggest_idxs, scores = ntd.sample_batch(map_acc_idxs, map_rej_idxs, budget)
        else:
            raise NotImplementedError
        return ulb_idxs[suggest_idxs], scores

    def _suggest_normal(self, strategy, iteration, lb_idxs, acc_idxs, rej_idxs, budget, period):
        ntd, ulb_idxs = self._init_detection(iteration, lb_idxs, period)
        # map global sample indices to positions within the unlabeled subset
        map_ulb = ulb_idxs.tolist()
        map_acc_idxs = np.array([map_ulb.index(i) for i in acc_idxs]).astype(np.int32)
        map_rej_idxs = np.array([map_ulb.index(i) for i in rej_idxs]).astype(np.int32)
        if strategy == "TBSampling":
            suggest_idxs, _ = ntd.sample_batch_normal_init(map_acc_idxs, map_rej_idxs, budget)
        elif strategy == "Feedback":
            suggest_idxs, _ = ntd.sample_batch_normal(map_acc_idxs, map_rej_idxs, budget)
        else:
            raise NotImplementedError
        return ulb_idxs[suggest_idxs]


class AnormalyTimeVisBackend(TimeVisBackend):

    def __init__(self, data_provider, projector, vis, evaluator, period, **hyperparameters) -> None:
        super().__init__(data_provider, projector, vis, evaluator, **hyperparameters)
        self.period = period
        file_path = os.path.join(self.data_provider.content_path, 'clean_label.json')
        with open(file_path, "r") as f:
            self.clean_labels = np.array(json.load(f))

    def reset(self):
        return

    #################################################################################################################
    #                                                                                                               #
    #                                             Anomaly Detection                                                 #
    #                                                                                                               #
    #################################################################################################################

    def _save(self, ntd):
        with open(os.path.join(self.data_provider.content_path, 'sample_recommender.pkl'), 'wb') as f:
            pickle.dump(ntd, f, pickle.HIGHEST_PROTOCOL)

    def _init_detection(self):
        # number of recorded epochs; needed both for trajectory extraction and for uncertainty
        epoch_num = (self.data_provider.e - self.data_provider.s) // self.data_provider.p + 1
        # prepare trajectories, shape (train_num, epoch_num, 2)
        embedding_path = os.path.join(self.data_provider.content_path, 'trajectory_embeddings.npy')
        if os.path.exists(embedding_path):
            trajectories = np.load(embedding_path)
        else:
            # extract samples
            train_num = self.data_provider.train_num
            embeddings_2d = np.zeros((epoch_num, train_num, 2))
            for i in range(self.data_provider.s, self.data_provider.e + 1, self.data_provider.p):
                # for i in range(self.data_provider.e - self.data_provider.p*(self.period-1), self.data_provider.e+1, self.data_provider.p):
                # id = (i-(self.data_provider.e - (self.data_provider.p-1)*self.period))//self.data_provider.p
                idx = (i - self.data_provider.s) // self.data_provider.p
                embeddings_2d[idx] = self.projector.batch_project(i, self.data_provider.train_representation(i))
            trajectories = np.transpose(embeddings_2d, [1, 0, 2])
            np.save(embedding_path, trajectories)
        # prepare uncertainty scores from the last recorded epoch
        uncertainty_path = os.path.join(self.data_provider.content_path, 'uncertainties.npy')
        if os.path.exists(uncertainty_path):
            uncertainty = np.load(uncertainty_path)
        else:
            samples = self.data_provider.train_representation(epoch_num)
            pred = self.data_provider.get_pred(epoch_num, samples)
            uncertainty = 1 - np.amax(softmax(pred, axis=1), axis=1)
            np.save(uncertainty_path, uncertainty)

        # prepare sampling manager
        ntd_path = os.path.join(self.data_provider.content_path, 'sample_recommender.pkl')
        if os.path.exists(ntd_path):
            with open(ntd_path, 'rb') as f:
                ntd = pickle.load(f)
        else:
            ntd = Recommender(uncertainty, trajectories, 30, period=self.period, metric="a")
            print("Detecting abnormal....")
            ntd.clustered()
            print("Finish detection!")
            self._save(ntd)
        return ntd

    def suggest_abnormal(self, strategy, acc_idxs, rej_idxs, budget):
        ntd = self._init_detection()
        if strategy == "TBSampling":
            suggest_idxs, scores = ntd.sample_batch_init(acc_idxs, rej_idxs, budget)
        elif strategy == "Feedback":
            suggest_idxs, scores = ntd.sample_batch(acc_idxs, rej_idxs, budget)
        else:
            raise NotImplementedError
        suggest_labels = self.clean_labels[suggest_idxs]
        return suggest_idxs, scores, suggest_labels

    def suggest_normal(self, strategy, acc_idxs, rej_idxs, budget):
        ntd = self._init_detection()
        if strategy == "TBSampling":
            suggest_idxs, _ = ntd.sample_batch_normal_init(acc_idxs, rej_idxs, budget)
        elif strategy == "Feedback":
            suggest_idxs, _ = ntd.sample_batch_normal(acc_idxs, rej_idxs, budget)
        else:
            raise NotImplementedError
        suggest_labels = self.clean_labels[suggest_idxs]
        return suggest_idxs, suggest_labels

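The two suggestion entry points above are meant to be driven in a feedback loop: the
frontend displays a suggested batch, the user accepts or rejects samples, and the
accumulated decisions are passed back into the next call. A minimal sketch of that loop
against the abnormal-detection backend follows; the content path, budget, and the
accept-everything placeholder are illustrative assumptions, not part of the shipped code.

    from timevis_backend.utils import initialize_backend

    # placeholder content path; the config at this path must set SETTING to "abnormal"
    backend = initialize_backend("/path/to/content")

    acc_idxs, rej_idxs = [], []
    for _ in range(3):  # three rounds of human feedback
        # the first round bootstraps with "TBSampling"; later rounds reuse feedback
        strategy = "Feedback" if (acc_idxs or rej_idxs) else "TBSampling"
        suggest_idxs, scores, suggest_labels = backend.suggest_abnormal(
            strategy, acc_idxs, rej_idxs, budget=50)
        # a real frontend would collect user decisions here; accept everything as a stand-in
        acc_idxs.extend(int(i) for i in suggest_idxs)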
saved_models/server/timevis_backend/res_logging.py
ADDED
@@ -0,0 +1,12 @@
import time
import csv

def add_line(path, data_row):
    """
    Append one timestamped record to a CSV log.
    data_row: list, [API_name, username]; the current time is appended automatically
    """
    now_time = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime())
    data_row.append(now_time)
    with open(path, "a+") as f:
        csv_write = csv.writer(f)
        csv_write.writerow(data_row)
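`add_line` stamps each record with the current local time, so callers pass only the API
name and the username. A minimal usage sketch (the file name and field values are
illustrative):

    from timevis_backend.res_logging import add_line

    # appends a row like "updateProjection,admin,2023-02-22-20:07:48";
    # the trailing timestamp is added by add_line itself
    add_line("api_log.csv", ["updateProjection", "admin"])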
saved_models/server/timevis_backend/utils.py
ADDED
@@ -0,0 +1,250 @@
import os, sys
# sys.path.append("..")

import torch
import numpy as np
from umap.umap_ import find_ab_params
import pickle
import gc
import base64
from .backend_adapter import TimeVisBackend, ActiveLearningTimeVisBackend, AnormalyTimeVisBackend

timevis_path = "../../DLVisDebugger"
sys.path.append(timevis_path)
from singleVis.SingleVisualizationModel import VisModel
from singleVis.losses import SingleVisLoss, UmapLoss, ReconstructionLoss, SmoothnessLoss, HybridLoss
from singleVis.trainer import SingleVisTrainer, HybridVisTrainer
from singleVis.data import NormalDataProvider, ActiveLearningDataProvider, DenseActiveLearningDataProvider
from singleVis.eval.evaluator import Evaluator
from singleVis.visualizer import visualizer, DenseALvisualizer
from singleVis.projector import Projector, ALProjector, DenseALProjector
from singleVis.segmenter import Segmenter


def initialize_backend(CONTENT_PATH, dense_al=False):

    # `config` is resolved from the subject model's content path (added to sys.path by
    # the caller) and is expected to behave like a dict of hyperparameters
    import config

    # load hyperparameters
    CLASSES = config["CLASSES"]
    DATASET = config["DATASET"]
    GPU_ID = config["GPU"]
    DEVICE = torch.device("cuda:{}".format(GPU_ID) if torch.cuda.is_available() else "cpu")
    ################################################# VISUALIZATION PARAMETERS ########################################
    PREPROCESS = config["VISUALIZATION"]["PREPROCESS"]
    B_N_EPOCHS = config["VISUALIZATION"]["BOUNDARY"]["B_N_EPOCHS"]
    L_BOUND = config["VISUALIZATION"]["BOUNDARY"]["L_BOUND"]
    LAMBDA = config["VISUALIZATION"]["LAMBDA"]
    # HIDDEN_LAYER = config["VISUALIZATION"]["HIDDEN_LAYER"]
    ENCODER_DIMS = config["VISUALIZATION"]["ENCODER_DIMS"]
    DECODER_DIMS = config["VISUALIZATION"]["DECODER_DIMS"]
    N_NEIGHBORS = config["VISUALIZATION"]["N_NEIGHBORS"]
    MAX_EPOCH = config["VISUALIZATION"]["MAX_EPOCH"]
    S_N_EPOCHS = config["VISUALIZATION"]["S_N_EPOCHS"]
    PATIENT = config["VISUALIZATION"]["PATIENT"]
    VIS_MODEL_NAME = config["VISUALIZATION"]["VIS_MODEL_NAME"]
    RESOLUTION = config["VISUALIZATION"]["RESOLUTION"]
    EVALUATION_NAME = config["VISUALIZATION"]["EVALUATION_NAME"]
    NET = config["TRAINING"]["NET"]


    SETTING = config["SETTING"]  # "normal", "abnormal", or "active learning"
    if SETTING == "normal" or SETTING == "abnormal":
        EPOCH_START = config["EPOCH_START"]
        EPOCH_END = config["EPOCH_END"]
        EPOCH_PERIOD = config["EPOCH_PERIOD"]

        INIT_NUM = config["VISUALIZATION"]["INIT_NUM"]
        ALPHA = config["VISUALIZATION"]["ALPHA"]
        BETA = config["VISUALIZATION"]["BETA"]
        MAX_HAUSDORFF = config["VISUALIZATION"]["MAX_HAUSDORFF"]
        T_N_EPOCHS = config["VISUALIZATION"]["T_N_EPOCHS"]
    elif SETTING == "active learning":
        BASE_ITERATION = config["BASE_ITERATION"]
        TOTAL_EPOCH = config["TRAINING"]["total_epoch"]
    else:
        raise NotImplementedError

    import Model.model as subject_model
    # instantiate the subject network class named in the config
    net = eval("subject_model.{}()".format(NET))


    # ########################################################################################################################
    #                                                    TRAINING SETTING                                                    #
    # ########################################################################################################################

    # model = SingleVisualizationModel(input_dims=512, output_dims=2, units=256, hidden_layer=HIDDEN_LAYER)
    model = VisModel(ENCODER_DIMS, DECODER_DIMS)

    if SETTING == "normal" or SETTING == "abnormal":
        data_provider = NormalDataProvider(CONTENT_PATH, net, EPOCH_START, EPOCH_END, EPOCH_PERIOD, device=DEVICE, classes=CLASSES, epoch_name="Epoch", verbose=1)
        segmenter = Segmenter(data_provider=data_provider, threshold=78.5, range_s=EPOCH_START, range_e=EPOCH_END, range_p=EPOCH_PERIOD)
        SEGMENTS = segmenter.segment()
        # SEGMENTS = config["VISUALIZATION"]["SEGMENTS"]
        projector = Projector(vis_model=model, content_path=CONTENT_PATH, segments=SEGMENTS, device=DEVICE)
    elif SETTING == "active learning":
        DENSE_VIS_MODEL_NAME = config["VISUALIZATION"]["DENSE_VIS_MODEL_NAME"]
        if dense_al:
            data_provider = DenseActiveLearningDataProvider(CONTENT_PATH, net, BASE_ITERATION, epoch_num=TOTAL_EPOCH, split=-1, device=DEVICE, classes=CLASSES, verbose=1)
            projector = DenseALProjector(vis_model=model, content_path=CONTENT_PATH, vis_model_name=DENSE_VIS_MODEL_NAME, device=DEVICE)
        else:
            data_provider = ActiveLearningDataProvider(CONTENT_PATH, net, BASE_ITERATION, split=-1, device=DEVICE, classes=CLASSES, verbose=1)
            projector = ALProjector(vis_model=model, content_path=CONTENT_PATH, vis_model_name=VIS_MODEL_NAME, device=DEVICE)

    # ########################################################################################################################
    #                                                          TRAIN                                                         #
    # ########################################################################################################################

    if SETTING == "active learning":
        negative_sample_rate = 5
        min_dist = .1
        _a, _b = find_ab_params(1.0, min_dist)
        umap_loss_fn = UmapLoss(negative_sample_rate, DEVICE, _a, _b, repulsion_strength=1.0)
        recon_loss_fn = ReconstructionLoss(beta=1.0)
        if dense_al:
            smooth_loss_fn = SmoothnessLoss(margin=1.)
            S_LAMBDA = config["VISUALIZATION"]["S_LAMBDA"]
            criterion = HybridLoss(umap_loss_fn, recon_loss_fn, smooth_loss_fn, lambd1=LAMBDA, lambd2=S_LAMBDA)
            optimizer = torch.optim.Adam(model.parameters(), lr=.01, weight_decay=1e-5)
            lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=4, gamma=.1)
            trainer = HybridVisTrainer(model, criterion, optimizer, lr_scheduler, edge_loader=None, DEVICE=DEVICE)
        else:
            criterion = SingleVisLoss(umap_loss_fn, recon_loss_fn, lambd=LAMBDA)
            optimizer = torch.optim.Adam(model.parameters(), lr=.01, weight_decay=1e-5)
            lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=4, gamma=.1)
            trainer = SingleVisTrainer(model, criterion, optimizer, lr_scheduler, edge_loader=None, DEVICE=DEVICE)

    # ########################################################################################################################
    #                                                       EVALUATION                                                       #
    # ########################################################################################################################

    if dense_al:
        vis = DenseALvisualizer(data_provider, projector, RESOLUTION)
    else:
        vis = visualizer(data_provider, projector, RESOLUTION)
    evaluator = Evaluator(data_provider, projector)

    if SETTING == "normal":
        timevis = TimeVisBackend(data_provider, projector, vis, evaluator, **config)
    elif SETTING == "abnormal":
        timevis = AnormalyTimeVisBackend(data_provider, projector, vis, evaluator, period=100, **config)
    elif SETTING == "active learning":
        timevis = ActiveLearningTimeVisBackend(data_provider, projector, trainer, vis, evaluator, dense_al, **config)

    del config
    gc.collect()
    return timevis


def update_epoch_projection(timevis, EPOCH, predicates):
    train_data = timevis.data_provider.train_representation(EPOCH)
    test_data = timevis.data_provider.test_representation(EPOCH)
    all_data = np.concatenate((train_data, test_data), axis=0)

    fname = "Epoch" if timevis.data_provider.mode == "normal" or timevis.data_provider.mode == "abnormal" else "Iteration"
    embedding_path = os.path.join(timevis.data_provider.model_path, "{}_{}".format(fname, EPOCH), "embedding.npy")
    if os.path.exists(embedding_path):
        embedding_2d = np.load(embedding_path)
    else:
        embedding_2d = timevis.projector.batch_project(EPOCH, all_data)
        np.save(embedding_path, embedding_2d)

    train_labels = timevis.data_provider.train_labels(EPOCH)
    test_labels = timevis.data_provider.test_labels(EPOCH)
    labels = np.concatenate((train_labels, test_labels), axis=0).tolist()

    training_data_number = timevis.hyperparameters["TRAINING"]["train_num"]
    testing_data_number = timevis.hyperparameters["TRAINING"]["test_num"]
    training_data_index = list(range(training_data_number))
    testing_data_index = list(range(training_data_number, training_data_number + testing_data_number))

    # return the background image; read from cache if it exists
    bgimg_path = os.path.join(timevis.data_provider.model_path, "{}_{}".format(fname, EPOCH), "bgimg.png")
    grid_path = os.path.join(timevis.data_provider.model_path, "{}_{}".format(fname, EPOCH), "grid.pkl")
    if os.path.exists(bgimg_path) and os.path.exists(grid_path):
        with open(os.path.join(grid_path), "rb") as f:
            grid = pickle.load(f)
        with open(bgimg_path, 'rb') as img_f:
            img_stream = img_f.read()
        b_fig = base64.b64encode(img_stream).decode()
    else:
        x_min, y_min, x_max, y_max, b_fig = timevis.vis.get_background(EPOCH, timevis.hyperparameters["VISUALIZATION"]["RESOLUTION"])
        grid = [x_min, y_min, x_max, y_max]
        # formatting
        grid = [float(i) for i in grid]
        b_fig = str(b_fig, encoding='utf-8')

    # save results: grid and decision view
    save_path = timevis.data_provider.model_path
    save_path = os.path.join(save_path, "{}_{}".format(fname, EPOCH))
    with open(os.path.join(save_path, "grid.pkl"), "wb") as f:
        pickle.dump(grid, f)
    np.save(os.path.join(save_path, "embedding.npy"), embedding_2d)

    color = timevis.vis.get_standard_classes_color() * 255
    color = color.astype(int).tolist()

    # TODO fix its structure
    file_name = timevis.hyperparameters["VISUALIZATION"]["EVALUATION_NAME"]
    evaluation = timevis.evaluator.get_eval(file_name=file_name)
    eval_new = dict()
    # eval_new["nn_train_15"] = evaluation["15"]['nn_train'][str(EPOCH)]
    # eval_new['nn_test_15'] = evaluation["15"]['nn_test'][str(EPOCH)]
    # eval_new['bound_train_15'] = evaluation["15"]['b_train'][str(EPOCH)]
    # eval_new['bound_test_15'] = evaluation["15"]['b_test'][str(EPOCH)]
    # eval_new['ppr_train'] = evaluation["ppr_train"][str(EPOCH)]
    # eval_new['ppr_test'] = evaluation["ppr_test"][str(EPOCH)]
    # eval_new["nn_train_15"] = 1
    # eval_new['nn_test_15'] = 1
    # eval_new['bound_train_15'] = 1
    # eval_new['bound_test_15'] = 1
    # eval_new['ppr_train'] = 1
    # eval_new['ppr_test'] = 1
    eval_new["train_acc"] = evaluation["train_acc"][str(EPOCH)]
    eval_new["test_acc"] = evaluation["test_acc"][str(EPOCH)]

    label_color_list = []
    label_list = []
    label_name_dict = dict()
    for i, label in enumerate(timevis.hyperparameters["CLASSES"]):
        label_name_dict[i] = label

    for label in labels:
        label_color_list.append(color[int(label)])
        label_list.append(timevis.hyperparameters["CLASSES"][int(label)])

    prediction_list = []
    prediction = timevis.data_provider.get_pred(EPOCH, all_data).argmax(1)

    for i in range(len(prediction)):
        prediction_list.append(timevis.hyperparameters["CLASSES"][prediction[i]])

    if timevis.hyperparameters["SETTING"] == "normal" or timevis.hyperparameters["SETTING"] == "abnormal":
        max_iter = (timevis.hyperparameters["EPOCH_END"] - timevis.hyperparameters["EPOCH_START"]) // timevis.hyperparameters["EPOCH_PERIOD"] + 1
    elif timevis.hyperparameters["SETTING"] == "active learning":
        # TODO fix this, could be larger than EPOCH
        max_iter = timevis.get_max_iter()
        # max_iter = max(timevis.hyperparameters["BASE_ITERATION"], EPOCH)

    # current_index = timevis.get_epoch_index(EPOCH)
    # selected_points = np.arange(training_data_number + testing_data_number)[current_index]
    selected_points = np.arange(training_data_number + testing_data_number)
    for key in predicates.keys():
        if key == "label":
            tmp = np.array(timevis.filter_label(predicates[key]))
        elif key == "type":
            tmp = np.array(timevis.filter_type(predicates[key], int(EPOCH)))
        else:
            tmp = np.arange(training_data_number + testing_data_number)
        selected_points = np.intersect1d(selected_points, tmp)

    # properties: 0 = labeled training, 1 = unlabeled training, 2 = testing
    properties = np.concatenate((np.zeros(training_data_number, dtype=np.int16), 2 * np.ones(testing_data_number, dtype=np.int16)), axis=0)
    lb = timevis.get_epoch_index(EPOCH)
    ulb = np.setdiff1d(training_data_index, lb)
    properties[ulb] = 1

    return embedding_2d.tolist(), grid, b_fig, label_name_dict, label_color_list, label_list, max_iter, training_data_index, testing_data_index, eval_new, prediction_list, selected_points, properties
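A sketch of how a server route might wire the two helpers above together; the content
path and the predicate values are placeholders, and error handling is omitted:

    from timevis_backend.utils import initialize_backend, update_epoch_projection

    timevis = initialize_backend("/path/to/content")  # placeholder content path

    # predicates filter the returned points; keys other than "label"/"type" match everything
    predicates = {"label": [3, 5]}  # placeholder label filter
    (embedding, grid, bgimg, label_names, label_colors, labels,
     max_iter, train_idx, test_idx, evals, preds, selected, props) = update_epoch_projection(
        timevis, EPOCH=1, predicates=predicates)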
saved_models/tensorboard/.bazelrc
ADDED
@@ -0,0 +1,9 @@
# Use the Angular Ivy compiler
# See https://github.com/angular/angular/blob/master/docs/BAZEL.md#various-flags-used-for-tests
build --define=angular_ivy_enabled=True

common --experimental_repo_remote_exec # from TensorFlow

# Use C++ backing implementations for Python proto parsing and deserialization,
# which is much faster (~10x).
build --define=use_fast_cpp_protos=true
saved_models/tensorboard/.clang-format
ADDED
@@ -0,0 +1 @@
BasedOnStyle: google
saved_models/tensorboard/.git-blame-ignore-revs
ADDED
@@ -0,0 +1,92 @@
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# List of commits to ignore by default in `git-blame`. Add to this list
# ONLY commits that are certain to have been functional no-ops, like
# automated reformattings.
#
# To make use of this, you must set the `blame.ignoreRevsFile` Git config
# option to point to this file. See `git help blame` and `git help config` for
# more details.

# Giant JavaScript reformat (<https://github.com/tensorflow/tensorboard/issues/2493>)
# git log --author='TensorBoard Gardener' --grep 'prettier: reformat' --format='%H # %s'
fce0c3fbb63b8a3aaab175f5274cf15adac89273 # prettier: reformat directory tensorboard/components
bbe629969692b1fd8b5876d57e0a8d756a499c61 # prettier: reformat directory tensorboard/plugins/text
90eb2073fa8ebc1ecbe5acc0f96d34488434206f # prettier: reformat directory tensorboard/plugins/mesh
9ef4eee6ed1d704994e26bdd53d01c4d063f997a # prettier: reformat directory tensorboard/plugins/image
749332040e8f92cb0858a99d72f6e98a0a5f2854 # prettier: reformat directory tensorboard/plugins/graph
010511580c98df3c5e7680b8b1ea81838671ba51 # prettier: reformat directory tensorboard/plugins/audio
7cbe2bafba4d09a1d9670b849b67d1af2769d58c # prettier: reformat directory tensorboard/plugins/scalar
4a7be4447d4d00f92271982ec41a76663778feb1 # prettier: reformat directory tensorboard/plugins/profile
40266070914ad915fb4f3e57a890ad9ae7db6b3c # prettier: reformat directory tensorboard/plugins/hparams
b3a9aa3b732df93580bb359f6640e0520ce5638a # prettier: reformat directory tensorboard/plugins/pr_curve
a14f6779b79493dae4ed0649996bd8b15956ec5c # prettier: reformat directory tensorboard/plugins/debugger
e6c61797f8cdaa3607fe7bebbee296077ed6a360 # prettier: reformat directory tensorboard/plugins/beholder
103aa46362269198a0f997fc1fdcd89d87f0e70a # prettier: reformat directory tensorboard/plugins/projector
9086d6c9ef8db370ad5d2b3619ba2f077e11cccd # prettier: reformat directory tensorboard/plugins/histogram
1420ad645db2fa4eca5bb93292f692603e0c1d4b # prettier: reformat directory tensorboard/plugins/distribution
5aaf86506448c05154c8164c924ab633b1eb8c5e # prettier: reformat directory tensorboard/plugins/custom_scalar
c88af8dac986e6cfe938167d33899ea9e193d750 # prettier: reformat directory tensorboard/plugins/profile/pod_viewer
d539b827b8a7e2c12018dde9f4405b611291ef1a # prettier: reformat directory tensorboard/plugins/profile/memory_viewer
65c31bde8c20820ece126b0f806e488bcb2497b8 # prettier: reformat directory tensorboard/plugins/interactive_inference
8bc5430e2911822ffab02a598342cda542c5fa2c # prettier: reformat directory tensorboard/plugins/example/tensorboard_plugin_example
b3a5c14645da6980ac329b7850aa70be9914ad80 # prettier: reformat directory tensorboard/plugins/interactive_inference/witwidget/notebook/jupyter/js

# Giant Python reformat (<https://github.com/tensorflow/tensorboard/issues/2967>)
# git log --author='TensorBoard Gardener' --grep 'black: reformat' --format='%H # %s'
a20f53e11f5d48f5eee66d178427eacd7469bb11 # black: reformat directory tensorboard
c2fbc3e1f75b39d147a551dd8b937afdb9bf42ea # black: reformat directory tensorboard/util
3de1082df62ad697eae99cbd2cd63aa240f21e15 # black: reformat directory tensorboard/defs
91b0c32e8312a75efa944e50d4be0504c8ce5585 # black: reformat directory tensorboard/data
dfc32ae277c76b64893f96ae35b94b49df68408f # black: reformat directory tensorboard/tools
cc77a6320ab5901ad277bac6d98444fe7237098f # black: reformat directory tensorboard/compat
6f10a3d2c4a9e286db6b30886171ce0b938c0aeb # black: reformat directory tensorboard/summary
388e97cd6e20dd2882e3c9cef44244d63f85bcd1 # black: reformat directory tensorboard/scripts
ee55f91226a736579b5e2b396190ccc50959f40c # black: reformat directory tensorboard/plugins
ba4e12754d4be543d5b6aa16568081a7155f1e5f # black: reformat directory tensorboard/backend
5a3ab3c9282b72cd66b63dd40ebf9e5e50118b6d # black: reformat directory tensorboard/uploader
9146ae0e87a3403d48ff299041c974bf9d123c39 # black: reformat directory tensorboard/pip_package
2726634abc010f8a17c21aa6e30877f7bcbe028b # black: reformat directory tensorboard/plugins/text
159a9911f3ca2bc9cbf0b1dca1ef54f166cf0fb2 # black: reformat directory tensorboard/plugins/mesh
89cd022a958b15454e977e28e04900b21e63bdf4 # black: reformat directory tensorboard/plugins/core
c71e21b695ec58f5585f009dc098d513c97fc6a6 # black: reformat directory tensorboard/plugins/image
5e8da1bad672050082754624dd9cec50b6bc3238 # black: reformat directory tensorboard/plugins/graph
c2d220396a66114231e95d8ca55d7386a771c96d # black: reformat directory tensorboard/plugins/audio
47f246bfd7786a31c2cf2bb06e1b3cf35993f706 # black: reformat directory tensorboard/plugins/scalar
666b75e91fe505ab28900230cd43970482fd045f # black: reformat directory tensorboard/plugins/profile
3aa9cdf4c979fa4ce1c212debd1504442c93dd82 # black: reformat directory tensorboard/plugins/hparams
7664c071a00d3b3021d56e04838ceed547fac79c # black: reformat directory tensorboard/functionaltests
6248d54089524ccff5fe4aedeb2c5089b62c9a12 # black: reformat directory tensorboard/plugins/pr_curve
f53e34d839f1deff8bfd9199413733863e42f1e8 # black: reformat directory tensorboard/plugins/debugger
c92eaa91bacd0fcf8454f56b8ca112073a339828 # black: reformat directory tensorboard/plugins/beholder
92a6e98ab07255ed642dd074017558be44b16f7e # black: reformat directory tensorboard/plugins/projector
73d977ea7eda89102eca03a943580a7fddc6f829 # black: reformat directory tensorboard/plugins/histogram
c6020666b2d93541a9324547d7a7b6677e0fc6e4 # black: reformat directory tensorboard/plugins/debugger_v2
242e5cce72971d14dba3505f3d66534a8e8e6c26 # black: reformat directory tensorboard/plugins/distribution
51b9e27fb096c9df055de4461cbdfb344b344700 # black: reformat directory tensorboard/plugins/custom_scalar
1332da76f09664d31ed13afbaa9824ffd6e7bc77 # black: reformat directory tensorboard/backend/event_processing
8a8025e8e99b03bf6b7ee0aafc7e20d37600f0c7 # black: reformat directory tensorboard/plugins/interactive_inference
ae32a4adcab8c3349c3bff21240078334fef1371 # black: reformat directory tensorboard/examples/plugins/example_basic
1ccf3fe1c122cd89cde577ffc53b31b9b4e5ee20 # black: reformat directory tensorboard/examples/plugins/example_basic/tensorboard_plugin_example

# Small BUILD reformat (<https://github.com/tensorflow/tensorboard/pull/3054>)
ea4e9bbc885784a283b8b79974132df7c6cdcc50 # Reformat all `*BUILD` files with `buildifier`

# third_party/js.bzl reformat (<https://github.com/tensorflow/tensorboard/pull/3332>)
3e17c44d5bc3c7af72f14059e39d4b872e95d573 # Buildifier format js.bzl for future edits (#3332)

# Prettier 1.18.2 to 2.1.1 upgrade (<https://github.com/tensorflow/tensorboard/pull/4122>)
e42a7f18b33c7a85b51db28d465ec01915a8e725 # prettier: reformat code for 2.1.1
saved_models/tensorboard/.github/ISSUE_TEMPLATE/bug_report.md
ADDED
@@ -0,0 +1,40 @@
---
name: Bug report
about: Report a problem in TensorBoard
title: ''
labels: ''
assignees: ''

---

Consider Stack Overflow for getting support using TensorBoard—they have
a larger community with better searchability:

https://stackoverflow.com/questions/tagged/tensorboard

Do not use this template for setup, installation, or configuration
issues. Instead, use the “installation problem” issue template:

https://github.com/tensorflow/tensorboard/issues/new?template=installation_problem.md

To report a problem with TensorBoard itself, please fill out the
remainder of this template.

## Environment information (required)

Please run `diagnose_tensorboard.py` (link below) in the same
environment from which you normally run TensorFlow/TensorBoard, and
paste the output here:

https://raw.githubusercontent.com/tensorflow/tensorboard/master/tensorboard/tools/diagnose_tensorboard.py

For browser-related issues, please additionally specify:

- Browser type and version (e.g., Chrome 64.0.3282.140):
- Screenshot, if it’s a visual issue:

## Issue description

Please describe the bug as clearly as possible. How can we reproduce the
problem without additional resources (including external data files and
proprietary Python modules)?
saved_models/tensorboard/.github/ISSUE_TEMPLATE/feature_request.md
ADDED
@@ -0,0 +1,11 @@
---
name: Feature request
about: Suggest an enhancement to TensorBoard
title: ''
labels: ''
assignees: ''

---

Please describe the problem that you’re facing and the enhancements that
you’d like to see. Feel free to include screenshots or code samples.
saved_models/tensorboard/.github/ISSUE_TEMPLATE/installation_problem.md
ADDED
@@ -0,0 +1,31 @@
---
name: Installation problem
about: Report a problem installing or starting TensorBoard
title: ''
labels: ''
assignees: ''

---

Use this template if you have a problem building, installing,
configuring, or starting TensorBoard and you suspect that there is a
problem with TensorBoard itself.

Consider first reaching out to Stack Overflow for support—they have a
larger community with better searchability:

https://stackoverflow.com/questions/tagged/tensorboard

## Environment information (required)

Please run `diagnose_tensorboard.py` (link below) in the same
environment from which you normally run TensorFlow/TensorBoard, and
paste the output here:

https://raw.githubusercontent.com/tensorflow/tensorboard/master/tensorboard/tools/diagnose_tensorboard.py

## Steps to reproduce (required)

Please provide explicit commands to reproduce the problem in a fresh
environment (virtualenv, Conda environment, …). Include any necessary
configuration, such as Conda `environment.yml` files.
saved_models/tensorboard/.github/PULL_REQUEST_TEMPLATE.md
ADDED
@@ -0,0 +1,9 @@
* Motivation for features / changes

* Technical description of changes

* Screenshots of UI changes

* Detailed steps to verify changes work correctly (as executed by you)

* Alternate designs / implementations considered
saved_models/tensorboard/.github/workflows/ci.yml
ADDED
@@ -0,0 +1,160 @@
# GitHub Actions CI definition for TensorBoard.
#
# YAML schema for GitHub Actions:
# https://help.github.com/en/actions/automating-your-workflow-with-github-actions/workflow-syntax-for-github-actions
#
# Helpful YAML parser to clarify YAML syntax:
# https://yaml-online-parser.appspot.com/

# For now, we only use GitHub Actions for lint checks, pending better
# support for hermetic-style caching. See:
# https://github.com/actions/cache/issues/109
name: CI

on:
  push:
    branches:
      - master
      - '[0-9]+.*'
      - 'ci-*'
  pull_request: {}

env:
  BUILDTOOLS_VERSION: '3.0.0'
  BUILDIFIER_SHA256SUM: 'e92a6793c7134c5431c58fbc34700664f101e5c9b1c1fcd93b97978e8b7f88db'
  BUILDOZER_SHA256SUM: '3d58a0b6972e4535718cdd6c12778170ea7382de7c75bc3728f5719437ffb84d'

jobs:
  lint-python-flake8:
    runs-on: ubuntu-16.04
    strategy:
      fail-fast: false
      matrix:
        # flake8 should run on each Python version that we target,
        # because the errors and warnings can differ due to language
        # changes, and we want to catch them all.
        python_version: ['3.5', '3.7']
    steps:
      - uses: actions/checkout@v1
      - uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python_version }}
          architecture: 'x64'
      - name: 'Install flake8'
        run: |
          python -m pip install -U pip
          pip install flake8 -c ./tensorboard/pip_package/requirements_dev.txt
      - run: pip freeze --all
      - name: 'Lint Python code for errors with flake8'
        # See: http://flake8.pycqa.org/en/3.7.8/user/error-codes.html
        # Use the comment '# noqa: <error code>' to suppress.
        run: flake8 . --count --select=E9,F63,F7,F82,F401 --show-source --statistics

  lint-python:
    runs-on: ubuntu-16.04
    steps:
      - uses: actions/checkout@v1
      - uses: actions/setup-python@v1
        with:
          python-version: '3.6'
          architecture: 'x64'
      - name: 'Install black'
        run: |
          python -m pip install -U pip
          pip install black -c ./tensorboard/pip_package/requirements_dev.txt
      - run: pip freeze --all
      - name: 'Lint Python code for style with Black'
        # You can run `black .` to fix all Black complaints.
        run: black --check --diff .

  lint-docs:
    runs-on: ubuntu-16.04
    steps:
      - uses: actions/checkout@v1
      - uses: actions/setup-python@v1
        with:
          python-version: '3.6'
          architecture: 'x64'
      - name: 'Install yamllint'
        run: |
          python -m pip install -U pip
          pip install yamllint -c ./tensorboard/pip_package/requirements_dev.txt
      - run: pip freeze --all
      - name: 'Lint YAML for gotchas with yamllint'
        # Use '# yamllint disable-line rule:foo' to suppress.
        run: yamllint -c docs/.yamllint docs docs/.yamllint
      - name: 'Install the TensorFlow docs notebook tools'
        run: |
          nbfmt_version="174c9a5c1cc51a3af1de98d84824c811ecd49029"
          python3 -m pip install -U git+https://github.com/tensorflow/docs@${nbfmt_version}
      - name: 'Use nbfmt to check Colab notebooks for formatting'
        run: git ls-files -z '*.ipynb' | xargs -0 python3 -m tensorflow_docs.tools.nbfmt --test

  lint-frontend:
    runs-on: ubuntu-16.04
    steps:
      - uses: actions/checkout@v1
      - uses: actions/setup-node@v1
      - run: yarn install --ignore-engines
      # You can run `yarn fix-lint` to fix all Prettier complaints.
      - run: yarn lint
      # Make sure no tests are skipped with "focused" tests.
      - run: |
          ! git grep -E 'f(it|describe)\(' 'tensorboard/*_test.ts'
      # Make sure no one depends on Angular material and CDK directly. Please
      # import the indirection in //tensorboard/webapp/angular.
      - run: |
          ! git grep -E '"@npm//@angular/material"|"@npm//@angular/cdk"' 'tensorboard/*/BUILD' ':!tensorboard/webapp/BUILD' ':!tensorboard/webapp/angular/BUILD'
      # Cannot directly depend on d3 in webapp. Must depend on
      # `//tensorboard/webapp/third_party:d3` instead.
      - run: |
          ! git grep -E '@npm//d3' 'tensorboard/webapp/**/*BUILD' ':!tensorboard/webapp/third_party/**'

  lint-build:
    runs-on: ubuntu-16.04
    steps:
      - uses: actions/checkout@v1
      - name: 'Set up Buildifier'
        run: |
          ci/download_buildifier.sh "${BUILDTOOLS_VERSION}" "${BUILDIFIER_SHA256SUM}" ~/buildifier
          sudo mv ~/buildifier /usr/local/bin/buildifier
      - name: 'Set up Buildozer'
        run: |
          ci/download_buildozer.sh "${BUILDTOOLS_VERSION}" "${BUILDOZER_SHA256SUM}" ~/buildozer
          sudo mv ~/buildozer /usr/local/bin/buildozer
      - name: 'Lint BUILD files'
        # TODO(tensorboard-team): address all lint warnings and remove the exemption.
        run:
          git ls-files -z '*BUILD' third_party/js.bzl third_party/workspace.bzl | xargs -0 buildifier --mode=check --lint=warn
          --warnings=-native-py,-native-java
      - run: ./tensorboard/tools/mirror_urls_test.sh
      - name: 'Lint for no py2 BUILD targets'
        # Use | to start a literal so YAML doesn't complain about the '!' character.
        run: |
          ! git grep 'python_version = "PY2"' '*BUILD'
      - name: 'No comments on licenses rule'
        # Assert buildozer error code for 'success, when no changes were made'.
        # https://github.com/bazelbuild/buildtools/blob/master/buildozer/README.md#error-code
        run: |
          buildozer '//tensorboard/...:%licenses' remove_comment && false || test $? = 3

  lint-proto:
    runs-on: ubuntu-16.04
    steps:
      - uses: actions/checkout@v1
      - name: clang-format lint
        uses: DoozyX/clang-format-lint-action@v0.5
        with:
          source: ./tensorboard
          # Exclude tensorboard/compat because the source of truth is TensorFlow.
          exclude: ./tensorboard/compat/proto
          extensions: 'proto'
          clangFormatVersion: 9

  check-misc:
    runs-on: ubuntu-16.04
    steps:
      - uses: actions/checkout@v1
      - run: ./tensorboard/tools/do_not_submit_test.sh
      - run: ./tensorboard/tools/license_test.sh
      - run: ./tensorboard/tools/whitespace_hygiene_test.py
saved_models/tensorboard/.gitignore
ADDED
@@ -0,0 +1,7 @@
/bazel-*
/.idea
.DS_Store
*.pyc
*.egg-info/
*.swp
node_modules
saved_models/tensorboard/.prettierrc.json
ADDED
@@ -0,0 +1,6 @@
{
  "singleQuote": true,
  "trailingComma": "es5",
  "bracketSpacing": false,
  "arrowParens": "always"
}
saved_models/tensorboard/.travis.yml
ADDED
@@ -0,0 +1,110 @@
dist: xenial
language: python
python:
  - "3.6"

branches:
  only:
    - master
    - /^\d+\.\d+(\.\d+)?(-\S*)?$/

# Update the `nvm use` stanza below when updating this.
node_js:
  - "11"

env:
  # Keep this Bazel version in sync with the `versions.check` directive
  # near the top of our WORKSPACE file.
  #
  # Grab the BAZEL_SHA256SUM from the Bazel releases page; e.g.:
  # bazel-0.20.0-linux-x86_64.sha256
  global:
    - BAZEL=2.1.0
    - BAZEL_SHA256SUM=e13581d44faad6ac807dd917e682fef20359d26728166ac35dadd8ee653a580d
  matrix:
    - TF_VERSION_ID=tf-nightly
    - TF_VERSION_ID=  # Do not install TensorFlow in this case

cache:
  # Don't cache the Pip directory. We pull in a new `tf-nightly` wheel
  # every day, and Pip caches are never evicted, so this quickly bloats
  # to many gigabytes and adds minutes to the CI time.
  pip: false
  # Cache directories for Bazel. See ci/bazelrc for details.
  directories:
    - $HOME/.cache/tb-bazel-repo
    - $HOME/.cache/tb-bazel-disk

# Each bullet point is displayed in the Travis log as one collapsed line, which
# indicates how long it took. Travis will check the return code at the end. We
# can't use `set -e` in the YAML file since it might impact Travis internals.
# If inline scripts get too long, Travis surprisingly prints them twice.

before_install:
  - elapsed() { TZ=UTC printf "Time %(%T)T %s\n" "$SECONDS" "$1"; }
  - elapsed "before_install"
  - ci/download_bazel.sh "${BAZEL}" "${BAZEL_SHA256SUM}" ~/bazel
  - sudo mv ~/bazel /usr/local/bin/bazel
  - cp ci/bazelrc ~/.bazelrc
  - elapsed "before_install (done)"

install:
  - elapsed "install"
  - "PY3=\"$(python -c 'if __import__(\"sys\").version_info[0] > 2: print(1)')\""
  # Older versions of Pip sometimes resolve specifiers like `tf-nightly`
  # to versions other than the most recent(!).
  - pip install -U pip
  # Uninstall older Travis numpy to avoid upgrade-in-place issues.
  - pip uninstall -y numpy
  - |
    pip install \
      -r tensorboard/pip_package/requirements.txt \
      -r tensorboard/pip_package/requirements_dev.txt \
      ;
  # Keep the node version in sync with node_js key above.
  - nvm use v11
  - |
    # Install TensorFlow if requested
    if [ -n "${TF_VERSION_ID}" ]; then
      pip install -I "${TF_VERSION_ID}"
    fi
  # Workaround for https://github.com/travis-ci/travis-ci/issues/7940
  - sudo rm -f /etc/boto.cfg
  - pip freeze  # print installed distributions, for debugging purposes
  - elapsed "install (done)"

before_script:
  # Note: Lint checks happen on GitHub Actions; see .github/workflows/ci.yml.
  - elapsed "before_script"
  - |
    # Specify subset of tests to run depending on TF installation config.
    # We condition the value of --test_tag_filters so that we can run the
    # bazel test command unconditionally which produces nicer log output.
    if [ -z "${TF_VERSION_ID}" ]; then
      test_tag_filters=support_notf
    else
      test_tag_filters=
    fi
  - elapsed "before_script (done)"

# Commands in this section should only fail if it's our fault. Travis will
# categorize them as 'failed', rather than 'error' for other sections.
script:
  - elapsed "script"
  # Note: bazel test implies fetch+build, but this gives us timing.
  - elapsed && bazel fetch //tensorboard/...
  - elapsed && bazel build //tensorboard/...
  - elapsed && bazel test //tensorboard/... --test_tag_filters="${test_tag_filters}"
  - elapsed && bazel run //tensorboard/pip_package:test_pip_package -- --tf-version "${TF_VERSION_ID}"
  # Run manual S3 test
  - elapsed && bazel test //tensorboard/compat/tensorflow_stub:gfile_s3_test
  - elapsed && bazel test //tensorboard/summary/writer:event_file_writer_s3_test
  - elapsed "script (done)"

after_script:
  # Bazel launches daemons unless --batch is used.
  - elapsed "after_script"
  - bazel shutdown

notifications:
  email: false
saved_models/tensorboard/.vscode/settings.json
ADDED
@@ -0,0 +1,3 @@
{
  "python.pythonPath": "/usr/bin/python"
}
saved_models/tensorboard/ADDING_A_PLUGIN.md
ADDED
@@ -0,0 +1,194 @@
# Developing a TensorBoard plugin

## Overview

This document explains high-level concepts using the [basic summary example][example-basic] and provides guidelines on plugin authorship.

To get started right away, you can clone one of these examples:
- [Basic summary 'Greeter'](./tensorboard/examples/plugins/example_basic)
- [Raw scalars](./tensorboard/examples/plugins/example_raw_scalars)

![Example screenshot](./docs/images/example_basic.png "Basic example plugin")

[example-basic]: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/examples/plugins/example_basic

### Architecture

You know (and, we hope, love!) TensorBoard’s core features! However, in every TensorBoard user’s life, there comes a time when you want some cool new visualization that just doesn’t exist yet. That’s what the plugin system is for.

A plugin comprises three components:

- The **Backend** is where you write Python code that post-processes your data and serves it to your plugin frontend in the browser.
- The **Frontend** is where your custom visualization lives.
- The optional **Summary** component is how users of your plugin will write data that your plugin can read from their TensorFlow programs. See the [docs](https://www.tensorflow.org/api_docs/python/tf/summary) for details.

The backend and frontend operate within a plugin lifecycle:

- **1) Plugin initializes**: When a user starts `tensorboard --logdir ...`, TensorBoard discovers available plugins, allows them to parse command-line flags if needed, and configures URL routes to be served.

- **2) User loads TensorBoard**: When a user opens the frontend in a web browser, TensorBoard reads plugin frontend metadata and collects all active plugins.

- **3) User opens the dashboard**: When a user selects the plugin's dashboard in the UI, TensorBoard loads an IFrame with the plugin's ES module and tells it to render.

- **4) Plugin handles routes**: When a plugin's frontend makes URL requests to its backend, route handlers can respond with collected data.


### Backend: How the plugin processes data, and sends it to the browser

#### Terminology

First, let's define some terminology used in TensorBoard. Definitions can be found in [`base_plugin.py`].

- `TBPlugin`: The base class for all plugins. Can be used as an entry point. Defining a `TBPlugin` is required.
- `TBLoader`: The base class for plugins requiring flag parsing or custom loading. Defining a `TBLoader` is optional.
- `TBContext`: The container of information passed from TensorBoard core to plugins when they are constructed. Includes `logdir`, `flags`, `multiplexer`, etc.
- `EventMultiplexer`: The mechanism for reading event data across runs and tags. Other multiplexers exist for database providers, etc. Do not read events directly.

A plugin backend is responsible for providing information about its frontend counterpart, serving frontend resources, and surfacing necessary data to the frontend by implementing routes (endpoints). TensorBoard begins by detecting plugins using the [Python `entry_points` mechanism][entrypoints-spec]; see the example plugin's [`setup.py`][entrypoints-declaration] for a full example of how to declare a plugin. The entry point must define either a `TBPlugin` or `TBLoader` class.

[entrypoints-spec]: https://packaging.python.org/specifications/entry-points/
[entrypoints-declaration]: https://github.com/tensorflow/tensorboard/blob/373eb09e4c5d2b3cc2493f0949dc4be6b6a45e81/tensorboard/plugins/example/setup.py#L31-L35
[`base_plugin.py`]: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/base_plugin.py

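For orientation, here is a minimal sketch of what such an entry-point declaration looks like in `setup.py`. The `tensorboard_plugins` entry-point group is the one TensorBoard scans; the package and class names below are illustrative assumptions, not fixed requirements:

```python
import setuptools

setuptools.setup(
    name="tensorboard_plugin_my_awesome_plugin",  # hypothetical package name
    version="0.1.0",
    packages=["tensorboard_plugin_my_awesome_plugin"],
    entry_points={
        # TensorBoard discovers plugins registered under this group.
        "tensorboard_plugins": [
            "my_awesome_plugin = tensorboard_plugin_my_awesome_plugin.plugin:MyPlugin",
        ],
    },
)
```
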
You can start building the backend by subclassing `TBPlugin` in [`base_plugin.py`] with this structure:

```python
class MyPlugin(base_plugin.TBPlugin):
    plugin_name = "my_awesome_plugin"

    def __init__(self, context):
        ...

    def get_plugin_apps(self):
        return {"/tags": self._serve_tags}

    ### Upon loading TensorBoard in browser
    def is_active(self):
        ...

    def frontend_metadata(self):
        return base_plugin.FrontendMetadata(
            es_module_path="/index.js", tab_name="Awesome ML"
        )

    ### Route handling
    def _serve_tags(self):
        ...  # A WSGI application that responds to the request.
```

#### TBPlugin
- `plugin_name`: Required field used as a unique ID for the plugin. This must contain only alphanumeric characters, hyphens, and underscores.
- `get_plugin_apps()`: This should return a `dict` mapping route paths to WSGI applications: e.g., `"/tags"` might map to `self._serve_tags` (see the route-handler sketch after this list).
- `is_active()`: This should return whether the plugin is active (whether there exists relevant data for the plugin to process). TensorBoard will hide inactive plugins from the main navigation bar. We strongly recommend that this be a cheap operation.
- `frontend_metadata()`: Defines how the plugin will be displayed on the frontend. See [`base_plugin.FrontendMetadata()`](https://github.com/tensorflow/tensorboard/blob/18dec9279e18a8222c9d83f90219ecddad591c46/tensorboard/plugins/base_plugin.py#L101).
  - `disable_reload`: Whether to disable the reload button and auto-reload timer. A `bool`; defaults to `False`.
  - `es_module_path`: ES module to use as an entry point to this plugin. A `str` that is a key in the result of `get_plugin_apps()`.
  - `remove_dom`: Whether to remove the plugin DOM when switching to a different plugin. A `bool`; defaults to `False`.
  - `tab_name`: Name to show in the menu item for this dashboard within the navigation bar. May differ from the plugin name. An optional `str` that defaults to the plugin name.

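Since `get_plugin_apps()` maps routes to WSGI applications, each handler is typically a small WSGI callable. Below is a minimal sketch, assuming a Werkzeug-based handler and a hypothetical `self._tags` dictionary populated from the multiplexer; it is one way to satisfy the contract, not the only one:

```python
import json

from tensorboard.plugins import base_plugin
from werkzeug import wrappers


class MyPlugin(base_plugin.TBPlugin):
    # ... fields and methods as in the skeleton above ...

    @wrappers.Request.application
    def _serve_tags(self, request):
        # `self._tags` is a hypothetical {run: [tag, ...]} mapping built from
        # the multiplexer. The decorator adapts this method into a WSGI
        # application, which is what get_plugin_apps() must return.
        return wrappers.Response(
            json.dumps(self._tags), content_type="application/json"
        )
```
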
If your plugin requires parsing flags or custom loading, consider defining a `TBLoader` as the entry point. Doing so is optional.

For example:

```python
class MyLoader(base_plugin.TBLoader):
    def define_flags(self, parser):
        parser.add_argument_group('custom').add_argument('--enable_my_extras')

    def fix_flags(self, flags):
        if flags.enable_my_extras:
            raise ValueError('Extras not ready')

    def load(self, context):
        return MyPlugin(context)
```

#### TBLoader
- `define_flags(parser)`: Optional method that takes an `argparse.ArgumentParser` and exposes command-line flags. Please prefix flags with the name of the plugin to avoid collisions.
- `fix_flags(flags)`: Optional method for fixing or sanitizing parsed command-line flags.
- `load(context)`: Required method that takes a `TBContext` and returns a `TBPlugin` instance.

It's recommended that plugins using flags call `parser.add_argument_group(plugin_name)`. To learn more about defining flags, see the [argparse docs](https://docs.python.org/library/argparse.html#adding-arguments).


## Reading data from event files

On instantiation, a plugin is provided a [`PluginEventMultiplexer`] object as a field on the `TBContext`, from which to read data. The `PluginRunToTagToContent` method on the multiplexer returns a dictionary containing all run–tag pairs and associated summary metadata for your plugin.

Plugins are not technically restricted from arbitrary file system and network access, but we strongly recommend using the multiplexer exclusively. It abstracts over the filesystem (local or remote), provides a consistent user experience for runs and tags across plugins, and is optimized for TensorBoard read patterns.

[`PluginEventMultiplexer`]: https://github.com/tensorflow/tensorboard/blob/master/tensorboard/backend/event_processing/plugin_event_multiplexer.py

Example use of the multiplexer:

```python
class MyPlugin(base_plugin.TBPlugin):
    def __init__(self, context):
        self.multiplexer = context.multiplexer

    def preprocess_data(self):
        """
        {runName: {images: [tag1, tag2, tag3],
                   scalarValues: [tagA, tagB, tagC],
                   histograms: [tagX, tagY, tagZ],
                   compressedHistograms: [tagX, tagY, tagZ],
                   graph: true, meta_graph: true}}
        """
        runs = self.multiplexer.Runs()

        """
        [
          {wall_time: 100..., step: 1, tensor_proto: ...},
          {wall_time: 100..., step: 2, tensor_proto: ...},
          ...
        ]
        """
        events = self.multiplexer.Tensors(run, tag)

        """{run: {tag: content}, ...}"""
        content = self.multiplexer.PluginRunToTagToContent(plugin_name)
```

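Each event's `tensor_proto` is a `TensorProto`; a small sketch of decoding it into a NumPy array, assuming TensorFlow is installed and `multiplexer`, `run`, and `tag` are as in the example above:

```python
import tensorflow as tf


def read_tag_values(multiplexer, run, tag):
    # Decode each event's TensorProto into a NumPy array via the public
    # tf.make_ndarray helper.
    for event in multiplexer.Tensors(run, tag):
        value = tf.make_ndarray(event.tensor_proto)
        print(event.step, event.wall_time, value)
```
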
For the complete `EventMultiplexer` API, see [`PluginEventMultiplexer`].

### Frontend: How the plugin visualizes your new data

Now that we have an API, it’s time for the cool part: adding a visualization!

TensorBoard does not impose any framework or tooling requirements for building a frontend—you can use React, Vue.js, jQuery, the DOM API, or any other framework, and use, for example, Webpack to create a JavaScript bundle. TensorBoard only requires an [ES Module] that serves as the entry point to your frontend ([example ES module][example-es-module]). Do note that all frontend resources have to be served by the plugin backend ([example backend][example-backend]).

When the dashboard opens, TensorBoard creates an IFrame, loads the ES module defined by the backend's metadata, and calls the module's `render()` method.

[ES Module]: https://hacks.mozilla.org/2018/03/es-modules-a-cartoon-deep-dive/
[example-es-module]: https://github.com/tensorflow/tensorboard/blob/373eb09e4c5d2b3cc2493f0949dc4be6b6a45e81/tensorboard/plugins/example/tensorboard_plugin_example/static/index.js#L16
[example-backend]: https://github.com/tensorflow/tensorboard/blob/373eb09e4c5d2b3cc2493f0949dc4be6b6a45e81/tensorboard/plugins/example/tensorboard_plugin_example/plugin.py#L45

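As a concrete illustration of serving those frontend resources from the backend, here is a minimal sketch of a route that serves a static `index.js`; the `static` directory layout and the Werkzeug-based handler are assumptions for illustration:

```python
import os

from tensorboard.plugins import base_plugin
from werkzeug import wrappers

_STATIC_DIR = os.path.join(os.path.dirname(__file__), "static")


class MyPlugin(base_plugin.TBPlugin):
    # ...

    @wrappers.Request.application
    def _serve_index_js(self, request):
        # Serve the bundled ES module; this route would be registered in
        # get_plugin_apps() under "/index.js" (the es_module_path).
        with open(os.path.join(_STATIC_DIR, "index.js"), "rb") as f:
            contents = f.read()
        return wrappers.Response(contents, content_type="application/javascript")
```
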
Consistency in user interface and experience, we believe, is important for happy users; for example, run selection should behave consistently across all plugins in TensorBoard. To that end, we _will_ provide a library of UI components that can be bundled into your frontend binary, helping you build dashboards like the Scalars dashboard (please follow [issue #2357][dynamic-plugin-tracking-bug] for progress).

[dynamic-plugin-tracking-bug]: https://github.com/tensorflow/tensorboard/issues/2357

We recommend that you vendor all resources required to use your plugin, including scripts, stylesheets, fonts, and images. All built-in TensorBoard plugins follow this policy.


### Summaries: How the plugin gets data

Your plugin will need to provide a way for users to log **summaries**, which are the mechanism for getting data from a TensorFlow model to disk and eventually into your TensorBoard plugin for visualization. For example, the example plugin provides a novel [“greeting” TensorFlow op][greeting-op] that writes greeting summaries. A summary is a protocol buffer with the following information (see the writing sketch after this list):

- tag: A string that uniquely identifies a data series, often supplied by the user (e.g., “loss”).
- step: A temporal index (an integer), often a batch number or epoch number.
- tensor: The actual value for a tag–step combination, as a tensor of arbitrary shape and dtype (e.g., `0.123`, or `["one", "two"]`).
- metadata: Specifies [which plugin owns the summary][owner-identifier], and provides an arbitrary plugin-specific payload.

[greeting-op]: https://github.com/tensorflow/tensorboard/blob/373eb09e4c5d2b3cc2493f0949dc4be6b6a45e81/tensorboard/plugins/example/tensorboard_plugin_example/summary_v2.py#L28-L48
[owner-identifier]: https://github.com/tensorflow/tensorboard/blob/373eb09e4c5d2b3cc2493f0949dc4be6b6a45e81/tensorboard/plugins/example/tensorboard_plugin_example/summary_v2.py#L64

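A minimal sketch of writing such a summary in TF2, loosely following the basic example plugin; the plugin name, tag, and log directory below are illustrative assumptions:

```python
import tensorflow as tf
from tensorboard.compat.proto import summary_pb2


def my_summary(tag, value, step):
    # Mark the summary as owned by our (hypothetical) plugin so that
    # PluginRunToTagToContent("my_awesome_plugin") will surface it.
    metadata = summary_pb2.SummaryMetadata()
    metadata.plugin_data.plugin_name = "my_awesome_plugin"
    return tf.summary.write(
        tag=tag,
        tensor=tf.constant(value),
        step=step,
        metadata=metadata.SerializeToString(),
    )


writer = tf.summary.create_file_writer("/tmp/demo_logdir")
with writer.as_default():
    my_summary("greetings", "hello world", step=0)
```
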
## Distribution

A plugin should be distributed as a Pip package, and may be uploaded to PyPI. See the [PyPI distribution archive upload guide][pypi-upload] for more information.

[pypi-upload]: https://packaging.python.org/tutorials/packaging-projects/#uploading-the-distribution-archives

## Guideline on naming and branding

We recommend that your plugin have an intuitive name that reflects its functionality: users, seeing the name, should be able to tell that it is a TensorBoard plugin and what it does. We also recommend that you include the name of the plugin in the name of the Pip package. For instance, a plugin `foo` should be distributed in a Pip package named `tensorboard_plugin_foo`.

A predictable package naming scheme not only helps users find your plugin, but also helps you find a unique plugin name by surveying PyPI. TensorBoard requires that all loaded plugins have unique names. However, the plugin name can differ from the [user-facing display name][display-name]; display names are not strictly required to be unique.

[display-name]: https://github.com/tensorflow/tensorboard/blob/373eb09e4c5d2b3cc2493f0949dc4be6b6a45e81/tensorboard/plugins/base_plugin.py#L35-L39

Lastly, when distributing a custom TensorBoard plugin, we recommend that it be branded as “Foo for TensorBoard” (rather than “TensorBoard Foo”). TensorBoard is distributed under the Apache 2.0 license, but the name itself is a trademark of Google LLC.
saved_models/tensorboard/AUTHORS
ADDED
@@ -0,0 +1,10 @@
# This is the official list of TensorFlow authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.

# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.

Google Inc.
Yuan Tang <terrytangyuan@gmail.com>
saved_models/tensorboard/BUILD
ADDED
@@ -0,0 +1,24 @@
load("@npm_bazel_typescript//:index.bzl", "ts_config")

licenses(["notice"])

exports_files(["tsconfig.json"])

ts_config(
    name = "tsconfig-test",
    src = "tsconfig-test.json",
    visibility = [
        "//tensorboard:internal",
    ],
    deps = [":tsconfig.json"],
)

# Inspired by internal tsconfig generation for projects like TensorBoard.
ts_config(
    name = "tsconfig-lax",
    src = "tsconfig-lax.json",
    visibility = [
        "//tensorboard:internal",
    ],
    deps = [],
)
saved_models/tensorboard/BUILD-local_execution_config_python.txt
ADDED
@@ -0,0 +1,290 @@
licenses(["restricted"])

package(default_visibility = ["//visibility:public"])

# Point both runtimes to the same python binary to ensure we always
# use the python binary specified by ./configure.py script.
load("@bazel_tools//tools/python:toolchain.bzl", "py_runtime_pair")

py_runtime(
    name = "py2_runtime",
    interpreter_path = "C:\\Users\\llmhy\\AppData\\Local\\Programs\\Python\\Python310\\python.exe",
    python_version = "PY2",
)

py_runtime(
    name = "py3_runtime",
    interpreter_path = "C:\\Users\\llmhy\\AppData\\Local\\Programs\\Python\\Python310\\python.exe",
    python_version = "PY3",
)

py_runtime_pair(
    name = "py_runtime_pair",
    py2_runtime = ":py2_runtime",
    py3_runtime = ":py3_runtime",
)

toolchain(
    name = "py_toolchain",
    toolchain = ":py_runtime_pair",
    toolchain_type = "@bazel_tools//tools/python:toolchain_type",
    target_compatible_with = ["@local_execution_config_platform//:platform_constraint"],
    exec_compatible_with = ["@local_execution_config_platform//:platform_constraint"],
)

# To build Python C/C++ extension on Windows, we need to link to python import library pythonXY.lib
# See https://docs.python.org/3/extending/windows.html
cc_import(
    name = "python_lib",
    interface_library = select({
        ":windows": ":python_import_lib",
        # A placeholder for Unix platforms which makes --no_build happy.
        "//conditions:default": "not-existing.lib",
    }),
    system_provided = 1,
)

cc_library(
    name = "python_headers",
    hdrs = [":python_include"],
    deps = select({
        ":windows": [":python_lib"],
        "//conditions:default": [],
    }),
    includes = ["python_include"],
)

cc_library(
    name = "numpy_headers",
    hdrs = [":numpy_include"],
    includes = ["numpy_include"],
)

config_setting(
    name = "windows",
    values = {"cpu": "x64_windows"},
    visibility = ["//visibility:public"],
)

genrule(
    name = "python_include",
    outs = [
        "python_include/Python.h",
        "python_include/abstract.h",
        "python_include/bltinmodule.h",
        "python_include/boolobject.h",
        "python_include/bytearrayobject.h",
        "python_include/bytesobject.h",
        "python_include/cellobject.h",
        "python_include/ceval.h",
        "python_include/classobject.h",
        "python_include/code.h",
        "python_include/codecs.h",
        "python_include/compile.h",
        "python_include/complexobject.h",
        "python_include/context.h",
        "python_include/cpython/abstract.h",
        "python_include/cpython/bytearrayobject.h",
        "python_include/cpython/bytesobject.h",
        "python_include/cpython/ceval.h",
        "python_include/cpython/code.h",
        "python_include/cpython/compile.h",
        "python_include/cpython/dictobject.h",
        "python_include/cpython/fileobject.h",
        "python_include/cpython/fileutils.h",
        "python_include/cpython/frameobject.h",
        "python_include/cpython/import.h",
        "python_include/cpython/initconfig.h",
        "python_include/cpython/interpreteridobject.h",
        "python_include/cpython/listobject.h",
        "python_include/cpython/methodobject.h",
        "python_include/cpython/object.h",
        "python_include/cpython/objimpl.h",
        "python_include/cpython/odictobject.h",
        "python_include/cpython/picklebufobject.h",
        "python_include/cpython/pyctype.h",
        "python_include/cpython/pydebug.h",
        "python_include/cpython/pyerrors.h",
        "python_include/cpython/pyfpe.h",
        "python_include/cpython/pylifecycle.h",
        "python_include/cpython/pymem.h",
        "python_include/cpython/pystate.h",
        "python_include/cpython/pythonrun.h",
        "python_include/cpython/pytime.h",
        "python_include/cpython/sysmodule.h",
        "python_include/cpython/traceback.h",
        "python_include/cpython/tupleobject.h",
        "python_include/cpython/unicodeobject.h",
        "python_include/datetime.h",
        "python_include/descrobject.h",
        "python_include/dictobject.h",
        "python_include/dynamic_annotations.h",
        "python_include/enumobject.h",
        "python_include/errcode.h",
        "python_include/eval.h",
        "python_include/exports.h",
        "python_include/fileobject.h",
        "python_include/fileutils.h",
        "python_include/floatobject.h",
        "python_include/frameobject.h",
        "python_include/funcobject.h",
        "python_include/genericaliasobject.h",
        "python_include/genobject.h",
        "python_include/import.h",
        "python_include/internal/pycore_abstract.h",
        "python_include/internal/pycore_accu.h",
        "python_include/internal/pycore_asdl.h",
        "python_include/internal/pycore_ast.h",
        "python_include/internal/pycore_ast_state.h",
        "python_include/internal/pycore_atomic.h",
        "python_include/internal/pycore_atomic_funcs.h",
        "python_include/internal/pycore_bitutils.h",
        "python_include/internal/pycore_blocks_output_buffer.h",
        "python_include/internal/pycore_bytes_methods.h",
        "python_include/internal/pycore_call.h",
        "python_include/internal/pycore_ceval.h",
        "python_include/internal/pycore_code.h",
        "python_include/internal/pycore_compile.h",
        "python_include/internal/pycore_condvar.h",
        "python_include/internal/pycore_context.h",
        "python_include/internal/pycore_dtoa.h",
        "python_include/internal/pycore_fileutils.h",
        "python_include/internal/pycore_format.h",
        "python_include/internal/pycore_gc.h",
        "python_include/internal/pycore_getopt.h",
        "python_include/internal/pycore_gil.h",
        "python_include/internal/pycore_hamt.h",
        "python_include/internal/pycore_hashtable.h",
        "python_include/internal/pycore_import.h",
        "python_include/internal/pycore_initconfig.h",
        "python_include/internal/pycore_interp.h",
        "python_include/internal/pycore_list.h",
        "python_include/internal/pycore_long.h",
        "python_include/internal/pycore_moduleobject.h",
        "python_include/internal/pycore_object.h",
        "python_include/internal/pycore_parser.h",
        "python_include/internal/pycore_pathconfig.h",
        "python_include/internal/pycore_pyarena.h",
        "python_include/internal/pycore_pyerrors.h",
        "python_include/internal/pycore_pyhash.h",
        "python_include/internal/pycore_pylifecycle.h",
        "python_include/internal/pycore_pymem.h",
        "python_include/internal/pycore_pystate.h",
        "python_include/internal/pycore_runtime.h",
        "python_include/internal/pycore_structseq.h",
        "python_include/internal/pycore_symtable.h",
        "python_include/internal/pycore_sysmodule.h",
        "python_include/internal/pycore_traceback.h",
        "python_include/internal/pycore_tuple.h",
        "python_include/internal/pycore_ucnhash.h",
        "python_include/internal/pycore_unionobject.h",
        "python_include/internal/pycore_warnings.h",
        "python_include/interpreteridobject.h",
        "python_include/intrcheck.h",
        "python_include/iterobject.h",
        "python_include/listobject.h",
        "python_include/longintrepr.h",
        "python_include/longobject.h",
        "python_include/marshal.h",
        "python_include/memoryobject.h",
        "python_include/methodobject.h",
        "python_include/modsupport.h",
        "python_include/moduleobject.h",
        "python_include/namespaceobject.h",
        "python_include/object.h",
        "python_include/objimpl.h",
        "python_include/opcode.h",
        "python_include/osdefs.h",
        "python_include/osmodule.h",
        "python_include/patchlevel.h",
        "python_include/py_curses.h",
        "python_include/pycapsule.h",
        "python_include/pyconfig.h",
        "python_include/pydtrace.h",
        "python_include/pyerrors.h",
        "python_include/pyexpat.h",
        "python_include/pyframe.h",
        "python_include/pyhash.h",
        "python_include/pylifecycle.h",
        "python_include/pymacconfig.h",
        "python_include/pymacro.h",
        "python_include/pymath.h",
        "python_include/pymem.h",
        "python_include/pyport.h",
        "python_include/pystate.h",
        "python_include/pystrcmp.h",
        "python_include/pystrhex.h",
        "python_include/pystrtod.h",
        "python_include/pythonrun.h",
        "python_include/pythread.h",
        "python_include/rangeobject.h",
        "python_include/setobject.h",
        "python_include/sliceobject.h",
        "python_include/structmember.h",
        "python_include/structseq.h",
        "python_include/sysmodule.h",
        "python_include/token.h",
        "python_include/traceback.h",
        "python_include/tracemalloc.h",
        "python_include/tupleobject.h",
        "python_include/typeslots.h",
        "python_include/unicodeobject.h",
        "python_include/warnings.h",
        "python_include/weakrefobject.h",
    ],
    cmd = """
cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/Python.h" "$(@D)/python_include/Python.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/abstract.h" "$(@D)/python_include/abstract.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/bltinmodule.h" "$(@D)/python_include/bltinmodule.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/boolobject.h" "$(@D)/python_include/boolobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/bytearrayobject.h" "$(@D)/python_include/bytearrayobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/bytesobject.h" "$(@D)/python_include/bytesobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cellobject.h" "$(@D)/python_include/cellobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/ceval.h" "$(@D)/python_include/ceval.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/classobject.h" "$(@D)/python_include/classobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/code.h" "$(@D)/python_include/code.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/codecs.h" "$(@D)/python_include/codecs.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/compile.h" "$(@D)/python_include/compile.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/complexobject.h" "$(@D)/python_include/complexobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/context.h" "$(@D)/python_include/context.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/abstract.h" "$(@D)/python_include/cpython/abstract.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/bytearrayobject.h" "$(@D)/python_include/cpython/bytearrayobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/bytesobject.h" "$(@D)/python_include/cpython/bytesobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/ceval.h" "$(@D)/python_include/cpython/ceval.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/code.h" "$(@D)/python_include/cpython/code.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/compile.h" "$(@D)/python_include/cpython/compile.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/dictobject.h" "$(@D)/python_include/cpython/dictobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/fileobject.h" "$(@D)/python_include/cpython/fileobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/fileutils.h" "$(@D)/python_include/cpython/fileutils.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/frameobject.h" "$(@D)/python_include/cpython/frameobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/import.h" "$(@D)/python_include/cpython/import.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/initconfig.h" "$(@D)/python_include/cpython/initconfig.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/interpreteridobject.h" "$(@D)/python_include/cpython/interpreteridobject.h" && cp -f 
"C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/listobject.h" "$(@D)/python_include/cpython/listobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/methodobject.h" "$(@D)/python_include/cpython/methodobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/object.h" "$(@D)/python_include/cpython/object.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/objimpl.h" "$(@D)/python_include/cpython/objimpl.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/odictobject.h" "$(@D)/python_include/cpython/odictobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/picklebufobject.h" "$(@D)/python_include/cpython/picklebufobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/pyctype.h" "$(@D)/python_include/cpython/pyctype.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/pydebug.h" "$(@D)/python_include/cpython/pydebug.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/pyerrors.h" "$(@D)/python_include/cpython/pyerrors.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/pyfpe.h" "$(@D)/python_include/cpython/pyfpe.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/pylifecycle.h" "$(@D)/python_include/cpython/pylifecycle.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/pymem.h" "$(@D)/python_include/cpython/pymem.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/pystate.h" "$(@D)/python_include/cpython/pystate.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/pythonrun.h" "$(@D)/python_include/cpython/pythonrun.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/pytime.h" "$(@D)/python_include/cpython/pytime.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/sysmodule.h" "$(@D)/python_include/cpython/sysmodule.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/traceback.h" "$(@D)/python_include/cpython/traceback.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/tupleobject.h" "$(@D)/python_include/cpython/tupleobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/cpython/unicodeobject.h" "$(@D)/python_include/cpython/unicodeobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/datetime.h" "$(@D)/python_include/datetime.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/descrobject.h" "$(@D)/python_include/descrobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/dictobject.h" "$(@D)/python_include/dictobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/dynamic_annotations.h" "$(@D)/python_include/dynamic_annotations.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/enumobject.h" "$(@D)/python_include/enumobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/errcode.h" "$(@D)/python_include/errcode.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/eval.h" "$(@D)/python_include/eval.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/exports.h" 
"$(@D)/python_include/exports.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/fileobject.h" "$(@D)/python_include/fileobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/fileutils.h" "$(@D)/python_include/fileutils.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/floatobject.h" "$(@D)/python_include/floatobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/frameobject.h" "$(@D)/python_include/frameobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/funcobject.h" "$(@D)/python_include/funcobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/genericaliasobject.h" "$(@D)/python_include/genericaliasobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/genobject.h" "$(@D)/python_include/genobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/import.h" "$(@D)/python_include/import.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_abstract.h" "$(@D)/python_include/internal/pycore_abstract.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_accu.h" "$(@D)/python_include/internal/pycore_accu.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_asdl.h" "$(@D)/python_include/internal/pycore_asdl.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_ast.h" "$(@D)/python_include/internal/pycore_ast.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_ast_state.h" "$(@D)/python_include/internal/pycore_ast_state.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_atomic.h" "$(@D)/python_include/internal/pycore_atomic.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_atomic_funcs.h" "$(@D)/python_include/internal/pycore_atomic_funcs.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_bitutils.h" "$(@D)/python_include/internal/pycore_bitutils.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_blocks_output_buffer.h" "$(@D)/python_include/internal/pycore_blocks_output_buffer.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_bytes_methods.h" "$(@D)/python_include/internal/pycore_bytes_methods.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_call.h" "$(@D)/python_include/internal/pycore_call.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_ceval.h" "$(@D)/python_include/internal/pycore_ceval.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_code.h" "$(@D)/python_include/internal/pycore_code.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_compile.h" "$(@D)/python_include/internal/pycore_compile.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_condvar.h" "$(@D)/python_include/internal/pycore_condvar.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_context.h" "$(@D)/python_include/internal/pycore_context.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_dtoa.h" 
"$(@D)/python_include/internal/pycore_dtoa.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_fileutils.h" "$(@D)/python_include/internal/pycore_fileutils.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_format.h" "$(@D)/python_include/internal/pycore_format.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_gc.h" "$(@D)/python_include/internal/pycore_gc.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_getopt.h" "$(@D)/python_include/internal/pycore_getopt.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_gil.h" "$(@D)/python_include/internal/pycore_gil.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_hamt.h" "$(@D)/python_include/internal/pycore_hamt.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_hashtable.h" "$(@D)/python_include/internal/pycore_hashtable.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_import.h" "$(@D)/python_include/internal/pycore_import.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_initconfig.h" "$(@D)/python_include/internal/pycore_initconfig.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_interp.h" "$(@D)/python_include/internal/pycore_interp.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_list.h" "$(@D)/python_include/internal/pycore_list.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_long.h" "$(@D)/python_include/internal/pycore_long.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_moduleobject.h" "$(@D)/python_include/internal/pycore_moduleobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_object.h" "$(@D)/python_include/internal/pycore_object.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_parser.h" "$(@D)/python_include/internal/pycore_parser.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_pathconfig.h" "$(@D)/python_include/internal/pycore_pathconfig.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_pyarena.h" "$(@D)/python_include/internal/pycore_pyarena.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_pyerrors.h" "$(@D)/python_include/internal/pycore_pyerrors.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_pyhash.h" "$(@D)/python_include/internal/pycore_pyhash.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_pylifecycle.h" "$(@D)/python_include/internal/pycore_pylifecycle.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_pymem.h" "$(@D)/python_include/internal/pycore_pymem.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_pystate.h" "$(@D)/python_include/internal/pycore_pystate.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_runtime.h" "$(@D)/python_include/internal/pycore_runtime.h" && cp -f 
"C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_structseq.h" "$(@D)/python_include/internal/pycore_structseq.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_symtable.h" "$(@D)/python_include/internal/pycore_symtable.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_sysmodule.h" "$(@D)/python_include/internal/pycore_sysmodule.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_traceback.h" "$(@D)/python_include/internal/pycore_traceback.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_tuple.h" "$(@D)/python_include/internal/pycore_tuple.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_ucnhash.h" "$(@D)/python_include/internal/pycore_ucnhash.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_unionobject.h" "$(@D)/python_include/internal/pycore_unionobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/internal/pycore_warnings.h" "$(@D)/python_include/internal/pycore_warnings.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/interpreteridobject.h" "$(@D)/python_include/interpreteridobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/intrcheck.h" "$(@D)/python_include/intrcheck.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/iterobject.h" "$(@D)/python_include/iterobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/listobject.h" "$(@D)/python_include/listobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/longintrepr.h" "$(@D)/python_include/longintrepr.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/longobject.h" "$(@D)/python_include/longobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/marshal.h" "$(@D)/python_include/marshal.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/memoryobject.h" "$(@D)/python_include/memoryobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/methodobject.h" "$(@D)/python_include/methodobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/modsupport.h" "$(@D)/python_include/modsupport.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/moduleobject.h" "$(@D)/python_include/moduleobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/namespaceobject.h" "$(@D)/python_include/namespaceobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/object.h" "$(@D)/python_include/object.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/objimpl.h" "$(@D)/python_include/objimpl.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/opcode.h" "$(@D)/python_include/opcode.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/osdefs.h" "$(@D)/python_include/osdefs.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/osmodule.h" "$(@D)/python_include/osmodule.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/patchlevel.h" "$(@D)/python_include/patchlevel.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/py_curses.h" "$(@D)/python_include/py_curses.h" && cp -f 
"C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pycapsule.h" "$(@D)/python_include/pycapsule.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pyconfig.h" "$(@D)/python_include/pyconfig.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pydtrace.h" "$(@D)/python_include/pydtrace.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pyerrors.h" "$(@D)/python_include/pyerrors.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pyexpat.h" "$(@D)/python_include/pyexpat.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pyframe.h" "$(@D)/python_include/pyframe.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pyhash.h" "$(@D)/python_include/pyhash.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pylifecycle.h" "$(@D)/python_include/pylifecycle.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pymacconfig.h" "$(@D)/python_include/pymacconfig.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pymacro.h" "$(@D)/python_include/pymacro.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pymath.h" "$(@D)/python_include/pymath.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pymem.h" "$(@D)/python_include/pymem.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pyport.h" "$(@D)/python_include/pyport.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pystate.h" "$(@D)/python_include/pystate.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pystrcmp.h" "$(@D)/python_include/pystrcmp.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pystrhex.h" "$(@D)/python_include/pystrhex.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pystrtod.h" "$(@D)/python_include/pystrtod.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pythonrun.h" "$(@D)/python_include/pythonrun.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/pythread.h" "$(@D)/python_include/pythread.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/rangeobject.h" "$(@D)/python_include/rangeobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/setobject.h" "$(@D)/python_include/setobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/sliceobject.h" "$(@D)/python_include/sliceobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/structmember.h" "$(@D)/python_include/structmember.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/structseq.h" "$(@D)/python_include/structseq.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/sysmodule.h" "$(@D)/python_include/sysmodule.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/token.h" "$(@D)/python_include/token.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/traceback.h" "$(@D)/python_include/traceback.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/tracemalloc.h" "$(@D)/python_include/tracemalloc.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/tupleobject.h" "$(@D)/python_include/tupleobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/typeslots.h" 
"$(@D)/python_include/typeslots.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/unicodeobject.h" "$(@D)/python_include/unicodeobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/warnings.h" "$(@D)/python_include/warnings.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/include/weakrefobject.h" "$(@D)/python_include/weakrefobject.h"
""",
)

genrule(
    name = "numpy_include",
    outs = [
        "numpy_include/numpy/.doxyfile",
        "numpy_include/numpy/__multiarray_api.h",
        "numpy_include/numpy/__ufunc_api.h",
        "numpy_include/numpy/_neighborhood_iterator_imp.h",
        "numpy_include/numpy/_numpyconfig.h",
        "numpy_include/numpy/arrayobject.h",
        "numpy_include/numpy/arrayscalars.h",
        "numpy_include/numpy/experimental_dtype_api.h",
        "numpy_include/numpy/halffloat.h",
        "numpy_include/numpy/libdivide/LICENSE.txt",
        "numpy_include/numpy/libdivide/libdivide.h",
        "numpy_include/numpy/multiarray_api.txt",
        "numpy_include/numpy/ndarrayobject.h",
        "numpy_include/numpy/ndarraytypes.h",
        "numpy_include/numpy/noprefix.h",
        "numpy_include/numpy/npy_1_7_deprecated_api.h",
        "numpy_include/numpy/npy_3kcompat.h",
        "numpy_include/numpy/npy_common.h",
        "numpy_include/numpy/npy_cpu.h",
        "numpy_include/numpy/npy_endian.h",
        "numpy_include/numpy/npy_interrupt.h",
        "numpy_include/numpy/npy_math.h",
        "numpy_include/numpy/npy_no_deprecated_api.h",
        "numpy_include/numpy/npy_os.h",
        "numpy_include/numpy/numpyconfig.h",
        "numpy_include/numpy/old_defines.h",
        "numpy_include/numpy/oldnumeric.h",
        "numpy_include/numpy/random/bitgen.h",
        "numpy_include/numpy/random/distributions.h",
        "numpy_include/numpy/ufunc_api.txt",
        "numpy_include/numpy/ufuncobject.h",
        "numpy_include/numpy/utils.h",
    ],
    cmd = """
cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/.doxyfile" "$(@D)/numpy_include/numpy/.doxyfile" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/__multiarray_api.h" "$(@D)/numpy_include/numpy/__multiarray_api.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/__ufunc_api.h" "$(@D)/numpy_include/numpy/__ufunc_api.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/_neighborhood_iterator_imp.h" "$(@D)/numpy_include/numpy/_neighborhood_iterator_imp.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/_numpyconfig.h" "$(@D)/numpy_include/numpy/_numpyconfig.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/arrayobject.h" "$(@D)/numpy_include/numpy/arrayobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/arrayscalars.h" "$(@D)/numpy_include/numpy/arrayscalars.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/experimental_dtype_api.h" "$(@D)/numpy_include/numpy/experimental_dtype_api.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/halffloat.h" "$(@D)/numpy_include/numpy/halffloat.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/libdivide/LICENSE.txt" "$(@D)/numpy_include/numpy/libdivide/LICENSE.txt" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/libdivide/libdivide.h" "$(@D)/numpy_include/numpy/libdivide/libdivide.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/multiarray_api.txt" "$(@D)/numpy_include/numpy/multiarray_api.txt" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/ndarrayobject.h" "$(@D)/numpy_include/numpy/ndarrayobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/ndarraytypes.h" "$(@D)/numpy_include/numpy/ndarraytypes.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/noprefix.h" "$(@D)/numpy_include/numpy/noprefix.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h" "$(@D)/numpy_include/numpy/npy_1_7_deprecated_api.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/npy_3kcompat.h" "$(@D)/numpy_include/numpy/npy_3kcompat.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/npy_common.h" "$(@D)/numpy_include/numpy/npy_common.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/npy_cpu.h" "$(@D)/numpy_include/numpy/npy_cpu.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/npy_endian.h" "$(@D)/numpy_include/numpy/npy_endian.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/npy_interrupt.h" 
"$(@D)/numpy_include/numpy/npy_interrupt.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/npy_math.h" "$(@D)/numpy_include/numpy/npy_math.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/npy_no_deprecated_api.h" "$(@D)/numpy_include/numpy/npy_no_deprecated_api.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/npy_os.h" "$(@D)/numpy_include/numpy/npy_os.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/numpyconfig.h" "$(@D)/numpy_include/numpy/numpyconfig.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/old_defines.h" "$(@D)/numpy_include/numpy/old_defines.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/oldnumeric.h" "$(@D)/numpy_include/numpy/oldnumeric.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/random/bitgen.h" "$(@D)/numpy_include/numpy/random/bitgen.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/random/distributions.h" "$(@D)/numpy_include/numpy/random/distributions.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/ufunc_api.txt" "$(@D)/numpy_include/numpy/ufunc_api.txt" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/ufuncobject.h" "$(@D)/numpy_include/numpy/ufuncobject.h" && cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/lib/site-packages/numpy/core/include/numpy/utils.h" "$(@D)/numpy_include/numpy/utils.h"
|
278 |
+
""",
|
279 |
+
)
|
280 |
+
|
281 |
+
genrule(
|
282 |
+
name = "python_import_lib",
|
283 |
+
outs = [
|
284 |
+
"python310.lib",
|
285 |
+
],
|
286 |
+
cmd = """
|
287 |
+
cp -f "C:/Users/llmhy/AppData/Local/Programs/Python/Python310/libs/python310.lib" "$(@D)/python310.lib"
|
288 |
+
""",
|
289 |
+
)
|
290 |
+
|
saved_models/tensorboard/CONTRIBUTING.md
ADDED
@@ -0,0 +1,22 @@
1 |
+
### Contributor License Agreements
|
2 |
+
|
3 |
+
We'd love to accept your patches! Before we can take them, we have to jump a couple of legal hurdles.
|
4 |
+
|
5 |
+
Please fill out either the individual or corporate Contributor License Agreement (CLA).
|
6 |
+
|
7 |
+
* If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html).
|
8 |
+
* If you work for a company that wants to allow you to contribute your work, then you'll need to sign a [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html).
|
9 |
+
|
10 |
+
Follow either of the two links above to access the appropriate CLA and instructions for how to sign and return it. Once we receive it, we'll be able to accept your pull requests.
|
11 |
+
|
12 |
+
***NOTE***: Only original source code from you and other people that have signed the CLA can be accepted into the main repository.
|
13 |
+
|
14 |
+
|
15 |
+
### Working with the team
|
16 |
+
If you're planning a larger contribution, please get in touch with the team through a GitHub issue before starting work - we can help guide you, and coordinating up front will make the process smoother.
|
17 |
+
|
18 |
+
If you want to add a major feature, it may be a good candidate for adding a plugin. Let us know via a GitHub issue, and we can guide you in the process.
|
19 |
+
|
20 |
+
### Code reviews
|
21 |
+
All submissions, including submissions by project members, require review. We
|
22 |
+
use GitHub pull requests for this purpose.
|
saved_models/tensorboard/DEVELOPMENT.md
ADDED
@@ -0,0 +1,117 @@
1 |
+
# How to write your own plugin
|
2 |
+
|
3 |
+
You can extend TensorBoard to show custom visualizations and connect to custom
|
4 |
+
backends by writing a custom plugin. Clone and tinker with one of the
|
5 |
+
[examples][plugin-examples], or learn about the plugin system by following the
|
6 |
+
[ADDING_A_PLUGIN](./ADDING_A_PLUGIN.md) guide. Custom plugins can be
|
7 |
+
[published][plugin-distribution] on PyPI to be shared with the community.
|
8 |
+
|
9 |
+
Developing a custom plugin does not require Bazel or building TensorBoard.
|
10 |
+
|
11 |
+
[plugin-examples]: ./tensorboard/examples/plugins
|
12 |
+
[plugin-distribution]: ./ADDING_A_PLUGIN.md#distribution
|
13 |
+
|
14 |
+
# How to Develop TensorBoard
|
15 |
+
|
16 |
+
TensorBoard at HEAD relies on the nightly installation of TensorFlow: this allows plugin authors to use the latest features of TensorFlow, but it means release versions of TensorFlow may not suffice for development. We recommend installing TensorFlow nightly in a [Python virtualenv](https://virtualenv.pypa.io), and then running your modified development copy of TensorBoard within that virtualenv. To install TensorFlow nightly within the virtualenv, as well as TensorBoard's runtime and tooling dependencies, you can run:
|
17 |
+
|
18 |
+
```sh
|
19 |
+
$ virtualenv -p python3 tf
|
20 |
+
$ source tf/bin/activate
|
21 |
+
(tf)$ pip install --upgrade pip
|
22 |
+
(tf)$ pip install tf-nightly -r tensorboard/pip_package/requirements.txt -r tensorboard/pip_package/requirements_dev.txt
|
23 |
+
```
|
24 |
+
|
25 |
+
TensorBoard builds are done with [Bazel](https://bazel.build), so you may need to [install Bazel](https://docs.bazel.build/versions/master/install.html). The Bazel build will automatically "vulcanize" all the HTML files and generate a "binary" launcher script. When HTML is vulcanized, it means all the script tags and HTML imports are inlined into one big HTML file. Then the Bazel build puts that index.html file inside a static assets zip. The python HTTP server then reads static assets from that zip while serving.
|
26 |
+
|
27 |
+
You can build and run TensorBoard via Bazel (from within the TensorFlow nightly virtualenv) as follows:
|
28 |
+
|
29 |
+
```sh
|
30 |
+
(tf)$ bazel run //tensorboard -- --logdir /path/to/logs
|
31 |
+
```
|
32 |
+
|
33 |
+
For any changes to the frontend, you’ll need to install [Yarn][yarn] to lint your code (`yarn lint`, `yarn fix-lint`). You’ll also need Yarn to add or remove any NPM dependencies.
|
34 |
+
|
35 |
+
For any changes to the backend, you’ll need to install [Black][black] to lint your code (run `black .`). Our `black` version is specified in `requirements_dev.txt` in this repository. Black only runs on Python 3.6 or higher, so you may want to install it into a separate virtual environment and use a [wrapper script to invoke it from any environment][black-wrapper].
|
36 |
+
|
37 |
+
You may wish to configure your editor to automatically run Prettier and Black on save.
|
38 |
+
|
39 |
+
To generate fake log data for a plugin, run its demo script. For instance, this command generates fake scalar data in `/tmp/scalars_demo`:
|
40 |
+
|
41 |
+
```sh
|
42 |
+
(tf)$ bazel run //tensorboard/plugins/scalar:scalars_demo
|
43 |
+
```
|
44 |
+
|
45 |
+
If you have Bazel≥0.16 and want to build any commit of TensorBoard prior to 2018-08-07, then you must first cherry-pick [pull request #1334][pr-1334] onto your working tree:
|
46 |
+
|
47 |
+
```
|
48 |
+
$ git cherry-pick bc4e7a6e5517daf918433a8f5983fc6bd239358f
|
49 |
+
```
|
50 |
+
|
51 |
+
[black]: https://github.com/psf/black
|
52 |
+
[black-wrapper]: https://gist.github.com/wchargin/d65820919f363d33545159138c86ce31
|
53 |
+
[pr-1334]: https://github.com/tensorflow/tensorboard/pull/1334
|
54 |
+
[yarn]: https://yarnpkg.com/
|
55 |
+
|
56 |
+
## Pro tips
|
57 |
+
|
58 |
+
You may find the following optional tips useful for development.
|
59 |
+
|
60 |
+
### Ignoring large cleanup commits in `git blame`
|
61 |
+
|
62 |
+
```shell
|
63 |
+
git config blame.ignoreRevsFile .git-blame-ignore-revs # requires Git >= 2.23
|
64 |
+
```
|
65 |
+
|
66 |
+
We maintain a list of commits with large diffs that are known to not have any
|
67 |
+
semantic effect, like mass code reformattings. As of Git 2.23, you can configure
|
68 |
+
Git to ignore these commits in the output of `git blame`, so that lines are
|
69 |
+
blamed to the most recent “real” change. Set the `blame.ignoreRevsFile` Git
|
70 |
+
config option to `.git-blame-ignore-revs` to enable this by default, or pass
|
71 |
+
`--ignore-revs-file .git-blame-ignore-revs` to enable it for a single command.
|
72 |
+
When enabled by default, this also works with editor plugins like
|
73 |
+
[vim-fugitive]. See `git help blame` and `git help config` for more details.
|
74 |
+
|
75 |
+
[vim-fugitive]: https://github.com/tpope/vim-fugitive
|
76 |
+
|
77 |
+
### iBazel: A file watcher for Bazel.
|
78 |
+
|
79 |
+
Bazel is capable of performing incremental builds where it builds only the
|
80 |
+
subset of files that are impacted by file changes. However, it does not come
|
81 |
+
with a file watcher. For an improved developer experience, start TensorBoard
|
82 |
+
with `ibazel` instead of `bazel`, which will automatically re-build and restart the
|
83 |
+
server when files change.
|
84 |
+
|
85 |
+
If you do not have the ibazel binary on your system, you can use the command
|
86 |
+
below.
|
87 |
+
|
88 |
+
```sh
|
89 |
+
# Optionally run `yarn` to keep `node_modules` up-to-date.
|
90 |
+
yarn run ibazel run tensorboard -- -- --logdir [LOG_DIR]
|
91 |
+
```
|
92 |
+
|
93 |
+
### Debugging UI Tests Locally
|
94 |
+
|
95 |
+
Our UI tests (e.g., //tensorboard/components/vz_sorting/test) use HTML import
|
96 |
+
which is now deprecated in all browsers (Chrome 79 and earlier had native support)
|
97 |
+
and are run without any polyfills. In order to debug tests, you may want to run
|
98 |
+
a Chromium used by our CI that supports HTML import. It can be found in
|
99 |
+
`./bazel-bin/third_party/chromium/chromium.out` (exact path to binary will
|
100 |
+
differ by OS you are on; for Linux, the full path is
|
101 |
+
`./bazel-bin/third_party/chromium/chromium.out/chrome-linux/chrome`).
|
102 |
+
|
103 |
+
For example, for the vz_sorting test:
|
104 |
+
|
105 |
+
```sh
|
106 |
+
# Run the debug instance of the test. It should run a web server at a dynamic
|
107 |
+
# port.
|
108 |
+
bazel run tensorboard/components/vz_sorting/test:test_web_library
|
109 |
+
|
110 |
+
# In another tab:
|
111 |
+
|
112 |
+
# Fetch, if missing, the Chromium
|
113 |
+
bazel build third_party/chromium
|
114 |
+
./bazel-bin/third_party/chromium/chromium.out/chrome-linux/chrome
|
115 |
+
|
116 |
+
# Lastly, put the address returned by the web server into Chromium.
|
117 |
+
```
|
saved_models/tensorboard/LICENSE
ADDED
@@ -0,0 +1,203 @@
1 |
+
Copyright 2017 The TensorFlow Authors. All rights reserved.
|
2 |
+
|
3 |
+
Apache License
|
4 |
+
Version 2.0, January 2004
|
5 |
+
http://www.apache.org/licenses/
|
6 |
+
|
7 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
8 |
+
|
9 |
+
1. Definitions.
|
10 |
+
|
11 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
12 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
13 |
+
|
14 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
15 |
+
the copyright owner that is granting the License.
|
16 |
+
|
17 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
18 |
+
other entities that control, are controlled by, or are under common
|
19 |
+
control with that entity. For the purposes of this definition,
|
20 |
+
"control" means (i) the power, direct or indirect, to cause the
|
21 |
+
direction or management of such entity, whether by contract or
|
22 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
23 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
24 |
+
|
25 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
26 |
+
exercising permissions granted by this License.
|
27 |
+
|
28 |
+
"Source" form shall mean the preferred form for making modifications,
|
29 |
+
including but not limited to software source code, documentation
|
30 |
+
source, and configuration files.
|
31 |
+
|
32 |
+
"Object" form shall mean any form resulting from mechanical
|
33 |
+
transformation or translation of a Source form, including but
|
34 |
+
not limited to compiled object code, generated documentation,
|
35 |
+
and conversions to other media types.
|
36 |
+
|
37 |
+
"Work" shall mean the work of authorship, whether in Source or
|
38 |
+
Object form, made available under the License, as indicated by a
|
39 |
+
copyright notice that is included in or attached to the work
|
40 |
+
(an example is provided in the Appendix below).
|
41 |
+
|
42 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
43 |
+
form, that is based on (or derived from) the Work and for which the
|
44 |
+
editorial revisions, annotations, elaborations, or other modifications
|
45 |
+
represent, as a whole, an original work of authorship. For the purposes
|
46 |
+
of this License, Derivative Works shall not include works that remain
|
47 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
48 |
+
the Work and Derivative Works thereof.
|
49 |
+
|
50 |
+
"Contribution" shall mean any work of authorship, including
|
51 |
+
the original version of the Work and any modifications or additions
|
52 |
+
to that Work or Derivative Works thereof, that is intentionally
|
53 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
54 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
55 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
56 |
+
means any form of electronic, verbal, or written communication sent
|
57 |
+
to the Licensor or its representatives, including but not limited to
|
58 |
+
communication on electronic mailing lists, source code control systems,
|
59 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
60 |
+
Licensor for the purpose of discussing and improving the Work, but
|
61 |
+
excluding communication that is conspicuously marked or otherwise
|
62 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
63 |
+
|
64 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
65 |
+
on behalf of whom a Contribution has been received by Licensor and
|
66 |
+
subsequently incorporated within the Work.
|
67 |
+
|
68 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
69 |
+
this License, each Contributor hereby grants to You a perpetual,
|
70 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
71 |
+
copyright license to reproduce, prepare Derivative Works of,
|
72 |
+
publicly display, publicly perform, sublicense, and distribute the
|
73 |
+
Work and such Derivative Works in Source or Object form.
|
74 |
+
|
75 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
76 |
+
this License, each Contributor hereby grants to You a perpetual,
|
77 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
78 |
+
(except as stated in this section) patent license to make, have made,
|
79 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
80 |
+
where such license applies only to those patent claims licensable
|
81 |
+
by such Contributor that are necessarily infringed by their
|
82 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
83 |
+
with the Work to which such Contribution(s) was submitted. If You
|
84 |
+
institute patent litigation against any entity (including a
|
85 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
86 |
+
or a Contribution incorporated within the Work constitutes direct
|
87 |
+
or contributory patent infringement, then any patent licenses
|
88 |
+
granted to You under this License for that Work shall terminate
|
89 |
+
as of the date such litigation is filed.
|
90 |
+
|
91 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
92 |
+
Work or Derivative Works thereof in any medium, with or without
|
93 |
+
modifications, and in Source or Object form, provided that You
|
94 |
+
meet the following conditions:
|
95 |
+
|
96 |
+
(a) You must give any other recipients of the Work or
|
97 |
+
Derivative Works a copy of this License; and
|
98 |
+
|
99 |
+
(b) You must cause any modified files to carry prominent notices
|
100 |
+
stating that You changed the files; and
|
101 |
+
|
102 |
+
(c) You must retain, in the Source form of any Derivative Works
|
103 |
+
that You distribute, all copyright, patent, trademark, and
|
104 |
+
attribution notices from the Source form of the Work,
|
105 |
+
excluding those notices that do not pertain to any part of
|
106 |
+
the Derivative Works; and
|
107 |
+
|
108 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
109 |
+
distribution, then any Derivative Works that You distribute must
|
110 |
+
include a readable copy of the attribution notices contained
|
111 |
+
within such NOTICE file, excluding those notices that do not
|
112 |
+
pertain to any part of the Derivative Works, in at least one
|
113 |
+
of the following places: within a NOTICE text file distributed
|
114 |
+
as part of the Derivative Works; within the Source form or
|
115 |
+
documentation, if provided along with the Derivative Works; or,
|
116 |
+
within a display generated by the Derivative Works, if and
|
117 |
+
wherever such third-party notices normally appear. The contents
|
118 |
+
of the NOTICE file are for informational purposes only and
|
119 |
+
do not modify the License. You may add Your own attribution
|
120 |
+
notices within Derivative Works that You distribute, alongside
|
121 |
+
or as an addendum to the NOTICE text from the Work, provided
|
122 |
+
that such additional attribution notices cannot be construed
|
123 |
+
as modifying the License.
|
124 |
+
|
125 |
+
You may add Your own copyright statement to Your modifications and
|
126 |
+
may provide additional or different license terms and conditions
|
127 |
+
for use, reproduction, or distribution of Your modifications, or
|
128 |
+
for any such Derivative Works as a whole, provided Your use,
|
129 |
+
reproduction, and distribution of the Work otherwise complies with
|
130 |
+
the conditions stated in this License.
|
131 |
+
|
132 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
133 |
+
any Contribution intentionally submitted for inclusion in the Work
|
134 |
+
by You to the Licensor shall be under the terms and conditions of
|
135 |
+
this License, without any additional terms or conditions.
|
136 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
137 |
+
the terms of any separate license agreement you may have executed
|
138 |
+
with Licensor regarding such Contributions.
|
139 |
+
|
140 |
+
6. Trademarks. This License does not grant permission to use the trade
|
141 |
+
names, trademarks, service marks, or product names of the Licensor,
|
142 |
+
except as required for reasonable and customary use in describing the
|
143 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
144 |
+
|
145 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
146 |
+
agreed to in writing, Licensor provides the Work (and each
|
147 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
148 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
149 |
+
implied, including, without limitation, any warranties or conditions
|
150 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
151 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
152 |
+
appropriateness of using or redistributing the Work and assume any
|
153 |
+
risks associated with Your exercise of permissions under this License.
|
154 |
+
|
155 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
156 |
+
whether in tort (including negligence), contract, or otherwise,
|
157 |
+
unless required by applicable law (such as deliberate and grossly
|
158 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
159 |
+
liable to You for damages, including any direct, indirect, special,
|
160 |
+
incidental, or consequential damages of any character arising as a
|
161 |
+
result of this License or out of the use or inability to use the
|
162 |
+
Work (including but not limited to damages for loss of goodwill,
|
163 |
+
work stoppage, computer failure or malfunction, or any and all
|
164 |
+
other commercial damages or losses), even if such Contributor
|
165 |
+
has been advised of the possibility of such damages.
|
166 |
+
|
167 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
168 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
169 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
170 |
+
or other liability obligations and/or rights consistent with this
|
171 |
+
License. However, in accepting such obligations, You may act only
|
172 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
173 |
+
of any other Contributor, and only if You agree to indemnify,
|
174 |
+
defend, and hold each Contributor harmless for any liability
|
175 |
+
incurred by, or claims asserted against, such Contributor by reason
|
176 |
+
of your accepting any such warranty or additional liability.
|
177 |
+
|
178 |
+
END OF TERMS AND CONDITIONS
|
179 |
+
|
180 |
+
APPENDIX: How to apply the Apache License to your work.
|
181 |
+
|
182 |
+
To apply the Apache License to your work, attach the following
|
183 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
184 |
+
replaced with your own identifying information. (Don't include
|
185 |
+
the brackets!) The text should be enclosed in the appropriate
|
186 |
+
comment syntax for the file format. We also recommend that a
|
187 |
+
file or class name and description of purpose be included on the
|
188 |
+
same "printed page" as the copyright notice for easier
|
189 |
+
identification within third-party archives.
|
190 |
+
|
191 |
+
Copyright 2017, The TensorFlow Authors.
|
192 |
+
|
193 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
194 |
+
you may not use this file except in compliance with the License.
|
195 |
+
You may obtain a copy of the License at
|
196 |
+
|
197 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
198 |
+
|
199 |
+
Unless required by applicable law or agreed to in writing, software
|
200 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
201 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
202 |
+
See the License for the specific language governing permissions and
|
203 |
+
limitations under the License.
|
saved_models/tensorboard/README.md
ADDED
@@ -0,0 +1,427 @@
1 |
+
# TensorBoard [![Travis build status](https://travis-ci.org/tensorflow/tensorboard.svg?branch=master)](https://travis-ci.org/tensorflow/tensorboard/) [![GitHub Actions CI](https://github.com/tensorflow/tensorboard/workflows/CI/badge.svg)](https://github.com/tensorflow/tensorboard/actions?query=workflow%3ACI+branch%3Amaster+event%3Apush) [![Compat check PyPI](https://python-compatibility-tools.appspot.com/one_badge_image?package=tensorboard)](https://python-compatibility-tools.appspot.com/one_badge_target?package=tensorboard)
|
2 |
+
|
3 |
+
TensorBoard is a suite of web applications for inspecting and understanding your
|
4 |
+
TensorFlow runs and graphs.
|
5 |
+
|
6 |
+
This README gives an overview of key concepts in TensorBoard, as well as how to
|
7 |
+
interpret the visualizations TensorBoard provides. For an in-depth example of
|
8 |
+
using TensorBoard, see the tutorial: [TensorBoard: Getting Started][].
|
9 |
+
Documentation on how to use TensorBoard to work with images, graphs,
|
10 |
+
hyperparameters, and more is linked from there, along with tutorial walk-throughs in
|
11 |
+
Colab.
|
12 |
+
|
13 |
+
You may also be interested in the hosted TensorBoard solution at
|
14 |
+
[TensorBoard.dev][]. You can use TensorBoard.dev to easily host, track, and
|
15 |
+
share your ML experiments for free. For example, [this experiment][] shows a
|
16 |
+
working example featuring the scalars, graphs, histograms, distributions, and
|
17 |
+
hparams dashboards.
|
18 |
+
|
19 |
+
TensorBoard is designed to run entirely offline, without requiring any access
|
20 |
+
to the Internet. For instance, this may be on your local machine, behind a
|
21 |
+
corporate firewall, or in a datacenter.
|
22 |
+
|
23 |
+
[TensorBoard: Getting Started]: https://www.tensorflow.org/tensorboard/get_started
|
24 |
+
[TensorBoard.dev]: https://tensorboard.dev
|
25 |
+
[This experiment]: https://tensorboard.dev/experiment/EDZb7XgKSBKo6Gznh3i8hg/#scalars
|
26 |
+
|
27 |
+
# Usage
|
28 |
+
|
29 |
+
Before running TensorBoard, make sure you have generated summary data in a log
|
30 |
+
directory by creating a summary writer:
|
31 |
+
|
32 |
+
``` python
|
33 |
+
# sess.graph contains the graph definition; that enables the Graph Visualizer.
|
34 |
+
|
35 |
+
file_writer = tf.summary.FileWriter('/path/to/logs', sess.graph)
|
36 |
+
```
|
37 |
+
|
38 |
+
For more details, see
|
39 |
+
[the TensorBoard tutorial](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
|
40 |
+
Once you have event files, run TensorBoard and provide the log directory. If
|
41 |
+
you're using a precompiled TensorFlow package (e.g. you installed via pip), run:
|
42 |
+
|
43 |
+
```
|
44 |
+
tensorboard --logdir path/to/logs
|
45 |
+
```
|
46 |
+
|
47 |
+
Or, if you are building from source:
|
48 |
+
|
49 |
+
```bash
|
50 |
+
bazel build tensorboard:tensorboard
|
51 |
+
./bazel-bin/tensorboard/tensorboard --logdir path/to/logs
|
52 |
+
|
53 |
+
# or even more succinctly
|
54 |
+
bazel run tensorboard -- --logdir path/to/logs
|
55 |
+
```
|
56 |
+
|
57 |
+
This should print that TensorBoard has started. Next, connect to
|
58 |
+
http://localhost:6006.
|
59 |
+
|
60 |
+
TensorBoard requires a `logdir` to read logs from. For info on configuring
|
61 |
+
TensorBoard, run `tensorboard --help`.
|
62 |
+
|
63 |
+
TensorBoard can be used in Google Chrome or Firefox. Other browsers might
|
64 |
+
work, but there may be bugs or performance issues.
|
65 |
+
|
66 |
+
# Key Concepts
|
67 |
+
|
68 |
+
### Summary Ops: How TensorBoard gets data from TensorFlow
|
69 |
+
|
70 |
+
The first step in using TensorBoard is acquiring data from your TensorFlow run.
|
71 |
+
For this, you need
|
72 |
+
[summary ops](https://www.tensorflow.org/api_docs/python/tf/summary).
|
73 |
+
Summary ops are ops, just like
|
74 |
+
[`tf.matmul`](https://www.tensorflow.org/api_docs/python/tf/linalg/matmul)
|
75 |
+
and
|
76 |
+
[`tf.nn.relu`](https://www.tensorflow.org/api_docs/python/tf/nn/relu),
|
77 |
+
which means they take in tensors, produce tensors, and are evaluated from within
|
78 |
+
a TensorFlow graph. However, summary ops have a twist: the Tensors they produce
|
79 |
+
contain serialized protobufs, which are written to disk and sent to TensorBoard.
|
80 |
+
To visualize the summary data in TensorBoard, you should evaluate the summary
|
81 |
+
op, retrieve the result, and then write that result to disk using a
|
82 |
+
summary.FileWriter. A full explanation, with examples, is in [the
|
83 |
+
tutorial](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
|
84 |
+
|
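For instance, here is a minimal sketch (using the TF1-style API from the Usage section above; the placeholder input and loss values are illustrative) that evaluates a scalar summary op and writes the result to disk:

```python
import tensorflow as tf

x = tf.placeholder(tf.float32)               # illustrative input
loss_summary = tf.summary.scalar('loss', x)  # summary op: produces a serialized protobuf

with tf.Session() as sess:
    writer = tf.summary.FileWriter('/path/to/logs', sess.graph)
    for step in range(100):
        # Evaluate the summary op, then write the result to the event file.
        summ = sess.run(loss_summary, feed_dict={x: 1.0 / (step + 1)})
        writer.add_summary(summ, global_step=step)
    writer.close()
```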
85 |
+
The supported summary ops include:
|
86 |
+
* [`tf.summary.scalar`](https://www.tensorflow.org/api_docs/python/tf/summary/scalar)
|
87 |
+
* [`tf.summary.image`](https://www.tensorflow.org/api_docs/python/tf/summary/image)
|
88 |
+
* [`tf.summary.audio`](https://www.tensorflow.org/api_docs/python/tf/summary/audio)
|
89 |
+
* [`tf.summary.text`](https://www.tensorflow.org/api_docs/python/tf/summary/text)
|
90 |
+
* [`tf.summary.histogram`](https://www.tensorflow.org/api_docs/python/tf/summary/histogram)
|
91 |
+
|
92 |
+
### Tags: Giving names to data
|
93 |
+
|
94 |
+
When you make a summary op, you will also give it a `tag`. The tag is basically
|
95 |
+
a name for the data recorded by that op, and will be used to organize the data
|
96 |
+
in the frontend. The scalar and histogram dashboards organize data by tag, and
|
97 |
+
group the tags into folders according to a directory/like/hierarchy. If you have
|
98 |
+
a lot of tags, we recommend grouping them with slashes.
|
99 |
+
|
100 |
+
### Event Files & LogDirs: How TensorBoard loads the data
|
101 |
+
|
102 |
+
`summary.FileWriters` take summary data from TensorFlow, and then write them to a
|
103 |
+
specified directory, known as the `logdir`. Specifically, the data is written to
|
104 |
+
an append-only record dump that will have "tfevents" in the filename.
|
105 |
+
TensorBoard reads data from a full directory, and organizes it into the history
|
106 |
+
of a single TensorFlow execution.
|
107 |
+
|
108 |
+
Why does it read the whole directory, rather than an individual file? You might
|
109 |
+
have been using
|
110 |
+
[supervisor.py](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/training/supervisor.py)
|
111 |
+
to run your model, in which case if TensorFlow crashes, the supervisor will
|
112 |
+
restart it from a checkpoint. When it restarts, it will start writing to a new
|
113 |
+
events file, and TensorBoard will stitch the various event files together to
|
114 |
+
produce a consistent history of what happened.
|
115 |
+
|
116 |
+
### Runs: Comparing different executions of your model
|
117 |
+
|
118 |
+
You may want to visually compare multiple executions of your model; for example,
|
119 |
+
suppose you've changed the hyperparameters and want to see if it's converging
|
120 |
+
faster. TensorBoard enables this through different "runs". When TensorBoard is
|
121 |
+
passed a `logdir` at startup, it recursively walks the directory tree rooted at
|
122 |
+
`logdir` looking for subdirectories that contain tfevents data. Every time it
|
123 |
+
encounters such a subdirectory, it loads it as a new `run`, and the frontend
|
124 |
+
will organize the data accordingly.
|
125 |
+
|
126 |
+
For example, here is a well-organized TensorBoard log directory, with two runs,
|
127 |
+
"run1" and "run2".
|
128 |
+
|
129 |
+
```
|
130 |
+
/some/path/mnist_experiments/
|
131 |
+
/some/path/mnist_experiments/run1/
|
132 |
+
/some/path/mnist_experiments/run1/events.out.tfevents.1456525581.name
|
133 |
+
/some/path/mnist_experiments/run1/events.out.tfevents.1456525585.name
|
134 |
+
/some/path/mnist_experiments/run2/
|
135 |
+
/some/path/mnist_experiments/run2/events.out.tfevents.1456525385.name
|
136 |
+
/tensorboard --logdir /some/path/mnist_experiments
|
137 |
+
```
|
138 |
+
|
139 |
+
#### Logdir & Logdir_spec (Legacy Mode)
|
140 |
+
|
141 |
+
You may also pass a comma separated list of log directories, and TensorBoard
|
142 |
+
will watch each directory. You can also assign names to individual log
|
143 |
+
directories by putting a colon between the name and the path, as in
|
144 |
+
|
145 |
+
```
|
146 |
+
tensorboard --logdir_spec name1:/path/to/logs/1,name2:/path/to/logs/2
|
147 |
+
```
|
148 |
+
|
149 |
+
_This flag (`--logdir_spec`) is discouraged and can usually be avoided_. TensorBoard walks log directories recursively; for finer-grained control, prefer using a symlink tree. _Some features may not work when using `--logdir_spec` instead of `--logdir`._
|
150 |
+
|
151 |
+
# The Visualizations
|
152 |
+
|
153 |
+
### Scalar Dashboard
|
154 |
+
|
155 |
+
TensorBoard's Scalar Dashboard visualizes scalar statistics that vary over time;
|
156 |
+
for example, you might want to track the model's loss or learning rate. As
|
157 |
+
described in *Key Concepts*, you can compare multiple runs, and the data is
|
158 |
+
organized by tag. The line charts have the following interactions:
|
159 |
+
|
160 |
+
* Clicking on the small blue icon in the lower-left corner of each chart will
|
161 |
+
expand the chart
|
162 |
+
|
163 |
+
* Dragging a rectangular region on the chart will zoom in
|
164 |
+
|
165 |
+
* Double clicking on the chart will zoom out
|
166 |
+
|
167 |
+
* Mousing over the chart will produce crosshairs, with data values recorded in
|
168 |
+
the run-selector on the left.
|
169 |
+
|
170 |
+
Additionally, you can create new folders to organize tags by writing regular
|
171 |
+
expressions in the box in the top-left of the dashboard.
|
172 |
+
|
173 |
+
### Histogram Dashboard
|
174 |
+
|
175 |
+
The Histogram Dashboard displays how the statistical distribution of a Tensor
|
176 |
+
has varied over time. It visualizes data recorded via `tf.summary.histogram`.
|
177 |
+
Each chart shows temporal "slices" of data, where each slice is a histogram of
|
178 |
+
the tensor at a given step. It's organized with the oldest timestep in the back,
|
179 |
+
and the most recent timestep in front. By changing the Histogram Mode from
|
180 |
+
"offset" to "overlay", the perspective will rotate so that every histogram slice
|
181 |
+
is rendered as a line and overlaid on the others.
|
182 |
+
|
183 |
+
### Distribution Dashboard
|
184 |
+
|
185 |
+
The Distribution Dashboard is another way of visualizing histogram data from
|
186 |
+
`tf.summary.histogram`. It shows some high-level statistics on a distribution.
|
187 |
+
Each line on the chart represents a percentile in the distribution over the
|
188 |
+
data: for example, the bottom line shows how the minimum value has changed over
|
189 |
+
time, and the line in the middle shows how the median has changed. Reading from
|
190 |
+
top to bottom, the lines have the following meaning: `[maximum, 93%, 84%, 69%,
|
191 |
+
50%, 31%, 16%, 7%, minimum]`
|
192 |
+
|
193 |
+
These percentiles can also be viewed as standard deviation boundaries on a
|
194 |
+
normal distribution: `[maximum, μ+1.5σ, μ+σ, μ+0.5σ, μ, μ-0.5σ, μ-σ, μ-1.5σ,
|
195 |
+
minimum]` so that the colored regions, read from inside to outside, have widths
|
196 |
+
`[σ, 2σ, 3σ]` respectively.
|
197 |
+
|
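(As a quick check, these percentages follow from the standard normal CDF Φ: Φ(0.5) ≈ 0.69, Φ(1) ≈ 0.84, and Φ(1.5) ≈ 0.93, which is where the 69%, 84%, and 93% lines come from.)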
198 |
+
|
199 |
+
### Image Dashboard
|
200 |
+
|
201 |
+
The Image Dashboard can display PNGs that were saved via a `tf.summary.image`.
|
202 |
+
The dashboard is set up so that each row corresponds to a different tag, and
|
203 |
+
each column corresponds to a run. Since the image dashboard supports arbitrary
|
204 |
+
PNGs, you can use this to embed custom visualizations (e.g. matplotlib
|
205 |
+
scatterplots) into TensorBoard. This dashboard always shows you the latest image
|
206 |
+
for each tag.
|
207 |
+
|
208 |
+
### Audio Dashboard
|
209 |
+
|
210 |
+
The Audio Dashboard can embed playable audio widgets for audio saved via a
|
211 |
+
`tf.summary.audio`. The dashboard is set up so that each row corresponds to a
|
212 |
+
different tag, and each column corresponds to a run. This dashboard always
|
213 |
+
embeds the latest audio for each tag.
|
214 |
+
|
215 |
+
### Graph Explorer
|
216 |
+
|
217 |
+
The Graph Explorer can visualize a TensorBoard graph, enabling inspection of the
|
218 |
+
TensorFlow model. To get the best use of the graph visualizer, you should use name
|
219 |
+
scopes to hierarchically group the ops in your graph - otherwise, the graph may
|
220 |
+
be difficult to decipher. For more information, including examples, see [the
|
221 |
+
graph visualizer tutorial](https://www.tensorflow.org/get_started/graph_viz).
|
222 |
+
|
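A small sketch of name scoping (the layer sizes and variable names are illustrative); the Graph Explorer collapses everything inside a scope into a single expandable node:

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])  # illustrative input

with tf.name_scope('hidden_layer'):
    w = tf.Variable(tf.random_normal([784, 100]), name='weights')
    b = tf.Variable(tf.zeros([100]), name='biases')
    h = tf.nn.relu(tf.matmul(x, w) + b)
```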
223 |
+
### Embedding Projector
|
224 |
+
|
225 |
+
The Embedding Projector allows you to visualize high-dimensional data; for
|
226 |
+
example, you may view your input data after it has been embedded in a high-
|
227 |
+
dimensional space by your model. The embedding projector reads data from your
|
228 |
+
model checkpoint file, and may be configured with additional metadata, like
|
229 |
+
a vocabulary file or sprite images. For more details, see [the embedding
|
230 |
+
projector tutorial](https://www.tensorflow.org/tutorials/text/word_embeddings).
|
231 |
+
|
232 |
+
### Text Dashboard
|
233 |
+
|
234 |
+
The Text Dashboard displays text snippets saved via `tf.summary.text`. Markdown
|
235 |
+
features including hyperlinks, lists, and tables are all supported.
|
236 |
+
|
237 |
+
# Frequently Asked Questions
|
238 |
+
|
239 |
+
### My TensorBoard isn't showing any data! What's wrong?
|
240 |
+
|
241 |
+
First, check that the directory passed to `--logdir` is correct. You can also
|
242 |
+
verify this by navigating to the Scalars dashboard (under the "Inactive" menu)
|
243 |
+
and looking for the log directory path at the bottom of the left sidebar.
|
244 |
+
|
245 |
+
If you're loading from the proper path, make sure that event files are present.
|
246 |
+
TensorBoard will recursively walk its logdir; it's fine if the data is nested
|
247 |
+
under a subdirectory. Ensure the following shows at least one result:
|
248 |
+
|
249 |
+
`find DIRECTORY_PATH | grep tfevents`
|
250 |
+
|
251 |
+
You can also check that the event files actually have data by running
|
252 |
+
tensorboard in inspect mode to inspect the contents of your event files.
|
253 |
+
|
254 |
+
`tensorboard --inspect --logdir DIRECTORY_PATH`
|
255 |
+
|
256 |
+
### TensorBoard is showing only some of my data, or isn't properly updating!
|
257 |
+
|
258 |
+
> **Update:** After [2.3.0 release][2-3-0], TensorBoard no longer auto reloads
|
259 |
+
> every 30 seconds. To re-enable the behavior, please open the settings by
|
260 |
+
> clicking the gear icon in the top-right of the TensorBoard web interface, and
|
261 |
+
> enable "Reload data".
|
262 |
+
|
263 |
+
> **Update:** the [experimental `--reload_multifile=true` option][pr-1867] can
|
264 |
+
> now be used to poll all "active" files in a directory for new data, rather
|
265 |
+
> than the most recent one as described below. A file is "active" as long as it
|
266 |
+
> received new data within `--reload_multifile_inactive_secs` seconds ago,
|
267 |
+
> defaulting to 4000.
|
268 |
+
|
269 |
+
This issue usually comes about because of how TensorBoard iterates through the
|
270 |
+
`tfevents` files: it progresses through the events file in timestamp order, and
|
271 |
+
only reads one file at a time. Let's suppose we have files with timestamps `a`
|
272 |
+
and `b`, where `a<b`. Once TensorBoard has read all the events in `a`, it will
|
273 |
+
never return to it, because it assumes any new events are being written in the
|
274 |
+
more recent file. This could cause an issue if, for example, you have two
|
275 |
+
`FileWriters` simultaneously writing to the same directory. If you have
|
276 |
+
multiple summary writers, each one should be writing to a separate directory.
|
277 |
+
|
278 |
+
### Does TensorBoard support multiple or distributed summary writers?
|
279 |
+
|
280 |
+
> **Update:** the [experimental `--reload_multifile=true` option][pr-1867] can
|
281 |
+
> now be used to poll all "active" files in a directory for new data, defined as
|
282 |
+
> any file that has received new data within the last `--reload_multifile_inactive_secs`
|
283 |
+
> seconds (default 4000).
|
284 |
+
|
285 |
+
No. TensorBoard expects that only one events file will be written to at a time,
|
286 |
+
and multiple summary writers means multiple events files. If you are running a
|
287 |
+
distributed TensorFlow instance, we encourage you to designate a single worker
|
288 |
+
as the "chief" that is responsible for all summary processing. See
|
289 |
+
[supervisor.py](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/training/supervisor.py)
|
290 |
+
for an example.
|
291 |
+
|
292 |
+
### I'm seeing data overlapped on itself! What gives?
|
293 |
+
|
294 |
+
If you are seeing data that seems to travel backwards through time and overlap
|
295 |
+
with itself, there are a few possible explanations.
|
296 |
+
|
297 |
+
* You may have multiple executions of TensorFlow that all wrote to the same log
|
298 |
+
directory. Please have each TensorFlow run write to its own logdir.
|
299 |
+
|
300 |
+
> **Update:** the [experimental `--reload_multifile=true` option][pr-1867] can
|
301 |
+
> now be used to poll all "active" files in a directory for new data, defined
|
302 |
+
> as any file that has received new data within the last `--reload_multifile_inactive_secs`
|
303 |
+
> seconds (default 4000).
|
304 |
+
|
305 |
+
* You may have a bug in your code where the global_step variable (passed
|
306 |
+
to `FileWriter.add_summary`) is being maintained incorrectly.
|
307 |
+
|
308 |
+
* It may be that your TensorFlow job crashed, and was restarted from an earlier
|
309 |
+
checkpoint. See *How to handle TensorFlow restarts*, below.
|
310 |
+
|
311 |
+
As a workaround, try changing the x-axis display in TensorBoard from `steps` to
|
312 |
+
`wall_time`. This will frequently clear up the issue.
|
313 |
+
|
314 |
+
### How should I handle TensorFlow restarts?
|
315 |
+
|
316 |
+
TensorFlow is designed with a mechanism for graceful recovery if a job crashes
|
317 |
+
or is killed: TensorFlow can periodically write model checkpoint files, which
|
318 |
+
enable you to restart TensorFlow without losing all your training progress.
|
319 |
+
|
320 |
+
However, this can complicate things for TensorBoard; imagine that TensorFlow
|
321 |
+
wrote a checkpoint at step `a`, and then continued running until step `b`, and
|
322 |
+
then crashed and restarted at timestamp `a`. All of the events written between
|
323 |
+
`a` and `b` were "orphaned" by the restart event and should be removed.
|
324 |
+
|
325 |
+
To facilitate this, we have a `SessionLog` message in
|
326 |
+
`tensorflow/core/util/event.proto` which can record `SessionStatus.START` as an
|
327 |
+
event; like all events, it may have a `step` associated with it. If TensorBoard
|
328 |
+
detects a `SessionStatus.START` event with step `a`, it will assume that every
|
329 |
+
event with a step greater than `a` was orphaned, and it will discard those
|
330 |
+
events. This behavior may be disabled with the flag
|
331 |
+
`--purge_orphaned_data false` (in versions after 0.7).
|
332 |
+
|
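A sketch of what emitting such an event looks like in the TF1 API (the step value here is illustrative); after restarting from a checkpoint at step 1000, events with a greater step are treated as orphaned:

```python
import tensorflow as tf

writer = tf.summary.FileWriter('/path/to/logs')
# Mark a session start at the restored step so TensorBoard discards
# any previously written events with a greater step.
writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START),
                       global_step=1000)
writer.close()
```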
333 |
+
### How can I export data from TensorBoard?
|
334 |
+
|
335 |
+
The Scalar Dashboard supports exporting data; you can click the "enable
|
336 |
+
download links" option in the left-hand bar. Then, each plot will provide
|
337 |
+
download links for the data it contains.
|
338 |
+
|
339 |
+
If you need access to the full dataset, you can read the event files that
|
340 |
+
TensorBoard consumes by using the [`summary_iterator`](
|
341 |
+
https://www.tensorflow.org/api_docs/python/tf/train/summary_iterator)
|
342 |
+
method.
|
343 |
+
|
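For instance, a minimal sketch (the event-file path is illustrative) that prints every scalar value in an event file:

```python
import tensorflow as tf

path = '/path/to/logs/events.out.tfevents.1456525581.name'  # illustrative
for event in tf.train.summary_iterator(path):
    for value in event.summary.value:
        if value.HasField('simple_value'):  # scalars are stored as simple_value
            print(event.step, value.tag, value.simple_value)
```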
344 |
+
### Can I make my own plugin?
|
345 |
+
|
346 |
+
Yes! You can clone and tinker with one of the [examples][plugin-examples] and
|
347 |
+
make your own, amazing visualizations. More documentation on the plugin system
|
348 |
+
is described in the [ADDING_A_PLUGIN](./ADDING_A_PLUGIN.md) guide. Feel free to
|
349 |
+
file feature requests or questions about plugin functionality.
|
350 |
+
|
351 |
+
Once satisfied with your own groundbreaking new plugin, see the
|
352 |
+
[distribution section][plugin-distribution] on how to publish to PyPI and share
|
353 |
+
it with the community.
|
354 |
+
|
355 |
+
[plugin-examples]: ./tensorboard/examples/plugins
|
356 |
+
[plugin-distribution]: ./ADDING_A_PLUGIN.md#distribution
|
357 |
+
|
358 |
+
### Can I customize which lines appear in a plot?
|
359 |
+
|
360 |
+
Using the [custom scalars plugin](tensorboard/plugins/custom_scalar), you can
|
361 |
+
create scalar plots with lines for custom run-tag pairs. However, within the
|
362 |
+
original scalars dashboard, each scalar plot corresponds to data for a specific
|
363 |
+
tag and contains lines for each run that includes that tag.
|
364 |
+
|
365 |
+
### Can I visualize margins above and below lines?
|
366 |
+
|
367 |
+
Margin plots (that visualize lower and upper bounds) may be created with the
|
368 |
+
[custom scalars plugin](tensorboard/plugins/custom_scalar). The original
|
369 |
+
scalars plugin does not support visualizing margins.
|
370 |
+
|
371 |
+
### Can I create scatterplots (or other custom plots)?
|
372 |
+
|
373 |
+
This isn't yet possible. As a workaround, you could create your custom plot in
|
374 |
+
your own code (e.g. matplotlib) and then write it into a `SummaryProto`
|
375 |
+
(`core/framework/summary.proto`) and add it to your `FileWriter`. Then, your
|
376 |
+
custom plot will appear in the TensorBoard image tab.
|
377 |
+
|
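A sketch of that workaround (the tag and figure contents are illustrative), encoding a matplotlib figure as a PNG image summary:

```python
import io

import matplotlib.pyplot as plt
import tensorflow as tf

# Render the custom plot to an in-memory PNG.
fig = plt.figure()
plt.scatter([1, 2, 3], [4, 1, 3])
buf = io.BytesIO()
fig.savefig(buf, format='png')

# Wrap the PNG bytes in an image summary and write it with a FileWriter.
image = tf.Summary.Image(encoded_image_string=buf.getvalue())
summary = tf.Summary(value=[tf.Summary.Value(tag='my_scatterplot', image=image)])
writer = tf.summary.FileWriter('/path/to/logs')
writer.add_summary(summary, global_step=0)
writer.close()
```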
378 |
+
### Is my data being downsampled? Am I really seeing all the data?
|
379 |
+
|
380 |
+
TensorBoard uses [reservoir
|
381 |
+
sampling](https://en.wikipedia.org/wiki/Reservoir_sampling) to downsample your
|
382 |
+
data so that it can be loaded into RAM. You can modify the number of elements it
|
383 |
+
will keep per tag by using the `--samples_per_plugin` command line argument (ex:
|
384 |
+
`--samples_per_plugin=scalars=500,images=20`).
|
385 |
+
See this [Stack Overflow question](http://stackoverflow.com/questions/43702546/tensorboard-doesnt-show-all-data-points/)
|
386 |
+
for some more information.
|
387 |
+
|
388 |
+
### I get a network security popup every time I run TensorBoard on a mac!
|
389 |
+
|
390 |
+
Versions of TensorBoard prior to TensorBoard 2.0 would by default serve on host
|
391 |
+
`0.0.0.0`, which is publicly accessible. For those versions of TensorBoard, you
|
392 |
+
can stop the popups by specifying `--host localhost` at startup.
|
393 |
+
|
394 |
+
In TensorBoard 2.0 and up, `--host localhost` is the default. Use `--bind_all`
|
395 |
+
to restore the old behavior of serving to the public network on both IPv4 and
|
396 |
+
IPv6.
|
397 |
+
|
398 |
+
### Can I run `tensorboard` without a TensorFlow installation?
|
399 |
+
|
400 |
+
TensorBoard 1.14+ can be run with a reduced feature set if you do not have
|
401 |
+
TensorFlow installed. The primary limitation is that as of 1.14, only the
|
402 |
+
following plugins are supported: scalars, custom scalars, image, audio,
|
403 |
+
graph, projector (partial), distributions, histograms, text, PR curves, mesh.
|
404 |
+
In addition, there is no support for log directories on Google Cloud Storage.
|
405 |
+
|
406 |
+
### How can I contribute to TensorBoard development?
|
407 |
+
|
408 |
+
See [DEVELOPMENT.md](DEVELOPMENT.md).
|
409 |
+
|
410 |
+
### I have a different issue that wasn't addressed here!
|
411 |
+
|
412 |
+
First, try searching our [GitHub
|
413 |
+
issues](https://github.com/tensorflow/tensorboard/issues) and
|
414 |
+
[Stack Overflow][stack-overflow]. It may be
|
415 |
+
that someone else has already had the same issue or question.
|
416 |
+
|
417 |
+
General usage questions (or problems that may be specific to your local setup)
|
418 |
+
should go to [Stack Overflow][stack-overflow].
|
419 |
+
|
420 |
+
If you have found a bug in TensorBoard, please [file a GitHub issue](
|
421 |
+
https://github.com/tensorflow/tensorboard/issues/new) with as much supporting
|
422 |
+
information as you can provide (e.g. attaching events files, including the output
|
423 |
+
of `tensorboard --inspect`, etc.).
|
424 |
+
|
425 |
+
[stack-overflow]: https://stackoverflow.com/questions/tagged/tensorboard
|
426 |
+
[pr-1867]: https://github.com/tensorflow/tensorboard/pull/1867
|
427 |
+
[2-3-0]: https://github.com/tensorflow/tensorboard/releases/tag/2.3.0
|
saved_models/tensorboard/RELEASE.md
ADDED
@@ -0,0 +1,716 @@
1 |
+
# Release 2.3.0
|
2 |
+
|
3 |
+
The 2.3 minor series tracks TensorFlow 2.3.
|
4 |
+
|
5 |
+
## Features
|
6 |
+
|
7 |
+
- The 30 sec default reload period is now customizable in the Settings UI
|
8 |
+
(#2794)
|
9 |
+
- 🧪 **Experimental** Debugger V2 is now available; see the
|
10 |
+
[tutorial][debugger-v2-tutorial] on how to use the experimental TensorFlow
|
11 |
+
APIs to spot NaN's in tensors, view graphs generated from executions, and the
|
12 |
+
related lines in the Python source code (#3821)
|
13 |
+
|
14 |
+
## TensorBoard.dev updates
|
15 |
+
- Added support for showing the Distributions tab (#3762)
|
16 |
+
- Uploader now displays data statistics in the console while uploading data
|
17 |
+
(#3678)
|
18 |
+
- Added new uploader command line flags (#3707)
|
19 |
+
- `--dry_run`: causes the uploader to only read the logdir and display
|
20 |
+
statistics (if `--verbose` is the default 1) without uploading any data to
|
21 |
+
the server
|
22 |
+
- `--one_shot`: causes the uploader to exit immediately after all existing
|
23 |
+
data in the logdir are uploaded; this mode prints a warning message if the
|
24 |
+
logdir doesn't contain any uploadable data
|
25 |
+
- Upload button in the header offers a convenient, copyable command
|
26 |
+
- 🧪 **Experimental** DataFrame API: You can now read Scalars data from
|
27 |
+
TensorBoard.dev as a Pandas DataFrame (learn more [here][dataframe-tutorial])
|
28 |
+
|
29 |
+
[debugger-v2-tutorial]: https://www.tensorflow.org/tensorboard/debugger_v2
|
30 |
+
[dataframe-tutorial]: https://www.tensorflow.org/tensorboard/dataframe_api
|
## Bug fixes
- Projector plugin
  - Shows data when logs exist in both logdir root and subdirectory (#3694)
  - Fixed incorrect embeddings from TF2 checkpoints (#3679)
  - Added support for binary format, with 2x speedup loading large tensors in
    some cases (#3685) - thanks [@RustingSword](https://github.com/RustingSword)
  - Added [Colab tutorial][projector-colab] for Projector plugin (#3423)
- Notebooks
  - Increased port scanning from 10 to 100 to better support multi-tenant
    Notebooks (#3780) - thanks [@jerrylian-db](https://github.com/jerrylian-db)
  - Added proxy (e.g. jupyter-server-proxy) support for %tensorboard magics
    (#3674) - thanks [@zac-hopkinson](https://github.com/zac-hopkinson)
    - Set the TENSORBOARD_PROXY_URL environment variable:
      `export TENSORBOARD_PROXY_URL="/proxy/%PORT%/"`
      (a Python equivalent is sketched after this list)
- Dynamic plugins (Projector, Fairness Indicators, Profiler, What-If Tool) now
  appear when TensorBoard is launched programmatically via Python (#3695)
- Fixed download links in Custom Scalars (#3794)
- Updated broken docs (#3440, #3459, #3561, #3681) - thanks
  [@LexusH](https://github.com/LexusH),
  [@ManishAradwad](https://github.com/ManishAradwad),
  [@ricmatsui](https://github.com/ricmatsui),
  [@robertlugg](https://github.com/robertlugg)
- Better handling of S3-related InvalidRange errors (#3609) - thanks
  [@ahirner](https://github.com/ahirner)
- Fixed deprecated numpy usage (#3768) - thanks
  [@lgeiger](https://github.com/lgeiger)

[projector-colab]: https://www.tensorflow.org/tensorboard/tensorboard_projector_plugin
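
The same proxy variable can also be set from Python before the `%tensorboard`
magic runs; a minimal sketch, assuming jupyter-server-proxy is mounted at
`/proxy/`:

```python
import os

# TensorBoard substitutes the port it actually binds to for %PORT%.
os.environ["TENSORBOARD_PROXY_URL"] = "/proxy/%PORT%/"
```
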
## Deprecations

- Beholder will be removed in a future release (#3843)
- Debugger (V1) will be removed in a future release, in favor of the
  aforementioned V2 version

## Misc

The frontend now uses Angular (replacing the Polymer entry point, which will be
removed in a future release but is still visible at the `/legacy.html`
endpoint) (#3779). If you observe any bugs that do not reproduce under
`/legacy.html`, please file an issue.

For dynamic plugins, please see their respective pages
([Fairness Indicators][fairness-docs], [Profiler][profiler-docs],
[What-If Tool][wit-docs]).

[fairness-docs]: https://github.com/tensorflow/fairness-indicators/commits/master
[profiler-docs]: https://github.com/tensorflow/profiler/commits/master
[wit-docs]: https://github.com/PAIR-code/what-if-tool/blob/master/RELEASE.md

# Release 2.2.2

## Features

- Some performance improvements to line charts (#3524)
- Performance improvements in the Text plugin due to batch HTML
  sanitization (#3529)
- Performance improvements in backend markdown cleaning for tag
  rendering (#3599)
- CSS/layout performance optimization by applying layout/layer bounds where
  possible (#3642)
- The `tensorboard dev list` subcommand now reports the total size of stored
  tensors (used as the backing storage type for Histograms) (#3652)

## TensorBoard.dev updates

- TensorBoard.dev now supports the Histograms plugin, for experiments
  uploaded starting from this release
- The `tensorboard dev upload` subcommand now sends the histograms, when
  available, so that they can be rendered via the Histograms plugin on
  TensorBoard.dev
- This release may support additional plugins in the future, once those plugins
  are enabled in the TensorBoard.dev service

## Breaking changes

- The experimental and legacy SQLite support (via the `--db_import` and `--db`
  flags) has been removed to ease maintenance (#3539)

# Release 2.2.1

## TensorBoard.dev updates

- TensorBoard.dev now renders model graphs, for experiments uploaded starting
  from this release.
  - The `tensorboard dev upload` subcommand now sends the model graph, when
    available, so that it can be rendered via the Graphs plugin on
    TensorBoard.dev.
  - Large node attribute values (which would not be rendered anyway) are
    filtered out before upload.
  - Graphs that remain larger than 10MB after filtering are not uploaded.
- The `tensorboard dev upload` command supports a `--plugins` option to
  explicitly indicate the desired plugins for which summary data should be
  uploaded (#3402, #3492)
- The `tensorboard dev list` subcommand now reports the total size of stored
  binary objects (e.g., graphs) for each experiment (#3464)
- The `tensorboard dev list` subcommand now accepts a `--json` flag to allow
  parsing the output more easily (#3480); a parsing sketch follows below
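
A rough sketch of consuming the `--json` flag programmatically; the exact
output shape is not specified in these notes, so the one-JSON-object-per-line
assumption below is exactly that, an assumption:

```python
import json
import subprocess

# Capture the machine-readable experiment listing from the uploader CLI.
proc = subprocess.run(
    ["tensorboard", "dev", "list", "--json"],
    capture_output=True,
    text=True,
    check=True,
)
# Assumption: one JSON object per line; adapt if the CLI emits a single array.
experiments = [json.loads(ln) for ln in proc.stdout.splitlines() if ln.strip()]
print("found %d experiments" % len(experiments))
```
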
## Features

- Auto-reload is now disabled when the browser tab is not visible, saving
  network bandwidth (#3483)
- New logo used in the favicon (#3406)

## Bug fixes

- Plugin loading: when a plugin fails to load, TensorBoard logs an error and
  continues instead of crashing (#3484, #3486)
- Eliminated sporadic HTTP 500 errors for XHRs that do markdown rendering (#3491)

# Release 2.2.0

The 2.2 minor series tracks TensorFlow 2.2.

## Features

- The Profile plugin should now be pip-installed from `tensorboard-plugin-profile`.
  The new version works in Chrome 80 and Firefox, has better model insights, and
  will be more actively maintained.
- Added S3_ENDPOINT variable (#3368) - thanks @thealphacod3r
- Confirm that the connection to TensorBoard works, or fall back to localhost
  (#2371) - thanks @miguelmorin
- Updated the `--reload_multifile_inactive_secs` default to 24 hours (#3243)
- New `tensorboard dev update-metadata` command allows updating the name and
  description of experiments (#3277)
- Improved organization of artifacts downloaded during export from
  TensorBoard.dev (#3307)

## Bug fixes

- Fix for #3282, where the tooltip would remain even after the mouse leaves the
  plot (#3347)
- Internal fix: HParams summary protos now properly include tensor values (#3386)
- Fixes to the profiling tutorial (#3372, #3381)

## Breaking changes

- Note: as of TensorBoard 2.1.1+, only Python 3 is supported. There will be no
  further releases for Python 2, as per
  https://groups.google.com/a/tensorflow.org/forum/#!topic/developers/ifEAGK3aPls

# Release 2.1.1

## Features

- Uploader: added ability to upload and modify experiment name and description (#3277)

## Breaking changes

- As per
  https://groups.google.com/a/tensorflow.org/forum/#!topic/developers/ifEAGK3aPls
  this patch does not support Python 2. Only Python 3 is supported.

# Release 2.1.0

The 2.1 minor series tracks TensorFlow 2.1.

## Features

- Debugger: added ability to display Tensors as images, with selectable color map and zooming (#2729, #2764)
- What-If Tool improvements:
  - Added ability to set a custom distance function for counterfactuals (#2607)
  - Added ability to explore counterfactual examples for regression models (#2647)
  - Added ability to consume arbitrary prediction-time information (#2660)
  - Added ability to slice performance statistics by numeric features (in addition to categorical features) (#2678, #2704)
  - Added PR/ROC curves by class for multi-class classification models (#2755)
- Improvements for plugin developers:
  - Added support for communication between TensorBoard and plugins in iframes (#2309, #2703)
  - (Experimental) Added library for improved plugin integration (#2708)
  - Enabled dynamic plugins in TensorBoard within Colab (#2798)
- Security improvements, e.g. Content Security Policy configurations
- Reduced overhead of image, audio, and histogram summary writing API methods (#2899) - thanks @hongjunChoi

## Bug fixes

- What-If Tool:
  - Fixed sometimes-stuck threshold sliders (#2682)
  - Fixed PD plots in notebook mode with py3 kernels (#2669)
  - Fixed info dialogs re. Fairness optimization (#2694)
- Scalars dashboard: fixed unreliable data loading over slow network connections (#2825)
- Fixed potential corruption when reading files from disk when TensorFlow is not installed (#2791)
- Fixed writing of histogram summaries when using TPUs (#2883) - thanks @hongjunChoi

## TensorBoard.dev updates

- The `tensorboard dev list` subcommand now provides detailed metadata about
  each experiment.

# Release 2.0.2

## Features

- Improvements to [TensorBoard.dev] support:
  - New `tensorboard dev list` subcommand lists all experiments uploaded to
    TensorBoard.dev (#2903)
  - In the event of a transient backend issue or permanent breaking change, the
    uploader can now gracefully degrade and print a diagnostic (#2879)

[TensorBoard.dev]: https://tensorboard.dev/

# Release 2.0.1

## Features
- Preview of the TensorBoard.dev uploader! Check out <https://tensorboard.dev/>
  for information and usage instructions.

# Release 2.0.0

The 2.0 minor series tracks TensorFlow 2.0.

## Breaking changes

- TensorBoard now serves on localhost only by default to avoid unintentional
  overexposure. To expose TensorBoard to the network, either use a proxy, bind
  to a specific hostname or IP address by using the `--host` flag, or explicitly
  enable the previous behavior of binding on all network interfaces by passing
  the flag `--bind_all`. See PR #2589.

- The `--logdir` flag no longer supports passing multiple comma-delimited paths,
  which means that it now *supports* paths containing literal comma and colon
  characters, like `./logs/m=10,n=20,lr=0.001` or `./logs/run_12:30:15`. To
  mimic the old behavior, prefer using a tree of symlinks, as it works with more
  plugins; as a fallback, the flag `--logdir_spec` exposes the old behavior.
  See PR #2664.

- Projector plugin `visualize_embeddings()` API now takes `logdir` as its first
  parameter rather than `writer` (which only supported TF 1.x summary writers).
  For backwards compatibility, TF 1.x writers are still accepted, but passing
  the logdir explicitly is preferred since it works without any dependency on
  TF 1.x or 2.x summary writing. See PR #2665 and the sketch after this list.

- The namespace `tensorboard.summary.*` now aliases the summary API symbols in
  `tensorboard.summary.v2.*` rather than those in `tensorboard.summary.v1.*`.
  The old symbols can still be accessed under the `.v1` names. Note that the
  new v2 API symbols are exposed in TF 2.0 as the new `tf.summary.*` API, and
  this is normally how they should be used. See PR #2670.
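
A minimal sketch of the new `visualize_embeddings()` calling convention
described in the list above; the logdir, tensor name, and metadata path are
illustrative placeholders:

```python
from tensorboard.plugins import projector

log_dir = "/tmp/projector_demo"  # placeholder logdir

config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = "my_embedding"    # placeholder tensor name
embedding.metadata_path = "metadata.tsv"  # placeholder metadata file
# 2.0 style: pass the logdir itself rather than a TF 1.x summary writer.
projector.visualize_embeddings(log_dir, config)
```
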
## Features

- Smarter log directory polling can be used by passing `--reload_multifile=true`
  to poll all "active" event files in a directory rather than only the last one.
  This avoids problems where data written to a non-last file never appears.
  See PR #1867 for details, including how to adjust the "active" threshold.

- What-If Tool can now sort PD plots by interestingness (#2461)

# Release 1.15.0

The 1.15 minor series tracks TensorFlow 1.15.

## Features
- Embeddings projector now shows sprite images in the nearest neighbors list
  (#2543) - thanks @beasteers
- When recording hyperparameters, the trial ID can now be customized, for easier
  integration with existing tuner systems (#2442)
- Improvements to Colab and Jupyter notebook integration:
  - The `TENSORBOARD_BINARY` environment variable can now be set to invoke a
    non-default `tensorboard` binary (#2386)
  - Error messages are now clearer when the TensorBoard binary fails to launch
    (#2395)
  - The `%tensorboard` magic no longer spams log messages when a different
    version of TensorBoard is already running on the same machine (#2470)
  - The `%tensorboard` magic can now be used in Jupyter notebooks running on
    hosts other than `localhost` (#2407)
- What-If Tool improvements:
  - Errors running inference are now surfaced in the What-If Tool UI (#2414)
  - Median error stats are now displayed in addition to mean error stats (#2434)
- Mesh plugin improvements:
  - Now compatible with TensorFlow 2.0 via a new `summary_v2` module (#2443)
  - The number of vertices in the mesh can now be dynamic (#2373)
- Profile dashboard improvements:
  - Wasted time now appears in the node table, and can be used as a sort key
    (#2525)
  - Memory bandwidth utilization now appears in the dashboard header (#2525)
- Improvements for plugin developers:
  - Plugins can now be rendered in an iframe whose source is served from the
    plugin backend, eliminating the need to bundle a frontend with the
    TensorBoard binary
  - Plugins can now be discovered dynamically and loaded at runtime, by defining
    a `tensorboard_plugins` entry point
  - See our [example dynamically loaded plugin][example-plugin] for a plugin to
    use as a starting point, plus documentation
- TensorBoard now uses Polymer 2.7 (#2392, et al.)

[example-plugin]: https://github.com/tensorflow/tensorboard/tree/1.15/tensorboard/examples/plugins/example_basic#readme

## Bug fixes
- #2614 - “Toggle All Runs” button now behaves correctly on the first click when
  many runs are loaded (PR #2633)
- Scalar charts should no longer “become tiny” on certain kinds of rendering
  failures (PR #2605)
- #2028 - TensorBoard now logs less verbosely with Werkzeug 0.15.0 and up; it
  now behaves the same across Werkzeug versions (PR #2383)
- The What-If Tool can now properly compare two regression models in the initial
  Facets Dive view (PR #2414)
- Embedding projector metadata view now wraps long strings correctly (PR #2198)

# Release 1.14.0

## Features
- New hyperparameters dashboard: see [tutorial and demo][hparams-docs] and
  [summary APIs][hparams-apis]
- New dashboard for visualizing meshes and point clouds: see
  [README][mesh-readme]
- Graph dashboard now shows the Keras conceptual graph: see [tutorial and
  demo][conceptual-graph-docs]
- Embedding projector now supports the [UMAP dimensionality reduction
  algorithm][umap] ([learn more about UMAP here][umap-tutorial]) (#1901) -
  thanks @cannoneyed
- [TensorBoard notebook support][notebook-docs] is stabilized: in a Colab or
  Jupyter notebook, run `%load_ext tensorboard` followed by `%tensorboard
  --logdir ./path/to/logs`
- Profile dashboard improvements:
  - New pod viewer tool to analyze TPU performance (#2111)
  - Now allows capturing profiles from TensorBoard (#1894)
- What-If Tool improvements:
  - Now available as a notebook widget for Jupyter and Colab: see
    [demo][witwidget-demo]
  - Now shows PR curves and F1 score (#2264)
  - Now supports Cloud AI Platform, including XGBoost models (#2194)
  - Now shows feature-level attributions for individual predictions, as
    applicable (#2252)
- Image dashboard now allows scrolling for large images (#2164) - thanks @lr1d
- Scalar chart smoothing now caps at 0.999 for convenience (#1974) - thanks
  @flostim
- Scalar chart scroll-to-zoom behavior now requires holding `Alt` (#2221)
- `tensorboard` now supports a `--version` command line argument (#2097) -
  thanks @shashvatshahi1998
- Python API now defines `tensorboard.__version__` in addition to
  `tensorboard.version.VERSION` (#2026); see the sketch after this list
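
A trivial sketch of the two version attributes mentioned in the last bullet
above:

```python
import tensorboard
from tensorboard import version

print(tensorboard.__version__)  # new in 1.14
print(version.VERSION)          # pre-existing constant with the same value
```
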
## Bug fixes
- Projector metadata card now formats long words properly (PR #2016) - thanks
  @makseq
- #2010 - `.tensorboard-info` is now world-writable for multi-user \*nix systems
  (PR #2131)
- #1989 - `importlib.reload(tensorboard)` now works properly (PR #2005)

[conceptual-graph-docs]: https://www.tensorflow.org/tensorboard/r2/graphs
[hparams-apis]: https://github.com/tensorflow/tensorboard/blob/1.14/tensorboard/plugins/hparams/api.py#L15
[hparams-docs]: https://www.tensorflow.org/tensorboard/r2/hyperparameter_tuning_with_hparams
[mesh-readme]: https://github.com/tensorflow/tensorboard/blob/1.14/tensorboard/plugins/mesh/README.md#mesh-plugin
[notebook-docs]: https://www.tensorflow.org/tensorboard/r2/tensorboard_in_notebooks
[umap-tutorial]: https://umap-learn.readthedocs.io/en/latest/how_umap_works.html
[umap]: https://github.com/lmcinnes/umap#umap
[witwidget-demo]: https://colab.research.google.com/github/tensorflow/tensorboard/blob/1.14/tensorboard/plugins/interactive_inference/What_If_Tool_Notebook_Usage.ipynb

# Release 1.13.1

## Bug fixes
- #1895 - Fix `strftime`-related launch error on Windows (PR #1900)
- #1794 - Fix What-If Tool loading examples without inference (PR #1898)
- #1914 - Disable the profile dashboard inside Colab, where it doesn’t work
- #1945 - Fix profile dashboard loading behavior

# Release 1.13.0

The 1.13 minor series tracks TensorFlow 1.13.

Compatibility note: as of 1.13, TensorBoard has begun transitioning its own use
of some TensorFlow APIs to align with their naming in TF 2.0, and as a result
TensorBoard 1.13+ strictly requires TensorFlow 1.13+.

## Features
- What-If Tool notebook mode and general improvements
  - Now usable directly inside Jupyter and Colab notebooks (#1662, #1745, #1788)
  - Added comparison of multiple models (#1589, #1672)
  - Added CSV loading mode (#1597)
  - Added global partial dependence plots (#1604)
  - Added custom prediction function support (#1842)
- (Alpha) TensorBoard can be embedded inside Jupyter and Colab notebooks via a
  `%tensorboard` magic, after loading the `tb.notebook` extension (#1813, #1822)
- Profile dashboard overview page now shows step time breakdown (PR #1683)
- Line chart "log" scale is now a true log scale (#1507)
- When no --port flag is specified, TensorBoard will now search for open ports
  near the default port (6006) if that port is already in use (#1851)

## Performance improvements
- Faster event file loading by caching a runtime check (PR #1686) - thanks @abiro

## Bug fixes
- #786 (partial) - Avoid trying to smooth plots of constant y-value (PR #1698)
- #1515 - Fix image right-click accessibility in non-Chromium browsers (PR #1561)
- #1541 - Fix --event_file flag when using --inspect
- #1566 - Fix error on trying to import "google.protobuf.pyext" (PR #1887)
- #1567 - Fix display bug on line chart after toggling series selection
- #1598 - Fix clipping in graph dashboard PNG download (PR #1600)
- #1601 - Fix chart SVG download option in Firefox
- #1623 - Fix --path_prefix interpretation
- #1838 - Fix run selector synchronization across already-loaded dashboards

# Release 1.12.2

## Bug fixes
- #1620 - Fix path_prefix flag regression (PR #1623)
- #1704 - Fix debugger sidebar resizer

# Release 1.12.1

## Bug fixes
- #1549 - Run names now wrap at all characters (PR #1602) - thanks @dgrahn
- #1610 - Fix Download as PNG for large graphs
- #1684 - Fix bug rendering debugger plugin (PR #1550) - thanks @longouyang

# Release 1.12.0

The 1.12 minor series tracks TensorFlow 1.12.

## Features
- New download-as-SVG option for scalar dashboard charts (#1446)
- Image dashboard should now detect and render SVG images (#1440)
- What-If Tool example viewer/loader improvements:
  - Support for sampling examples to load (#1504)
  - Support for viewing SequenceExamples (#1513)
  - Improvements to saliency viewing/sorting (#1472)
- Profile tool shows per-program breakdown, idle time, and ops left out (#1470)

## Bug fixes
- #1463 - What-If Tool now handles classes with blank labels (PR #1471)
- #1468 - Reduce clipping in graph plugin sidebar
- #1475 - Restore tag filter persistence to URL param and across dashboards
- #1477 - Fix bug rendering TPU profile dashboard overview page
- #1480 - Fix projector hanging due to infinite loop (PR #1481)
- #1491 - Restore spinner on line charts when loading data
- #1499 - Fix stale/incorrect line charts when filtering by tag (PR #1500)
- #1505 - Fix 404 console errors in Firefox - thanks @wdirons
- #1506 - Fix --purge_orphaned_data to allow passing false (PR #1511)
- #1508 - Make the custom scalars chart's "ignore outliers" functionality work
- #1524 - Preserve line chart zoom level when data refreshes

# Release 1.11.0

The 1.11 minor series tracks TensorFlow 1.11.

## Highlights
- New What-If Tool dashboard, which provides a simple, intuitive, and powerful
  visual interface to play with a trained ML model on a set of data with
  absolutely no code required. See for details:
  https://github.com/tensorflow/tensorboard/tree/1.11/tensorboard/plugins/interactive_inference

## Features
- Graph dashboard now supports coloring nodes by XLA cluster (PR #1336)
- Last-updated time appears in the tooltip for the refresh button (PR #1362)
- Line charts support pan w/ shift key, zoom w/ scroll wheel (PR #1429, #1456)

## Performance improvements
- Better UI animation/scrolling performance (#1311, #1357)
- Reduced Plottable MouseInteraction overhead on hover (#1333/#1329)
- Optimized line chart tooltip redraw behavior (#1355)

## Bug fixes
- #982 - Fix spurious 404s for /[[_dataImageSrc]] or /[[_imageURL]] (PR #1315)
- #1320 - Fix port binding to disallow confusing IPv4/IPv6 port reuse (PR #1449)
- #1397 - Fix multi-part logdirs to correctly expand ~ for the user homedir
- #1396 - Fix "step" chart axis to show only integer ticks
- #1389 - Fix scalar card titles to omit common prefix (PR #1399)
- #1403 - Fix scalar chart shrinking problem on fast page changes
- #1406 - Fix scalar chart tooltip display to better avoid clipping

# Release 1.10.0

The 1.10 minor series tracks TensorFlow 1.10.

## Changes
- New logic for loading/launching TensorBoard (PR #1240)
  - Plugin loading now uses the new TBLoader API
  - Argument parsing now uses argparse
  - New `tb.program.launch()` API to launch TB from within Python (see the
    sketch after this list)
- Sidebars adjusted to be consistent across plugins (PR #1296)
- tb.summary.image() param order fixed to h, w (PR #1262) - thanks @ppwwyyxx
- New TPU profile dashboard progress bar for loading tools (PR #1286)
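
A minimal sketch of launching TensorBoard from within Python. This shows the
class-based form the programmatic API took in later releases (the logdir is a
placeholder); it illustrates the pattern rather than the exact 1.10
`tb.program.launch()` signature:

```python
from tensorboard import program

tb = program.TensorBoard()
# argv[0] is the program name and is ignored; the rest are ordinary CLI flags.
tb.configure(argv=[None, "--logdir", "/tmp/logs"])
url = tb.launch()  # starts the server on a background thread
print("TensorBoard listening on %s" % url)
```
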
## Bug fixes
- #1260 - Fix missing pie chart in TPU profile input pipeline analyzer
- #1280 - Fix TPU profile memory viewer issue with XLA compatibility
- #1287 - Fix dangling UI interaction layer issue in vz-line-chart
- #1294 - Fix custom scalar dashboard to de-duplicate charts - thanks @lgeiger

# Release 1.9.0

The 1.9 minor series tracks TensorFlow 1.9.

## Highlights

- Improved performance with log directories on GCS (Google Cloud Storage), with
  faster traversal time and reduced bandwidth consumption (PRs #1087, #1226)
- Profile dashboard improvements, including:
  - New memory viewer tool that visualizes peak memory usage (#1223)
  - Trace viewer tool now supports streaming mode, which dynamically renders a
    much longer trace (#1128)
  - Op profile tool now shows memory utilization in the op details card (#1238)
  - Profile dashboard now supports visualizing data from multiple hosts (#1117)

## Features
- Graph dashboard now allows searching nodes by regex (#1130)
- New --samples_per_plugin flag to control how many samples are kept (#1138)
- Better error when the --logdir/--db flag is omitted (#1189) - thanks @oxinabox
- Debugger plugin can now show single elements of string tensors (#1131)

## Bug fixes
- #1107 - Beholder plugin should no longer reserve GPU (PR #1114)
- #1190 - Beholder plugin summary placeholder no longer interferes with normal
  summary use and/or Estimator - thanks @TanUkkii007 (PR #1148)
- #427 and #588 - removed pip package deps on bleach and html5lib (PR #1142)
- #1191 - fixed debugger plugin UnboundLocalError - thanks @cfroehli
- #1200 - fixed debugger plugin binary-valued string tensor issues
- #1201 - fixed "dictionary changed size" race condition in reloader (PR #1235)

# Release 1.8.0

The 1.8 minor series tracks TensorFlow 1.8.

## Bug fixes

- #1082 - fixes rendering for certain graphs with metaedges/function nodes
- #1097 - correction to debugger plugin keras code snippet (PR #1100)
- #1111 - event reader logic now supports TF 1.8 GetNext() API (PR #1086)

# Release 1.7.0

The 1.7 minor series tracks TensorFlow 1.7.

## Highlights

- (Beta) New Beholder plugin that shows a live video feed of tensor data during
  model training, by @chrisranderson. Caveat: currently recommended only for use
  where TensorBoard and TensorFlow share a local disk. See for details:
  https://github.com/tensorflow/tensorboard/tree/1.7/tensorboard/plugins/beholder

## Features

- Debugger tensor value card improvements:
  - Entering new slice/time indices will automatically refresh the view (#1017)
  - Clicking the title will highlight the node in other parts of the UI (#1023)
- Debugger health pills now show the number of NaN/Inf values, if any (#1026)

## Changes

- Audio summary playback elements no longer loop by default (PR #1061), but
  looping can be enabled for individual elements through a right-click option.

## Bug fixes

- #965 - pr_curve_streaming_op no longer results in duplicate plots (PR #1053)
- #967 - custom scalar margin plots with missing tags now indicate the run
- #970 - browser back button now works across home page (/) - thanks @brianwa84
- #990 - apple-touch-icon.png requests no longer trigger 404s - thanks @lanpa
- #1010 - content no longer intrudes into the sidebar on narrow viewports
- #1016 - CTRL+C now exits TensorBoard even with the debugger enabled (PR #975)
- #1021 - text plugin no longer always shows as inactive on first page load

# Release 1.6.0

NOTICE: TensorBoard 1.6.0+ has moved to the `tensorboard` package name on PyPI:
https://pypi.python.org/pypi/tensorboard. Only bugfix updates on 1.5.x will be
applied to the old package name (`tensorflow-tensorboard`). To upgrade to
TensorBoard 1.6.0+, we suggest you *first* `pip uninstall tensorflow-tensorboard`
before doing `pip install tensorboard`. See "Known issues" below if you run into
problems using TensorBoard after upgrading.

The 1.6 minor series tracks TensorFlow 1.6.

## Highlights

- (Beta) New Profile dashboard, which provides a suite of tools for inspecting
  TPU performance. See for details:
  https://github.com/tensorflow/tensorboard/tree/1.6/tensorboard/plugins/profile
- (Alpha) New Debugger dashboard, which provides a visual interface to `tfdbg`,
  the TensorFlow debugger. See for details:
  https://github.com/tensorflow/tensorboard/tree/1.6/tensorboard/plugins/debugger

## Known issues

- Package `tensorboard` is installed but the command and/or module are missing
  or have the wrong version - this may be due to conflicts with other packages
  that provide `tensorboard` scripts or modules. Please uninstall *all* such
  packages, including `tensorboard`, `tensorflow-tensorboard`, and `tb-nightly`,
  and then reinstall `tensorboard`.
- Bazel 0.9.0+ is required to build from source - this change was necessary in
  order to add support for building at Bazel 0.10.0 and above. Please update
  Bazel.

# Release 1.5.1

NOTICE: TensorBoard 1.6.0+ will move to the `tensorboard` package name on PyPI,
instead of using `tensorflow-tensorboard`. Only bugfix updates on 1.5.x will be
applied to the old package name. To upgrade to TensorBoard 1.6.0+, please *first*
`pip uninstall tensorflow-tensorboard` before doing `pip install tensorboard`.

The 1.5 minor series tracks TensorFlow 1.5.

## Bug fixes

- #554 - line charts no longer "shrink" after run changes on other tabs
- #889 - fixed xComponentsCreationMethod default in vz-line-chart
- #898 - fixed offset of checkbox label in projector dashboard - thanks @marcj
- #903 - disabled margin plot smoothing to avoid the line going out of bounds
- #916 - made `futures` dependency py2-only to avoid install failures
- #924 - fixed graph dashboard bug causing blank PNG download and minimap
- #926 - made projector plugin API available in pip package

## Documentation updates

- Custom scalars documentation now documents the margin plots feature (#878)
- FAQ updated to describe custom scalars plugin use cases

# Release 1.5.0

The 1.5 minor series tracks TensorFlow 1.5.

## Highlights

- New Custom Scalars dashboard, which can display configurable custom line and
  margin charts based on the same data as the regular Scalars dashboard. See
  for details: https://github.com/tensorflow/tensorboard/tree/1.5/tensorboard/plugins/custom_scalar
- Many projector plugin enhancements thanks to @francoisluus, which enable
  cognitive-assisted labeling via semi-supervised t-SNE
  - t-SNE-specific features: semi-supervision (#811) plus perturb (#705) and
    pause/resume (#691) buttons
  - General features: metadata editor (#753), selection edit mode (#697), edit
    box for neighbors slider (#733), 2D sprite element zooming (#696)

## Features

- Image dashboard brightness and contrast sliders (#771) - thanks @edmundtong
- Top-level dashboard tabs now scroll when there are too many to fit (#730)
- Settable browser window title with the --window_title flag (#804) - thanks @tkunic
- Tag filters are now reflected in the URL, making them saveable (#787)
- Pane-based dashboards now only load charts from the first two panes by default,
  which should improve responsiveness (#643 defaults the tag filter search string
  to empty, and #871 makes the first two panes open by default)
- Lower latency to serve TensorBoard HTML thanks to preloading in memory (#708)
- Lazy imports ("import tensorboard as tb") now work for summary APIs (#778)
- PR curve summaries now have pb (#633) and raw_data_pb (#646) versions

## Bug fixes

- #265 - fixed `--logdir` to handle Windows drive letters - thanks @shakedel
- #784 - fixed bug in find-similar-subgraph algorithm - thanks @trisolaran
- Graph plugin fixed to
  - correctly render function nodes (#817)
  - pan to nodes more reliably (#824, #837)
  - rebuild hierarchy if callbacks change, to avoid a race in rendering (#879)

# Release 0.4.0

The 0.4 minor series tracks TensorFlow 1.4.

## Features

- PR Curve plugin has a full-featured new dashboard (#387, #426, many others)
- PR Curve plugin has new streaming and raw summary-writing ops (#520, #587)
- Graph plugin has a new "Functions" scene group to show function libraries and
  links to function calls (#394, #395, #497, #551, others)
- Graph plugin metanodes are now colored more helpfully (#467)
- Graph plugin selected run is now persisted to the URL (#505)
- Standard dashboard card header UI is more compact and readable (#430)
- Pagination limit can now be configured in settings (#535)
- Text plugin now has op and pb summary writing methods (#510)
- Reduced boilerplate and cleaner API hooks for custom plugins (#611, #620)
- Faster initial loads due to improved active plugin detection (#621, #663)
- Reuse of TCP connections with switch to using HTTP/1.1 (#617)

## Bug fixes

- #477 - fixed URLs to properly URI-encode run and tag names
- #610 - fixed smoothing algorithm initial value bias - thanks @alexirpan
- #647 - fixed text plugin decoding error that led to bad markdown processing
saved_models/tensorboard/SECURITY.md
ADDED
@@ -0,0 +1,10 @@
# TensorBoard Security

Please refer to [TensorFlow’s security model and guidelines][tf-security].

To report any security related issues, please email `security@tensorflow.org`
[as described in TensorFlow’s `SECURITY.md`][email]. Consult that document for
details, including an encryption key for especially sensitive disclosures.

[email]: https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md#reporting-vulnerabilities
[tf-security]: https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md
saved_models/tensorboard/WORKSPACE
ADDED
@@ -0,0 +1,67 @@
workspace(name = "org_tensorflow_tensorboard")

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "io_bazel_rules_webtesting",
    sha256 = "f89ca8e91ac53b3c61da356c685bf03e927f23b97b086cc593db8edc088c143f",
    urls = [
        # tag 0.3.1 resolves to commit afa8c4435ed8fd832046dab807ef998a26779ecb (2019-04-03 14:10:32 -0700)
        "https://github.com/bazelbuild/rules_webtesting/releases/download/0.3.1/rules_webtesting.tar.gz",
    ],
)

http_archive(
    name = "io_bazel_rules_closure",
    sha256 = "6a900831c1eb8dbfc9d6879b5820fd614d4ea1db180eb5ff8aedcb75ee747c1f",
    strip_prefix = "rules_closure-db4683a2a1836ac8e265804ca5fa31852395185b",
    urls = [
        "https://github.com/bazelbuild/rules_closure/archive/db4683a2a1836ac8e265804ca5fa31852395185b.tar.gz",  # 2020-01-15
    ],
)

load("@io_bazel_rules_closure//closure:repositories.bzl", "rules_closure_dependencies")

rules_closure_dependencies()

http_archive(
    name = "build_bazel_rules_nodejs",
    sha256 = "f9e7b9f42ae202cc2d2ce6d698ccb49a9f7f7ea572a78fd451696d03ef2ee116",
    urls = [
        "https://github.com/bazelbuild/rules_nodejs/releases/download/1.6.0/rules_nodejs-1.6.0.tar.gz",
    ],
)

load("@build_bazel_rules_nodejs//:index.bzl", "yarn_install")

yarn_install(
    name = "npm",
    package_json = "//:package.json",
    yarn_lock = "//:yarn.lock",
    # Opt out of symlinking the local node_modules folder into Bazel's internal
    # directory. Symlinking is incompatible with our toolchain, which often
    # removes the source directory without `bazel clean`, creating a broken
    # symlink into the node_modules folder.
    symlink_node_modules = False,
)

load("@npm//:install_bazel_dependencies.bzl", "install_bazel_dependencies")

install_bazel_dependencies()

load("@bazel_skylib//lib:versions.bzl", "versions")

# Keep this version in sync with the BAZEL environment variable defined
# in our .travis.yml config.
versions.check(minimum_bazel_version = "2.1.0")

# Please add all new dependencies in workspace.bzl.
load("//third_party:workspace.bzl", "tensorboard_workspace")

tensorboard_workspace()
saved_models/tensorboard/WORKSPACE.bak
ADDED
@@ -0,0 +1,112 @@
workspace(name = "org_tensorflow_tensorboard")

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "io_bazel_rules_webtesting",
    sha256 = "f89ca8e91ac53b3c61da356c685bf03e927f23b97b086cc593db8edc088c143f",
    urls = [
        # tag 0.3.1 resolves to commit afa8c4435ed8fd832046dab807ef998a26779ecb (2019-04-03 14:10:32 -0700)
        "http://mirror.tensorflow.org/github.com/bazelbuild/rules_webtesting/releases/download/0.3.1/rules_webtesting.tar.gz",
        "https://github.com/bazelbuild/rules_webtesting/releases/download/0.3.1/rules_webtesting.tar.gz",
    ],
)

load("@io_bazel_rules_webtesting//web:repositories.bzl", "web_test_repositories")

web_test_repositories(omit_bazel_skylib = True)

load("@io_bazel_rules_webtesting//web:py_repositories.bzl", "py_repositories")

py_repositories()

http_archive(
    name = "io_bazel_rules_closure",
    sha256 = "6a900831c1eb8dbfc9d6879b5820fd614d4ea1db180eb5ff8aedcb75ee747c1f",
    strip_prefix = "rules_closure-db4683a2a1836ac8e265804ca5fa31852395185b",
    urls = [
        "http://mirror.tensorflow.org/github.com/bazelbuild/rules_closure/archive/db4683a2a1836ac8e265804ca5fa31852395185b.tar.gz",
        "https://github.com/bazelbuild/rules_closure/archive/db4683a2a1836ac8e265804ca5fa31852395185b.tar.gz",  # 2020-01-15
    ],
)

load("@io_bazel_rules_closure//closure:repositories.bzl", "rules_closure_dependencies")

rules_closure_dependencies(
    omit_bazel_skylib = True,
    omit_com_google_protobuf = True,
    omit_com_google_protobuf_js = True,
)

http_archive(
    name = "build_bazel_rules_nodejs",
    sha256 = "f9e7b9f42ae202cc2d2ce6d698ccb49a9f7f7ea572a78fd451696d03ef2ee116",
    urls = [
        "http://mirror.tensorflow.org/github.com/bazelbuild/rules_nodejs/releases/download/1.6.0/rules_nodejs-1.6.0.tar.gz",
        "https://github.com/bazelbuild/rules_nodejs/releases/download/1.6.0/rules_nodejs-1.6.0.tar.gz",
    ],
)

load("@build_bazel_rules_nodejs//:index.bzl", "yarn_install")

yarn_install(
    name = "npm",
    package_json = "//:package.json",
    yarn_lock = "//:yarn.lock",
    # Opt out of symlinking the local node_modules folder into Bazel's internal
    # directory. Symlinking is incompatible with our toolchain, which often
    # removes the source directory without `bazel clean`, creating a broken
    # symlink into the node_modules folder.
    symlink_node_modules = False,
)

load("@npm//:install_bazel_dependencies.bzl", "install_bazel_dependencies")

install_bazel_dependencies()

http_archive(
    name = "io_bazel_rules_sass",
    sha256 = "9dcfba04e4af896626f4760d866f895ea4291bc30bf7287887cefcf4707b6a62",
    strip_prefix = "rules_sass-1.26.3",
    urls = [
        "http://mirror.tensorflow.org/github.com/bazelbuild/rules_sass/archive/1.26.3.zip",
        "https://github.com/bazelbuild/rules_sass/archive/1.26.3.zip",
    ],
)

http_archive(
    name = "org_tensorflow",
    # NOTE: when updating this, MAKE SURE to also update the protobuf_js runtime version
    # in third_party/workspace.bzl to >= the protobuf/protoc version provided by TF.
    sha256 = "2595a5c401521f20a2734c4e5d54120996f8391f00bb62a57267d930bce95350",
    strip_prefix = "tensorflow-2.3.0",
    urls = [
        "http://mirror.tensorflow.org/github.com/tensorflow/tensorflow/archive/v2.3.0.tar.gz",  # 2020-07-23
        "https://github.com/tensorflow/tensorflow/archive/v2.3.0.tar.gz",
    ],
)

load("@org_tensorflow//tensorflow:workspace.bzl", "tf_workspace")

# tf_workspace()

load("@bazel_skylib//lib:versions.bzl", "versions")

# Keep this version in sync with the BAZEL environment variable defined
# in our .travis.yml config.
versions.check(minimum_bazel_version = "2.1.0")

load("@io_bazel_rules_sass//:package.bzl", "rules_sass_dependencies")

rules_sass_dependencies()

load("@io_bazel_rules_sass//:defs.bzl", "sass_repositories")

sass_repositories()

load("@com_github_grpc_grpc//bazel:grpc_deps.bzl", "grpc_deps")

grpc_deps()

load("@upb//bazel:repository_defs.bzl", "bazel_version_repository")

bazel_version_repository(name = "bazel_version")

# Please add all new dependencies in workspace.bzl.
load("//third_party:workspace.bzl", "tensorboard_workspace")

tensorboard_workspace()
saved_models/tensorboard/ci/bazelrc
ADDED
@@ -0,0 +1,34 @@
# Limit resources since Travis Trusty GCE VMs have 2 cores and 7.5 GB RAM.
build --local_resources=4000,2,1.0
build --worker_max_instances=2

# Ensure sandboxing is on to increase hermeticity.
build --spawn_strategy=sandboxed
build --worker_sandboxing

# Ensure the PATH env var from our virtualenv propagates into tests, which is
# no longer on by default in Bazel 0.21.0 and possibly again in the future.
# We set this flag for "build" since "test" inherits it, but if we don't set
# it for build too, this causes a rebuild at test time, and if we set it for
# both we hit https://github.com/bazelbuild/bazel/issues/8237.
#
# See also:
# https://github.com/bazelbuild/bazel/issues/7095 (protobuf PATH sensitivity)
# https://github.com/bazelbuild/bazel/issues/7026 (future of action_env)
build --action_env=PATH

# Set up caching on local disk so incremental builds are faster.
# See https://bazel.build/designs/2016/09/30/repository-cache.html
build --repository_cache=~/.cache/tb-bazel-repo
fetch --repository_cache=~/.cache/tb-bazel-repo
query --repository_cache=~/.cache/tb-bazel-repo
# See https://docs.bazel.build/versions/master/remote-caching.html#disk-cache
build --disk_cache=~/.cache/tb-bazel-disk

# Log more information to help with debugging, and disable curses output which
# just adds more clutter to the log. (Travis spoofs an interactive terminal.)
common --curses=no
build --verbose_failures
build --worker_verbose
test --test_output=errors
test --test_verbose_timeout_warnings
saved_models/tensorboard/ci/download_bazel.sh
ADDED
@@ -0,0 +1,38 @@
#!/bin/sh
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# Script to download Bazel binary directly onto a build machine.

set -e

die() {
  printf >&2 "%s\n" "$1"
  exit 1
}

if [ "$#" -ne 3 ]; then
  die "Usage: $0 <version> <sha256sum> <destination-file>"
fi

version="$1"
checksum="$2"
dest="$3"

mirror_url="http://mirror.tensorflow.org/github.com/bazelbuild/bazel/releases/download/${version}/bazel-${version}-linux-x86_64"
github_url="https://github.com/bazelbuild/bazel/releases/download/${version}/bazel-${version}-linux-x86_64"

exec "$(dirname "$0")/download_executable.sh" "${checksum}" "${dest}" \
  "${mirror_url}" "${github_url}"
saved_models/tensorboard/ci/download_buildifier.sh
ADDED
@@ -0,0 +1,38 @@
#!/bin/sh
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# Script to download Buildifier binary directly onto a build machine.

set -e

die() {
  printf >&2 "%s\n" "$1"
  exit 1
}

if [ "$#" -ne 3 ]; then
  die "Usage: $0 <buildtools-version> <sha256sum> <destination-file>"
fi

version="$1"
checksum="$2"
dest="$3"

mirror_url="http://mirror.tensorflow.org/github.com/bazelbuild/buildtools/releases/download/${version}/buildifier"
github_url="https://github.com/bazelbuild/buildtools/releases/download/${version}/buildifier"

exec "$(dirname "$0")/download_executable.sh" "${checksum}" "${dest}" \
  "${mirror_url}" "${github_url}"
saved_models/tensorboard/ci/download_buildozer.sh
ADDED
@@ -0,0 +1,38 @@
#!/bin/sh
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# Script to download Buildozer binary directly onto a build machine.

set -e

die() {
  printf >&2 "%s\n" "$1"
  exit 1
}

if [ "$#" -ne 3 ]; then
  die "Usage: $0 <buildtools-version> <sha256sum> <destination-file>"
fi

version="$1"
checksum="$2"
dest="$3"

mirror_url="http://mirror.tensorflow.org/github.com/bazelbuild/buildtools/releases/download/${version}/buildozer"
github_url="https://github.com/bazelbuild/buildtools/releases/download/${version}/buildozer"

exec "$(dirname "$0")/download_executable.sh" "${checksum}" "${dest}" \
  "${mirror_url}" "${github_url}"
saved_models/tensorboard/ci/download_executable.sh
ADDED
@@ -0,0 +1,48 @@
#!/bin/sh
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# Script to download a binary directly onto a build machine, with
# checksum verification.

set -e

die() {
  printf >&2 "%s\n" "$1"
  exit 1
}

if [ "$#" -lt 3 ]; then
  die "Usage: $0 <sha256sum> <destination-file> <url> [<url>...]"
fi

checksum="$1"
dest="$2"
shift 2

temp_dest="$(mktemp)"

for url; do
  wget -t 3 -O "${temp_dest}" "${url}" \
    && printf "%s  %s\n" "${checksum}" "${temp_dest}" | shasum -a 256 --check \
    || { rm -f "${temp_dest}"; continue; }
  mv "${temp_dest}" "${dest}"
  break
done

+
[ -f "${dest}" ]
|
47 |
+
chmod +x "${dest}"
|
48 |
+
ls -l "${dest}"
|
saved_models/tensorboard/package.json
ADDED
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
{
  "name": "tensorboard",
  "version": "0.0.0-unused",
  "description": "TensorFlow's visualization toolkit",
  "private": true,
  "directories": {
    "doc": "docs"
  },
  "scripts": {
    "postinstall": "ngcc -p main es2015",
    "build": "bazel build //...",
    "test": "ibazel test //...",
    "lint": "prettier --check 'tensorboard/**/*.'{css,html,js,ts,scss} .github/**/*.yml",
    "fix-lint": "prettier --write 'tensorboard/**/*.'{css,html,js,ts,scss} .github/**/*.yml"
  },
  "repository": {
    "type": "git",
    "url": "git+https://github.com/tensorflow/tensorboard.git"
  },
  "keywords": [
    "ml",
    "visualization"
  ],
  "author": "TensorFlow authors",
  "license": "Apache-2.0",
  "bugs": {
    "url": "https://github.com/tensorflow/tensorboard/issues"
  },
  "homepage": "https://github.com/tensorflow/tensorboard#readme",
  "devDependencies": {
    "@angular/bazel": "^9.0.0",
    "@angular/cli": "^9.1.2",
    "@angular/compiler": "^9.0.0",
    "@angular/compiler-cli": "^9.0.0",
    "@bazel/bazel": "2.1",
    "@bazel/ibazel": "^0.12.4",
    "@bazel/jasmine": "^1.6.0",
    "@bazel/karma": "1.6.0",
    "@bazel/protractor": "^1.6.0",
    "@bazel/rollup": "^1.6.0",
    "@bazel/terser": "^1.6.0",
    "@bazel/typescript": "^1.6.0",
    "@rollup/plugin-commonjs": "^14.0.0",
    "@rollup/plugin-node-resolve": "^8.4.0",
    "@types/chai": "^4.2.7",
    "@types/d3": "5.7.2",
    "@types/jasmine": "^3.5.10",
    "@types/lodash": "^4.14.158",
    "@types/node": "^13.13.1",
    "@types/requirejs": "^2.1.31",
    "@types/resize-observer-browser": "^0.1.3",
    "@types/sinon": "^7.5.2",
    "chai": "^4.2.0",
    "jasmine-core": "^3.5.0",
    "karma": "5.0.2",
    "karma-chrome-launcher": "^3.1.0",
    "karma-firefox-launcher": "^1.3.0",
    "karma-jasmine": "^3.1.0",
    "karma-requirejs": "^1.1.0",
    "karma-sourcemap-loader": "^0.3.7",
    "prettier": "2.1.1",
    "requirejs": "^2.3.6",
    "rollup": "^2.26.4",
    "sinon": "^7.4.1",
    "terser": "^4.6.3",
    "tslib": "^1.10.0",
    "typescript": "3.8.3"
  },
  "dependencies": {
    "@angular/animations": "^9.0.0",
    "@angular/cdk": "^9.0.0",
    "@angular/common": "^9.0.0",
    "@angular/core": "^9.0.0",
    "@angular/forms": "^9.0.0",
    "@angular/http": "^7.2.16",
    "@angular/localize": "^9.1.7",
    "@angular/material": "^9.0.0",
    "@angular/platform-browser": "^9.0.0",
    "@angular/platform-browser-dynamic": "^9.0.0",
    "@angular/router": "^9.0.0",
    "@ngrx/effects": "^9.0.0",
    "@ngrx/store": "^9.0.0",
    "@polymer/decorators": "^3.0.0",
    "@polymer/iron-behaviors": "^3.0.1",
    "@polymer/iron-collapse": "^3.0.1",
    "@polymer/iron-flex-layout": "^3.0.1",
    "@polymer/iron-icon": "^3.0.1",
    "@polymer/iron-icons": "^3.0.1",
    "@polymer/iron-iconset-svg": "^3.0.1",
    "@polymer/iron-list": "^3.1.0",
    "@polymer/iron-pages": "^3.0.1",
    "@polymer/paper-button": "^3.0.1",
    "@polymer/paper-checkbox": "^3.1.0",
    "@polymer/paper-dialog": "^3.0.1",
    "@polymer/paper-dialog-scrollable": "^3.0.1",
    "@polymer/paper-dropdown-menu": "^3.1.0",
    "@polymer/paper-header-panel": "^3.0.1",
    "@polymer/paper-icon-button": "^3.0.2",
    "@polymer/paper-input": "^3.2.1",
    "@polymer/paper-item": "^3.0.1",
    "@polymer/paper-listbox": "^3.0.1",
    "@polymer/paper-material": "^3.0.1",
    "@polymer/paper-menu-button": "^3.0.1",
    "@polymer/paper-progress": "^3.0.1",
    "@polymer/paper-radio-button": "^3.0.1",
    "@polymer/paper-radio-group": "^3.0.1",
    "@polymer/paper-slider": "^3.0.1",
    "@polymer/paper-spinner": "^3.0.2",
    "@polymer/paper-styles": "^3.0.1",
    "@polymer/paper-tabs": "^3.1.0",
    "@polymer/paper-toast": "^3.0.1",
    "@polymer/paper-toggle-button": "^3.0.1",
    "@polymer/paper-toolbar": "^3.0.1",
    "@polymer/paper-tooltip": "^3.0.1",
    "@polymer/polymer": "^3.4.1",
    "@tensorflow/tfjs": "^2.3.0",
    "@vaadin/vaadin-grid": "^5.6.6",
    "d3": "5.7.0",
    "dagre": "^0.8.5",
    "lodash": "^4.17.19",
    "monaco-editor-core": "^0.20.0",
    "monaco-languages": "^1.10.0",
    "ngx-color-picker": "^9.0.0",
    "numeric": "^1.2.6",
    "plottable": "^3.9.0",
    "requirejs": "^2.3.6",
    "rxjs": "7.0.0-beta.0",
    "search-query-parser": "^1.5.5",
    "three": "~0.108.0",
    "umap-js": "^1.3.2",
    "web-animations-js": "^2.3.2",
    "zone.js": "^0.10.2",
    "terser-webpack-plugin": "^5.3.10",
    "webpack": "^5.90.1"
  }
}
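The manifest above delegates all real work to Bazel and Prettier through its `scripts` map. A minimal inspection sketch, assuming it is run from the directory containing this package.json (the helper itself is hypothetical and not part of the repository):

```python
# Hypothetical helper: lists the npm scripts defined in the package.json
# above, showing that builds and tests are delegated to Bazel/ibazel.
import json

with open("package.json") as f:
    pkg = json.load(f)

for name, command in sorted(pkg["scripts"].items()):
    print(f"{name:12} -> {command}")
# e.g. "build        -> bazel build //..." and "test -> ibazel test //..."
```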
saved_models/tensorboard/pyproject.toml
ADDED
@@ -0,0 +1,5 @@
[tool.black]
line-length = 80
# TODO(@wchargin): Drop `py35` here once we drop support for Python 3.5
# and aren't affected by <https://bugs.python.org/issue9232>.
target-version = ["py27", "py35", "py36", "py37", "py38"]
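Black discovers the `[tool.black]` table in pyproject.toml on its own when invoked from the project root, so no flags are needed to pick up the 80-column limit or target versions. A minimal sketch, assuming `black` is installed in the active environment (the wrapper script is illustrative, not part of the repo):

```python
# Hypothetical check script: runs Black in --check mode, which reports files
# that would be reformatted without modifying them. Black reads the
# [tool.black] settings from pyproject.toml automatically.
import subprocess

result = subprocess.run(["black", "--check", "."])
print("clean" if result.returncode == 0 else "needs reformatting")
```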
saved_models/tensorboard/python_configure.bzl.txt
ADDED
@@ -0,0 +1,313 @@
"""Repository rule for Python autoconfiguration.

`python_configure` depends on the following environment variables:

  * `PYTHON_BIN_PATH`: location of python binary.
  * `PYTHON_LIB_PATH`: location of python libraries.
"""

load(
    "//third_party/remote_config:common.bzl",
    "BAZEL_SH",
    "PYTHON_BIN_PATH",
    "PYTHON_LIB_PATH",
    "TF_PYTHON_CONFIG_REPO",
    "auto_config_fail",
    "config_repo_label",
    "execute",
    "get_bash_bin",
    "get_host_environ",
    "get_python_bin",
    "is_windows",
    "raw_exec",
    "read_dir",
)

def _genrule(src_dir, genrule_name, command, outs):
    """Returns a string with a genrule.

    The genrule executes the given command and produces the given outputs.
    """
    return (
        "genrule(\n" +
        '    name = "' +
        genrule_name + '",\n' +
        "    outs = [\n" +
        outs +
        "\n    ],\n" +
        '    cmd = """\n' +
        command +
        '\n   """,\n' +
        ")\n"
    )

def _norm_path(path):
    """Returns a path with '/' separators and no trailing slash."""
    path = path.replace("\\", "/")
    if path[-1] == "/":
        path = path[:-1]
    return path

def _symlink_genrule_for_dir(
        repository_ctx,
        src_dir,
        dest_dir,
        genrule_name,
        src_files = [],
        dest_files = []):
    """Returns a genrule to symlink (or copy, on Windows) a set of files.

    If src_dir is passed, files will be read from the given directory;
    otherwise we assume files are in src_files and dest_files.
    """
    if src_dir != None:
        src_dir = _norm_path(src_dir)
        dest_dir = _norm_path(dest_dir)
        files = "\n".join(read_dir(repository_ctx, src_dir))

        # Create a list with the src_dir stripped to use for outputs.
        dest_files = files.replace(src_dir, "").splitlines()
        src_files = files.splitlines()
    command = []
    outs = []
    for i in range(len(dest_files)):
        if dest_files[i] != "":
            # If we have only one file to link we do not want to use the
            # dest_dir, as $(@D) will include the full path to the file.
            dest = "$(@D)/" + dest_dir + dest_files[i] if len(dest_files) != 1 else "$(@D)/" + dest_files[i]

            # Copy the headers to create a sandboxable setup.
            cmd = "cp -f"
            command.append(cmd + ' "%s" "%s"' % (src_files[i], dest))
            outs.append('        "' + dest_dir + dest_files[i] + '",')
    genrule = _genrule(
        src_dir,
        genrule_name,
        " && ".join(command),
        "\n".join(outs),
    )
    return genrule

def _get_python_lib(repository_ctx, python_bin):
    """Gets the python lib path."""
    python_lib = get_host_environ(repository_ctx, PYTHON_LIB_PATH)
    if python_lib != None:
        return python_lib

    # The interesting program to execute.
    print_lib = [
        "from __future__ import print_function",
        "import site",
        "import os",
        "python_paths = []",
        "if os.getenv('PYTHONPATH') is not None:",
        "  python_paths = os.getenv('PYTHONPATH').split(':')",
        "try:",
        "  library_paths = site.getsitepackages()",
        "except AttributeError:",
        "  from distutils.sysconfig import get_python_lib",
        "  library_paths = [get_python_lib()]",
        "all_paths = set(python_paths + library_paths)",
        "paths = []",
        "for path in all_paths:",
        "  if os.path.isdir(path):",
        "    paths.append(path)",
        "if len(paths) >= 1:",
        "  print(paths[0])",
    ]

    # The below script writes the above program to a file
    # and executes it. This is to work around the limitation
    # of not being able to upload files as part of execute.
    cmd = "from os import linesep;"
    cmd += "f = open('script.py', 'w');"
    for line in print_lib:
        cmd += 'f.write("%s" + linesep);' % line
    cmd += "f.close();"
    cmd += "from os import system;"
    cmd += 'system(r"%s script.py");' % python_bin

    result = execute(repository_ctx, [python_bin, "-c", cmd])
    return result.stdout.strip()

def _check_python_lib(repository_ctx, python_lib):
    """Checks the python lib path."""
    cmd = 'test -d "%s" -a -x "%s"' % (python_lib, python_lib)
    result = raw_exec(repository_ctx, [get_bash_bin(repository_ctx), "-c", cmd])
    if result.return_code == 1:
        auto_config_fail("Invalid python library path: %s" % python_lib)

def _check_python_bin(repository_ctx, python_bin):
    """Checks the python bin path."""
    cmd = '[[ -x "%s" ]] && [[ ! -d "%s" ]]' % (python_bin, python_bin)
    result = raw_exec(repository_ctx, [get_bash_bin(repository_ctx), "-c", cmd])
    if result.return_code == 1:
        auto_config_fail("--define %s='%s' is not executable. Is it the python binary?" % (
            PYTHON_BIN_PATH,
            python_bin,
        ))

def _get_python_include(repository_ctx, python_bin):
    """Gets the python include path."""
    result = execute(
        repository_ctx,
        [
            python_bin,
            "-W ignore",
            "-c",
            "from __future__ import print_function;" +
            "from distutils import sysconfig;" +
            "print(sysconfig.get_python_inc())",
        ],
        error_msg = "Problem getting python include path.",
        error_details = ("Is the Python binary path set up right? " +
                         "(See ./configure or " + PYTHON_BIN_PATH + ".) " +
                         "Is distutils installed?"),
    )
    return result.stdout.splitlines()[0]

def _get_python_import_lib_name(repository_ctx, python_bin):
    """Gets the Python import library name (pythonXY.lib) on Windows."""
    result = execute(
        repository_ctx,
        [
            python_bin,
            "-c",
            "import sys;" +
            'print("python" + str(sys.version_info[0]) + ' +
            '      str(sys.version_info[1]) + ".lib")',
        ],
        error_msg = "Problem getting python import library.",
        error_details = ("Is the Python binary path set up right? " +
                         "(See ./configure or " + PYTHON_BIN_PATH + ".) "),
    )
    return result.stdout.splitlines()[0]

def _get_numpy_include(repository_ctx, python_bin):
    """Gets the numpy include path."""
    return execute(
        repository_ctx,
        [
            python_bin,
            "-W ignore",
            "-c",
            "from __future__ import print_function;" +
            "import numpy;" +
            " print(numpy.get_include());",
        ],
        error_msg = "Problem getting numpy include path.",
        error_details = "Is numpy installed?",
    ).stdout.splitlines()[0]

def _create_local_python_repository(repository_ctx):
    """Creates the repository containing files set up to build with Python."""

    # Resolve all labels before doing any real work. Resolving causes the
    # function to be restarted with all previous state being lost. This
    # can easily lead to a O(n^2) runtime in the number of labels.
    build_tpl = repository_ctx.path(Label("//third_party/py:BUILD.tpl"))

    python_bin = get_python_bin(repository_ctx)
    _check_python_bin(repository_ctx, python_bin)
    python_lib = _get_python_lib(repository_ctx, python_bin)
    _check_python_lib(repository_ctx, python_lib)
    python_include = _get_python_include(repository_ctx, python_bin)
    numpy_include = _get_numpy_include(repository_ctx, python_bin) + "/numpy"
    python_include_rule = _symlink_genrule_for_dir(
        repository_ctx,
        python_include,
        "python_include",
        "python_include",
    )
    python_import_lib_genrule = ""

    # To build a Python C/C++ extension on Windows, we need to link to the
    # python import library pythonXY.lib.
    # See https://docs.python.org/3/extending/windows.html
    if is_windows(repository_ctx):
        python_include = _norm_path(python_include)
        python_import_lib_name = _get_python_import_lib_name(repository_ctx, python_bin)
        python_import_lib_src = python_include.rsplit("/", 1)[0] + "/libs/" + python_import_lib_name
        python_import_lib_genrule = _symlink_genrule_for_dir(
            repository_ctx,
            None,
            "",
            "python_import_lib",
            [python_import_lib_src],
            [python_import_lib_name],
        )
    numpy_include_rule = _symlink_genrule_for_dir(
        repository_ctx,
        numpy_include,
        "numpy_include/numpy",
        "numpy_include",
    )

    platform_constraint = ""
    if repository_ctx.attr.platform_constraint:
        platform_constraint = "\"%s\"" % repository_ctx.attr.platform_constraint
    repository_ctx.template("BUILD", build_tpl, {
        "%{PYTHON_BIN_PATH}": python_bin,
        "%{PYTHON_INCLUDE_GENRULE}": python_include_rule,
        "%{PYTHON_IMPORT_LIB_GENRULE}": python_import_lib_genrule,
        "%{NUMPY_INCLUDE_GENRULE}": numpy_include_rule,
        "%{PLATFORM_CONSTRAINT}": platform_constraint,
    })

def _create_remote_python_repository(repository_ctx, remote_config_repo):
    """Creates pointers to a remotely configured repo set up to build with Python."""
    repository_ctx.template("BUILD", config_repo_label(remote_config_repo, ":BUILD"), {})

def _python_autoconf_impl(repository_ctx):
    """Implementation of the python_autoconf repository rule."""
    if get_host_environ(repository_ctx, TF_PYTHON_CONFIG_REPO) != None:
        _create_remote_python_repository(
            repository_ctx,
            get_host_environ(repository_ctx, TF_PYTHON_CONFIG_REPO),
        )
    else:
        _create_local_python_repository(repository_ctx)

_ENVIRONS = [
    BAZEL_SH,
    PYTHON_BIN_PATH,
    PYTHON_LIB_PATH,
]

local_python_configure = repository_rule(
    implementation = _create_local_python_repository,
    environ = _ENVIRONS,
    attrs = {
        "environ": attr.string_dict(),
        "platform_constraint": attr.string(),
    },
)

remote_python_configure = repository_rule(
    implementation = _create_local_python_repository,
    environ = _ENVIRONS,
    remotable = True,
    attrs = {
        "environ": attr.string_dict(),
        "platform_constraint": attr.string(),
    },
)

python_configure = repository_rule(
    implementation = _python_autoconf_impl,
    environ = _ENVIRONS + [TF_PYTHON_CONFIG_REPO],
    attrs = {
        "platform_constraint": attr.string(),
    },
)
"""Detects and configures the local Python.

Add the following to your WORKSPACE file:

```python
python_configure(name = "local_config_python")
```

Args:
  name: A unique name for this workspace rule.
"""
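One detail worth reading closely: `_get_python_lib` cannot upload a script file through `execute`, so it packs the probe program into a single `-c` one-liner that writes `script.py` and then runs it with the configured interpreter. For reference, here is the same probe rendered as a standalone Python file, reconstructed from the `print_lib` list above (a reading aid, not a file in the repo):

```python
# Standalone rendering of the site-packages probe embedded in _get_python_lib.
from __future__ import print_function

import os
import site

# Candidate paths: entries from $PYTHONPATH plus the interpreter's own
# site-packages directories.
python_paths = []
if os.getenv("PYTHONPATH") is not None:
    python_paths = os.getenv("PYTHONPATH").split(":")
try:
    library_paths = site.getsitepackages()
except AttributeError:
    # Some environments (e.g. virtualenv on older Pythons) lack
    # site.getsitepackages(); fall back to distutils.
    from distutils.sysconfig import get_python_lib
    library_paths = [get_python_lib()]

# Keep only directories that actually exist, and report the first survivor
# as the Python library path.
paths = [p for p in set(python_paths + library_paths) if os.path.isdir(p)]
if len(paths) >= 1:
    print(paths[0])
```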
saved_models/tensorboard/tensorboard/BUILD
ADDED
@@ -0,0 +1,39 @@
# Description:
#   TensorBoard, a dashboard for investigating TensorFlow

load("//tensorboard/defs:web.bzl", "tf_web_library")

package(default_visibility = [":internal"])

licenses(["notice"])

exports_files(["LICENSE"])  # Needed for internal repo.

package_group(
    name = "internal",
    packages = ["//tensorboard/..."],
)

# The dependencies needed to initialize the `tensorboard` module itself,
# which are not sufficient to resolve all of its lazy imports. Use only
# if you're intending to link in a proper subset of TensorBoard's public
# API, you're linking in that subset explicitly in your downstream
# target, and you know what you're doing.

tf_web_library(
    name = "assets",
    srcs = [
        "//tensorboard/webapp:index.html",
        "//tensorboard/webapp:index.js",
        "//tensorboard/webapp:svg_bundle",
    ],
    path = "/",
    deps = [
        "//tensorboard/plugins/debugger_v2/tf_debugger_v2_plugin/views/source_code/monaco:monaco_editor",
        "//tensorboard/plugins/debugger_v2/tf_debugger_v2_plugin/views/source_code/monaco:monaco_languages",
        "@com_google_fonts_roboto",
    ],
)
saved_models/tensorboard/tensorboard/components/BUILD
ADDED
@@ -0,0 +1,75 @@
load("//tensorboard/defs:defs.bzl", "tf_js_binary", "tf_ts_library")

package(default_visibility = ["//tensorboard:internal"])

licenses(["notice"])

tf_ts_library(
    name = "polymer3_ts_lib",
    srcs = ["polymer3_lib.ts"],
    deps = [
        ":polymer3_interop_helper",
        "//tensorboard/plugins/audio/tf_audio_dashboard",
        "//tensorboard/plugins/custom_scalar/tf_custom_scalar_dashboard",
        "//tensorboard/plugins/distribution/tf_distribution_dashboard",
        "//tensorboard/plugins/graph/tf_graph_dashboard",
        "//tensorboard/plugins/histogram/tf_histogram_dashboard",
        "//tensorboard/plugins/hparams/tf_hparams_dashboard",
        "//tensorboard/plugins/image/tf_image_dashboard",
        "//tensorboard/plugins/mesh/tf_mesh_dashboard",
        "//tensorboard/plugins/pr_curve/tf_pr_curve_dashboard",
        "//tensorboard/plugins/profile_redirect/tf_profile_redirect_dashboard",
        "//tensorboard/plugins/scalar/tf_scalar_dashboard",
        "//tensorboard/plugins/text/tf_text_dashboard",
    ],
)

tf_ts_library(
    name = "polymer3_interop_helper",
    srcs = ["polymer3_interop_helper.ts"],
    deps = [
        "//tensorboard/components/experimental/plugin_util:plugin_host",
        "//tensorboard/components/tf_backend",
        "//tensorboard/components/tf_color_scale",
        "//tensorboard/components/tf_globals",
        "//tensorboard/components/tf_markdown_view",
        "//tensorboard/components/tf_paginated_view",
        "//tensorboard/components/tf_storage",
    ],
)

tf_js_binary(
    name = "polymer3_lib_binary_no_shim",
    compile = True,
    entry_point = ":polymer3_lib.ts",
    deps = [":polymer3_ts_lib"],
)

genrule(
    name = "polymer3_lib_binary",
    srcs = [
        # Do not sort. Order is important.
        "@npm//:node_modules/web-animations-js/web-animations-next-lite.min.js",
        ":polymer3_lib_binary_no_shim.js",
    ],
    outs = ["polymer3_lib_binary.js"],
    cmd = "for f in $(SRCS); do cat \"$$f\"; echo; done > $@",
)

tf_ts_library(
    name = "analytics",
    srcs = [
        "analytics.ts",
    ],
)

tf_ts_library(
    name = "security",
    srcs = [
        "security.ts",
    ],
    strict_checks = False,
    deps = [
        "@npm//@polymer/polymer",
    ],
)
saved_models/tensorboard/tensorboard/components/analytics.ts
ADDED
@@ -0,0 +1,20 @@
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// TODO(@jart): Give users the ability to opt-in to analytics.
// We fake the global 'ga' object, so the object is a noop. The
// google.analytics typing gives the object a type of UniversalAnalytics.ga.
// We do not track open source users.
(window as any)['ga'] = function () {};
saved_models/tensorboard/tensorboard/components/experimental/plugin_lib/BUILD
ADDED
@@ -0,0 +1,30 @@
load("//tensorboard/defs:defs.bzl", "tf_ts_library")

package(default_visibility = ["//tensorboard:internal"])

licenses(["notice"])

# TODO(psybuzz): create a NPM package when a better requirement comes up using
# tf_js_binary.
tf_ts_library(
    name = "plugin_lib",
    srcs = [
        "core.ts",
        "index.ts",
        "plugin-guest.ts",
        "runs.ts",
    ],
    deps = [
        "//tensorboard/components/experimental/plugin_util:message",
    ],
)

tf_ts_library(
    name = "plugin_lib_polymer_interop_internal",
    srcs = [
        "polymer-interop.ts",
    ],
    deps = [
        ":plugin_lib",
    ],
)
saved_models/tensorboard/tensorboard/components/experimental/plugin_lib/core.ts
ADDED
@@ -0,0 +1,25 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
import {sendMessage} from './plugin-guest';

/**
 * When called from a plugin with plugin_name `N`, it returns a promise which
 * resolves to an object containing values from the URL hash that begin with
 * `p.N.`.
 * @return {!Promise<{Object<string, string>}>}
 */
export async function getURLPluginData() {
  return sendMessage('experimental.GetURLPluginData');
}